diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 285b897..87730fb 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -35,6 +35,6 @@
// Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "node",
"features": {
- "golang": "1.19"
+ "golang": "1.21"
}
}
diff --git a/.dockerignore b/.dockerignore
index 720e7a0..804ab22 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -22,3 +22,4 @@
**/secrets.dev.yaml
**/values.dev.yaml
README.md
+!Dockerfile.rootless
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 30ef4f4..0000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-version: 2
-updates:
- # Fetch and update latest `npm` packages
- - package-ecosystem: npm
- directory: "/frontend"
- schedule:
- interval: daily
- time: "00:00"
- open-pull-requests-limit: 10
- reviewers:
- - hay-kot
- assignees:
- - hay-kot
- commit-message:
- prefix: fix
- prefix-development: chore
- include: scope
- - package-ecosystem: gomod
- directory: backend
- schedule:
- interval: daily
- time: "00:00"
- open-pull-requests-limit: 10
- reviewers:
- - hay-kot
- assignees:
- - hay-kot
- commit-message:
- prefix: fix
- prefix-development: chore
- include: scope
diff --git a/.github/workflows/partial-backend.yaml b/.github/workflows/partial-backend.yaml
index 3e23d59..fe4dac2 100644
--- a/.github/workflows/partial-backend.yaml
+++ b/.github/workflows/partial-backend.yaml
@@ -7,12 +7,12 @@ jobs:
Go:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
with:
- go-version: 1.19
+ go-version: "1.21"
- name: Install Task
uses: arduino/setup-task@v1
@@ -20,7 +20,7 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: golangci-lint
- uses: golangci/golangci-lint-action@v3
+ uses: golangci/golangci-lint-action@v4
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
diff --git a/.github/workflows/partial-frontend.yaml b/.github/workflows/partial-frontend.yaml
index c83a1bd..f849406 100644
--- a/.github/workflows/partial-frontend.yaml
+++ b/.github/workflows/partial-frontend.yaml
@@ -4,11 +4,37 @@ on:
workflow_call:
jobs:
- Frontend:
+ lint:
+ name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - uses: pnpm/action-setup@v3.0.0
+ with:
+ version: 6.0.2
+
+ - name: Install dependencies
+ run: pnpm install --shamefully-hoist
+ working-directory: frontend
+
+ - name: Run Lint
+ run: pnpm run lint:ci
+ working-directory: frontend
+
+ - name: Run Typecheck
+ run: pnpm run typecheck
+ working-directory: frontend
+
+ integration-tests:
+ name: Integration Tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -18,15 +44,15 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
with:
- go-version: 1.19
+ go-version: "1.21"
- - uses: actions/setup-node@v3
+ - uses: actions/setup-node@v4
with:
node-version: 18
- - uses: pnpm/action-setup@v2.2.2
+ - uses: pnpm/action-setup@v3.0.0
with:
version: 6.0.2
@@ -34,9 +60,5 @@ jobs:
run: pnpm install
working-directory: frontend
- - name: Run linter 👀
- run: pnpm lint
- working-directory: "frontend"
-
- name: Run Integration Tests
run: task test:ci
diff --git a/.github/workflows/partial-publish.yaml b/.github/workflows/partial-publish.yaml
index 899cb76..542171d 100644
--- a/.github/workflows/partial-publish.yaml
+++ b/.github/workflows/partial-publish.yaml
@@ -20,22 +20,22 @@ jobs:
name: "Publish Homebox"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
with:
- go-version: 1.19
+ go-version: "1.21"
- name: Set up QEMU
id: qemu
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v3
with:
image: tonistiigi/binfmt:latest
platforms: all
- name: install buildx
id: buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v3
with:
install: true
@@ -44,7 +44,7 @@ jobs:
env:
CR_PAT: ${{ secrets.GH_TOKEN }}
- - name: build nightly the image
+ - name: build nightly image
if: ${{ inputs.release == false }}
run: |
docker build --push --no-cache \
@@ -53,6 +53,16 @@ jobs:
--build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--platform=linux/amd64,linux/arm64,linux/arm/v7 .
+ - name: build nightly-rootless image
+ if: ${{ inputs.release == false }}
+ run: |
+ docker build --push --no-cache \
+ --tag=ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
+ --build-arg=COMMIT=$(git rev-parse HEAD) \
+ --build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
+ --file Dockerfile.rootless \
+ --platform=linux/amd64,linux/arm64,linux/arm/v7 .
+
- name: build release tagged the image
if: ${{ inputs.release == true }}
run: |
@@ -64,3 +74,16 @@ jobs:
--build-arg COMMIT=$(git rev-parse HEAD) \
--build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--platform linux/amd64,linux/arm64,linux/arm/v7 .
+
+ - name: build release tagged the rootless image
+ if: ${{ inputs.release == true }}
+ run: |
+ docker build --push --no-cache \
+ --tag ghcr.io/hay-kot/homebox:nightly-rootless \
+ --tag ghcr.io/hay-kot/homebox:latest-rootless \
+ --tag ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
+ --build-arg VERSION=${{ inputs.tag }} \
+ --build-arg COMMIT=$(git rev-parse HEAD) \
+ --build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
+ --platform linux/amd64,linux/arm64,linux/arm/v7 \
+ --file Dockerfile.rootless .
diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml
index b02daf4..e91e8ec 100644
--- a/.github/workflows/publish.yaml
+++ b/.github/workflows/publish.yaml
@@ -1,73 +1,29 @@
-name: Build Nightly
+name: Publish Dockers
on:
push:
branches:
- main
- release:
- types:
- - published
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
jobs:
- backend-tests:
- name: "Backend Server Tests"
- uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main
-
- frontend-tests:
- name: "Frontend and End-to-End Tests"
- uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main
-
deploy:
name: "Deploy Nightly to Fly.io"
runs-on: ubuntu-latest
- needs:
- - backend-tests
- - frontend-tests
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- uses: superfly/flyctl-actions/setup-flyctl@master
- run: flyctl deploy --remote-only
publish-nightly:
name: "Publish Nightly"
if: github.event_name != 'release'
- needs:
- - backend-tests
- - frontend-tests
uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
with:
tag: nightly
secrets:
GH_TOKEN: ${{ secrets.CR_PAT }}
- publish-tag:
- name: "Publish Tag"
- if: github.event_name == 'release'
- needs:
- - backend-tests
- - frontend-tests
- uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
- with:
- release: true
- tag: ${{ github.event.release.tag_name }}
- secrets:
- GH_TOKEN: ${{ secrets.CR_PAT }}
- deploy-docs:
- name: Deploy docs
- needs:
- - publish-tag
- runs-on: ubuntu-latest
- steps:
- - name: Checkout main
- uses: actions/checkout@v2
-
- - name: Deploy docs
- uses: mhausenblas/mkdocs-deploy-gh-pages@master
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- CONFIG_FILE: docs/mkdocs.yml
- EXTRA_PACKAGES: build-base
diff --git a/.github/workflows/pull-requests.yaml b/.github/workflows/pull-requests.yaml
index 2debdbd..f39539b 100644
--- a/.github/workflows/pull-requests.yaml
+++ b/.github/workflows/pull-requests.yaml
@@ -12,4 +12,4 @@ jobs:
frontend-tests:
name: "Frontend and End-to-End Tests"
- uses: ./.github/workflows/partial-frontend.yaml
+ uses: ./.github/workflows/partial-frontend.yaml
\ No newline at end of file
diff --git a/.github/workflows/tag.yaml b/.github/workflows/tag.yaml
new file mode 100644
index 0000000..8ac7c54
--- /dev/null
+++ b/.github/workflows/tag.yaml
@@ -0,0 +1,77 @@
+name: Publish Release
+
+on:
+ push:
+ tags:
+ - v*
+
+env:
+ FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
+
+jobs:
+ backend-tests:
+ name: "Backend Server Tests"
+ uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main
+
+ frontend-tests:
+ name: "Frontend and End-to-End Tests"
+ uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main
+
+ goreleaser:
+ name: goreleaser
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+
+ - uses: pnpm/action-setup@v2
+ with:
+ version: 7.30.1
+
+ - name: Build Frontend and Copy to Backend
+ working-directory: frontend
+ run: |
+ pnpm install --shamefully-hoist
+ pnpm run build
+ cp -r ./.output/public ../backend/app/api/static/
+
+ - name: Run GoReleaser
+ uses: goreleaser/goreleaser-action@v5
+ with:
+ workdir: "backend"
+ distribution: goreleaser
+ version: latest
+ args: release --clean
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ publish-tag:
+ name: "Publish Tag"
+ uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
+ with:
+ release: true
+ tag: ${{ github.ref_name }}
+ secrets:
+ GH_TOKEN: ${{ secrets.CR_PAT }}
+
+ deploy-docs:
+ name: Deploy docs
+ needs:
+ - publish-tag
+ - goreleaser
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout main
+ uses: actions/checkout@v4
+
+ - name: Deploy docs
+ uses: mhausenblas/mkdocs-deploy-gh-pages@master
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ CONFIG_FILE: docs/mkdocs.yml
+ EXTRA_PACKAGES: build-base
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 7fbecd2..d247138 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,3 +45,13 @@ node_modules
.output
.env
dist
+
+.pnpm-store
+backend/app/api/app
+backend/app/api/__debug_bin
+dist/
+
+# Nuxt Publish Dir
+backend/app/api/static/public/*
+!backend/app/api/static/public/.gitkeep
+backend/api
\ No newline at end of file
diff --git a/.scaffold/model/scaffold.yaml b/.scaffold/model/scaffold.yaml
new file mode 100644
index 0000000..028d2fa
--- /dev/null
+++ b/.scaffold/model/scaffold.yaml
@@ -0,0 +1,33 @@
+---
+# yaml-language-server: $schema=https://hay-kot.github.io/scaffold/schema.json
+messages:
+ pre: |
+ # Ent Model Generation
+
+ With Boilerplate!
+ post: |
+ Complete!
+
+questions:
+ - name: "model"
+ prompt:
+ message: "What is the name of the model? (PascalCase)"
+ required: true
+
+ - name: "by_group"
+ prompt:
+ confirm: "Include a Group Edge? (group_id -> id)"
+ required: true
+
+rewrites:
+ - from: 'templates/model.go'
+ to: 'backend/internal/data/ent/schema/{{ lower .Scaffold.model }}.go'
+
+inject:
+ - name: "Insert Groups Edge"
+ path: 'backend/internal/data/ent/schema/group.go'
+ at: // $scaffold_edge
+ template: |
+ {{- if .Scaffold.by_group -}}
+ owned("{{ lower .Scaffold.model }}s", {{ .Scaffold.model }}.Type),
+ {{- end -}}
diff --git a/.scaffold/model/templates/model.go b/.scaffold/model/templates/model.go
new file mode 100644
index 0000000..b73ac16
--- /dev/null
+++ b/.scaffold/model/templates/model.go
@@ -0,0 +1,40 @@
+package schema
+
+import (
+ "entgo.io/ent"
+
+ "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
+)
+
+type {{ .Scaffold.model }} struct {
+ ent.Schema
+}
+
+func ({{ .Scaffold.model }}) Mixin() []ent.Mixin {
+ return []ent.Mixin{
+ mixins.BaseMixin{},
+ {{- if .Scaffold.by_group }}
+ GroupMixin{ref: "{{ snakecase .Scaffold.model }}s"},
+ {{- end }}
+ }
+}
+
+// Fields of the {{ .Scaffold.model }}.
+func ({{ .Scaffold.model }}) Fields() []ent.Field {
+ return []ent.Field{
+ // field.String("name").
+ }
+}
+
+// Edges of the {{ .Scaffold.model }}.
+func ({{ .Scaffold.model }}) Edges() []ent.Edge {
+ return []ent.Edge{
+ // edge.From("group", Group.Type).
+ }
+}
+
+func ({{ .Scaffold.model }}) Indexes() []ent.Index {
+ return []ent.Index{
+ // index.Fields("token"),
+ }
+}
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..d375395
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,47 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "compounds": [
+ {
+ "name": "Full Stack",
+ "configurations": ["Launch Backend", "Launch Frontend"],
+ "stopAll": true
+ }
+ ],
+ "configurations": [
+ {
+ "name": "Launch Backend",
+ "type": "go",
+ "request": "launch",
+ "mode": "debug",
+ "program": "${workspaceRoot}/backend/app/api/",
+ "args": [],
+ "env": {
+ "HBOX_DEMO": "true",
+ "HBOX_LOG_LEVEL": "debug",
+ "HBOX_DEBUG_ENABLED": "true",
+ "HBOX_STORAGE_DATA": "${workspaceRoot}/backend/.data",
+ "HBOX_STORAGE_SQLITE_URL": "${workspaceRoot}/backend/.data/homebox.db?_fk=1"
+ },
+ },
+ {
+ "name": "Launch Frontend",
+ "type": "node",
+ "request": "launch",
+ "runtimeExecutable": "pnpm",
+ "runtimeArgs": [
+ "run",
+ "dev"
+ ],
+ "cwd": "${workspaceFolder}/frontend",
+ "serverReadyAction": {
+ "action": "debugWithChrome",
+ "pattern": "Local: http://localhost:([0-9]+)",
+ "uriFormat": "http://localhost:%s",
+ "webRoot": "${workspaceFolder}/frontend"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index b330533..09c7a0e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,4 @@
{
- "editor.codeActionsOnSave": {
- "source.fixAll.eslint": true
- },
"yaml.schemas": {
"https://squidfunk.github.io/mkdocs-material/schema.json": "mkdocs.yml"
},
@@ -12,6 +9,26 @@
"README.md": "LICENSE, SECURITY.md"
},
"cSpell.words": [
- "debughandlers"
- ]
+ "debughandlers",
+ "Homebox"
+ ],
+ // use ESLint to format code on save
+ "editor.formatOnSave": false,
+ "editor.defaultFormatter": "dbaeumer.vscode-eslint",
+ "editor.codeActionsOnSave": {
+ "source.fixAll.eslint": "explicit"
+ },
+ "[typescript]": {
+ "editor.defaultFormatter": "dbaeumer.vscode-eslint"
+ },
+ "eslint.format.enable": true,
+ "css.validate": false,
+ "tailwindCSS.includeLanguages": {
+ "vue": "html",
+ "vue-html": "html"
+ },
+ "editor.quickSuggestions": {
+ "strings": true
+ },
+ "tailwindCSS.experimental.configFile": "./frontend/tailwind.config.js"
}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 90095ac..52b2a0f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,16 +1,16 @@
# Contributing
-## We Develop with Github
+## We Develop with GitHub
-We use github to host code, to track issues and feature requests, as well as accept pull requests.
+We use GitHub to host code, to track issues and feature requests, as well as accept pull requests.
## Branch Flow
-We use the `main` branch as the development branch. All PRs should be made to the `main` branch from a feature branch. To create a pull request you can use the following steps:
+We use the `main` branch as the development branch. All PRs should be made to the `main` branch from a feature branch. To create a pull request, you can use the following steps:
1. Fork the repository and create a new branch from `main`.
2. If you've added code that should be tested, add tests.
-3. If you've changed API's, update the documentation.
+3. If you've changed APIs, update the documentation.
4. Ensure that the test suite and linters pass
5. Issue your pull request
@@ -18,7 +18,7 @@ We use the `main` branch as the development branch. All PRs should be made to th
### Prerequisites
-There is a devcontainer available for this project. If you are using VSCode, you can use the devcontainer to get started. If you are not using VSCode, you can need to ensure that you have the following tools installed:
+There is a devcontainer available for this project. If you are using VSCode, you can use the devcontainer to get started. If you are not using VSCode, you need to ensure that you have the following tools installed:
- [Go 1.19+](https://golang.org/doc/install)
- [Swaggo](https://github.com/swaggo/swag)
@@ -31,21 +31,27 @@ If you're using `taskfile` you can run `task --list-all` for a list of all comma
### Setup
-If you're using the taskfile you can use the `task setup` command to run the required setup commands. Otherwise you can review the commands required in the `Taskfile.yml` file.
+If you're using the taskfile, you can use the `task setup` command to run the required setup commands. Otherwise, you can review the commands required in the `Taskfile.yml` file.
-Note that when installing dependencies with pnpm you must use the `--shamefully-hoist` flag. If you don't use this flag you will get an error when running the the frontend server.
+Note that when installing dependencies with pnpm you must use the `--shamefully-hoist` flag. If you don't use this flag, you will get an error when running the frontend server.
### API Development Notes
start command `task go:run`
1. API Server does not auto reload. You'll need to restart the server after making changes.
-2. Unit tests should be written in Go, however end-to-end or user story tests should be written in TypeScript using the client library in the frontend directory.
+2. Unit tests should be written in Go, however, end-to-end or user story tests should be written in TypeScript using the client library in the frontend directory.
### Frontend Development Notes
start command `task: ui:dev`
1. The frontend is a Vue 3 app with Nuxt.js that uses Tailwind and DaisyUI for styling.
-2. We're using Vitest for our automated testing. you can run these with `task ui:watch`.
-3. Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass.
\ No newline at end of file
+2. We're using Vitest for our automated testing. You can run these with `task ui:watch`.
+3. Tests require the API server to be running, and in some cases the first run will fail due to a race condition. If this happens, just run the tests again and they should pass.
+
+## Publishing Release
+
+Create a new tag in GitHub with the version number vX.X.X. This will trigger a new release to be created.
+
+Test -> Goreleaser -> Publish Release -> Trigger Docker Builds -> Deploy Docs + Fly.io Demo
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index d93ea2b..11d5c74 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,49 +1,48 @@
-
-# Build Nuxt
-FROM node:17-alpine as frontend-builder
-WORKDIR /app
-RUN npm install -g pnpm
-COPY frontend/package.json frontend/pnpm-lock.yaml ./
-RUN pnpm install --frozen-lockfile --shamefully-hoist
-COPY frontend .
-RUN pnpm build
-
-# Build API
-FROM golang:alpine AS builder
-ARG BUILD_TIME
-ARG COMMIT
-ARG VERSION
-RUN apk update && \
- apk upgrade && \
- apk add --update git build-base gcc g++
-
-WORKDIR /go/src/app
-COPY ./backend .
-RUN go get -d -v ./...
-RUN rm -rf ./app/api/public
-COPY --from=frontend-builder /app/.output/public ./app/api/static/public
-RUN CGO_ENABLED=1 GOOS=linux go build \
- -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
- -o /go/bin/api \
- -v ./app/api/*.go
-
-# Production Stage
-FROM alpine:latest
-
-ENV HBOX_MODE=production
-ENV HBOX_STORAGE_DATA=/data/
-ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1
-
-RUN apk --no-cache add ca-certificates
-RUN mkdir /app
-COPY --from=builder /go/bin/api /app
-
-RUN chmod +x /app/api
-
-LABEL Name=homebox Version=0.0.1
-EXPOSE 7745
-WORKDIR /app
-VOLUME [ "/data" ]
-
-ENTRYPOINT [ "/app/api" ]
-CMD [ "/data/config.yml" ]
\ No newline at end of file
+
+# Build Nuxt
+FROM r.batts.cloud/nodejs:18 as frontend-builder
+WORKDIR /app
+RUN npm install -g pnpm@latest-9
+COPY frontend/package.json frontend/pnpm-lock.yaml ./
+RUN pnpm install --frozen-lockfile --shamefully-hoist
+COPY frontend .
+RUN pnpm build
+
+# Build API
+FROM r.batts.cloud/golang:1.24 AS builder
+ARG BUILD_TIME
+ARG COMMIT
+ARG VERSION
+RUN apt update && \
+ apt install -y git build-essential gcc g++
+
+WORKDIR /go/src/app
+COPY ./backend .
+RUN go get -d -v ./...
+RUN rm -rf ./app/api/public
+COPY --from=frontend-builder /app/.output/public ./app/api/static/public
+RUN CGO_ENABLED=0 GOOS=linux go build \
+ -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
+ -o /go/bin/api \
+ -v ./app/api/*.go
+
+# Production Stage
+FROM r.batts.cloud/debian:trixie
+
+ENV HBOX_MODE=production
+ENV HBOX_STORAGE_DATA=/data/
+ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_pragma=busy_timeout=2000&_pragma=journal_mode=WAL&_fk=1
+
+RUN mkdir /app
+COPY --from=builder /go/bin/api /app
+
+RUN chmod +x /app/api
+
+LABEL Name=homebox Version=0.0.1
+LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox"
+EXPOSE 7745
+WORKDIR /app
+VOLUME [ "/data" ]
+
+ENTRYPOINT [ "/app/api" ]
+CMD [ "/data/config.yml" ]
diff --git a/Dockerfile.rootless b/Dockerfile.rootless
new file mode 100644
index 0000000..e1c98aa
--- /dev/null
+++ b/Dockerfile.rootless
@@ -0,0 +1,53 @@
+
+# Build Nuxt
+FROM node:17-alpine as frontend-builder
+WORKDIR /app
+RUN npm install -g pnpm
+COPY frontend/package.json frontend/pnpm-lock.yaml ./
+RUN pnpm install --frozen-lockfile --shamefully-hoist
+COPY frontend .
+RUN pnpm build
+
+# Build API
+FROM golang:alpine AS builder
+ARG BUILD_TIME
+ARG COMMIT
+ARG VERSION
+RUN apk update && \
+ apk upgrade && \
+ apk add --update git build-base gcc g++
+
+WORKDIR /go/src/app
+COPY ./backend .
+RUN go get -d -v ./...
+RUN rm -rf ./app/api/public
+COPY --from=frontend-builder /app/.output/public ./app/api/static/public
+RUN CGO_ENABLED=0 GOOS=linux go build \
+ -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
+ -o /go/bin/api \
+ -v ./app/api/*.go && \
+ chmod +x /go/bin/api && \
+ # create a directory so that we can copy it in the next stage
+ mkdir /data
+
+# Production Stage
+FROM gcr.io/distroless/static
+
+ENV HBOX_MODE=production
+ENV HBOX_STORAGE_DATA=/data/
+ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1
+
+# Copy the binary and the (empty) /data dir and
+# change the ownership to the low-privileged user
+COPY --from=builder --chown=nonroot /go/bin/api /app
+COPY --from=builder --chown=nonroot /data /data
+
+LABEL Name=homebox Version=0.0.1
+LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox"
+EXPOSE 7745
+VOLUME [ "/data" ]
+
+# Drop root and run as low-privileged user
+USER nonroot
+ENTRYPOINT [ "/app" ]
+CMD [ "/data/config.yml" ]
diff --git a/README.md b/README.md
index 691504f..148322b 100644
--- a/README.md
+++ b/README.md
@@ -16,12 +16,28 @@
[Configuration & Docker Compose](https://hay-kot.github.io/homebox/quick-start)
```bash
-docker run --name=homebox \
- --restart=always \
- --publish=3100:7745 \
- ghcr.io/hay-kot/homebox:latest
+# If using the rootless image, ensure data
+# folder has correct permissions
+mkdir -p /path/to/data/folder
+chown 65532:65532 -R /path/to/data/folder
+docker run -d \
+ --name homebox \
+ --restart unless-stopped \
+ --publish 3100:7745 \
+ --env TZ=Europe/Bucharest \
+ --volume /path/to/data/folder/:/data \
+ ghcr.io/hay-kot/homebox:latest
+# ghcr.io/hay-kot/homebox:latest-rootless
```
+
+## Contributing
+
+Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
+
+If you are not a coder, you can still contribute financially. Financial contributions help me prioritize working on this project over others and help me know that there is a real demand for project development.
+
+
## Credits
- Logo by [@lakotelman](https://github.com/lakotelman)
diff --git a/Taskfile.yml b/Taskfile.yml
index 3dd05de..4d9c1aa 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -1,7 +1,9 @@
version: "3"
env:
- HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_fk=1
+ HBOX_LOG_LEVEL: debug
+ HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_pragma=busy_timeout=1000&_pragma=journal_mode=WAL&_fk=1
+ HBOX_OPTIONS_ALLOW_REGISTRATION: "true"
UNSAFE_DISABLE_PASSWORD_PROJECTION: "yes_i_am_sure"
tasks:
setup:
@@ -11,61 +13,77 @@ tasks:
- cd backend && go mod tidy
- cd frontend && pnpm install --shamefully-hoist
- generate:
- desc: |
- Generates collateral files from the backend project
- including swagger docs and typescripts type for the frontend
- deps:
- - db:generate
+ swag:
+ desc: Generate swagger docs
+ dir: backend/app/api/static/
+ vars:
+ API: "../"
+ INTERNAL: "../../../internal"
+ PKGS: "../../../pkgs"
+ cmds:
+ - swag fmt --dir={{ .API }}
+ - swag init --dir={{ .API }},{{ .INTERNAL }}/core/services,{{ .INTERNAL }}/data/repo --parseDependency
+ sources:
+ - "./backend/app/api/**/*"
+ - "./backend/internal/data/**"
+ - "./backend/internal/core/services/**/*"
+ - "./backend/app/tools/typegen/main.go"
+
+ typescript-types:
+ desc: Generates typescript types from swagger definition
cmds:
- - cd backend/app/api/static && swag fmt --dir=../
- - cd backend/app/api/static && swag init --dir=../,../../../internal,../../../pkgs
- |
npx swagger-typescript-api \
--no-client \
--modular \
--path ./backend/app/api/static/docs/swagger.json \
--output ./frontend/lib/api/types
- - python3 ./scripts/process-types.py ./frontend/lib/api/types/data-contracts.ts
+ - go run ./backend/app/tools/typegen/main.go ./frontend/lib/api/types/data-contracts.ts
sources:
- - "./backend/app/api/**/*"
- - "./backend/internal/data/**"
- - "./backend/internal/services/**/*"
- - "./scripts/process-types.py"
- generates:
- - "./frontend/lib/api/types/data-contracts.ts"
- - "./backend/internal/data/ent/schema"
- - "./backend/app/api/static/docs/swagger.json"
- - "./backend/app/api/static/docs/swagger.yaml"
+ - ./backend/app/tools/typegen/main.go
+ - ./backend/app/api/static/docs/swagger.json
+
+ generate:
+ deps:
+ - db:generate
+ cmds:
+ - task: swag
+ - task: typescript-types
+ - cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json
go:run:
desc: Starts the backend api server (depends on generate task)
+ dir: backend
deps:
- generate
cmds:
- - cd backend && go run ./app/api/ {{ .CLI_ARGS }}
+ - go run ./app/api/ {{ .CLI_ARGS }}
silent: false
go:test:
desc: Runs all go tests using gotestsum - supports passing gotestsum args
+ dir: backend
cmds:
- - cd backend && gotestsum {{ .CLI_ARGS }} ./...
+ - gotestsum {{ .CLI_ARGS }} ./...
go:coverage:
desc: Runs all go tests with -race flag and generates a coverage report
+ dir: backend
cmds:
- - cd backend && go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
+ - go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
silent: true
go:tidy:
desc: Runs go mod tidy on the backend
+ dir: backend
cmds:
- - cd backend && go mod tidy
+ - go mod tidy
go:lint:
desc: Runs golangci-lint
+ dir: backend
cmds:
- - cd backend && golangci-lint run ./...
+ - golangci-lint run ./...
go:all:
desc: Runs all go test and lint related tasks
@@ -76,19 +94,18 @@ tasks:
go:build:
desc: Builds the backend binary
+ dir: backend
cmds:
- - cd backend && go build -o ../build/backend ./app/api
+ - go build -o ../build/backend ./app/api
db:generate:
desc: Run Entgo.io Code Generation
+ dir: backend/internal/
cmds:
- |
- cd backend/internal/ && go generate ./... \
- --template=./data/ent/schema/templates/has_id.tmpl
+ go generate ./...
sources:
- "./backend/internal/data/ent/schema/**/*"
- generates:
- - "./backend/internal/ent/"
db:migration:
desc: Runs the database diff engine to generate a SQL migration files
@@ -99,13 +116,27 @@ tasks:
ui:watch:
desc: Starts the vitest test runner in watch mode
+ dir: frontend
cmds:
- - cd frontend && pnpm run test:watch
+ - pnpm run test:watch
ui:dev:
desc: Run frontend development server
+ dir: frontend
cmds:
- - cd frontend && pnpm dev
+ - pnpm dev
+
+ ui:fix:
+ desc: Runs prettier and eslint on the frontend
+ dir: frontend
+ cmds:
+ - pnpm run lint:fix
+
+ ui:check:
+ desc: Runs type checking
+ dir: frontend
+ cmds:
+ - pnpm run typecheck
test:ci:
desc: Runs end-to-end test on a live server (only for use in CI)
@@ -115,3 +146,12 @@ tasks:
- sleep 5
- cd frontend && pnpm run test:ci
silent: true
+
+ pr:
+ desc: Runs all tasks required for a PR
+ cmds:
+ - task: generate
+ - task: go:all
+ - task: ui:check
+ - task: ui:fix
+ - task: test:ci
diff --git a/backend/.gitignore b/backend/.gitignore
new file mode 100644
index 0000000..cde0123
--- /dev/null
+++ b/backend/.gitignore
@@ -0,0 +1,2 @@
+
+dist/
diff --git a/backend/.golangci.yml b/backend/.golangci.yml
new file mode 100644
index 0000000..8f63110
--- /dev/null
+++ b/backend/.golangci.yml
@@ -0,0 +1,74 @@
+run:
+ timeout: 10m
+ skip-dirs:
+ - internal/data/ent.*
+linters-settings:
+ goconst:
+ min-len: 5
+ min-occurrences: 5
+ exhaustive:
+ default-signifies-exhaustive: true
+ revive:
+ ignore-generated-header: false
+ severity: warning
+ confidence: 3
+ depguard:
+ rules:
+ main:
+ deny:
+ - pkg: io/ioutil
+ desc: |
+ Deprecated: As of Go 1.16, the same functionality is now provided by
+ package io or package os, and those implementations should be
+ preferred in new code. See the specific function documentation for
+ details.
+ gocritic:
+ enabled-checks:
+ - ruleguard
+ testifylint:
+ enable-all: true
+ tagalign:
+ order:
+ - json
+ - schema
+ - yaml
+ - yml
+ - toml
+ - validate
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bodyclose
+ - depguard
+ - dogsled
+ - errcheck
+ - errorlint
+ - exhaustive
+ - exportloopref
+ - gochecknoinits
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - goprintffuncname
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nakedret
+ - revive
+ - staticcheck
+ - stylecheck
+ - tagalign
+ - testifylint
+ - typecheck
+ # (duplicate "typecheck" entry removed)
+ - unconvert
+ - unused
+ - whitespace
+ - zerologlint
+ - sqlclosecheck
+issues:
+ exclude-use-default: false
+ fix: true
diff --git a/backend/.goreleaser.yaml b/backend/.goreleaser.yaml
new file mode 100644
index 0000000..37752ec
--- /dev/null
+++ b/backend/.goreleaser.yaml
@@ -0,0 +1,54 @@
+# This is an example .goreleaser.yml file with some sensible defaults.
+# Make sure to check the documentation at https://goreleaser.com
+before:
+ hooks:
+ # you may remove this if you don't need go generate
+ - go generate ./...
+builds:
+ - main: ./app/api
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - linux
+ - windows
+ - darwin
+ goarch:
+ - amd64
+ - "386"
+ - arm
+ - arm64
+ ignore:
+ - goos: windows
+ goarch: arm
+ - goos: windows
+ goarch: "386"
+
+archives:
+ - format: tar.gz
+ # this name template makes the OS and Arch compatible with the results of uname.
+ name_template: >-
+ {{ .ProjectName }}_
+ {{- title .Os }}_
+ {{- if eq .Arch "amd64" }}x86_64
+ {{- else if eq .Arch "386" }}i386
+ {{- else }}{{ .Arch }}{{ end }}
+ {{- if .Arm }}v{{ .Arm }}{{ end }}
+ # use zip for windows archives
+ format_overrides:
+ - goos: windows
+ format: zip
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ name_template: "{{ incpatch .Version }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^docs:'
+ - '^test:'
+
+# The lines beneath this are called `modelines`. See `:help modeline`
+# Feel free to remove those if you don't want/use them.
+# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
+# vim: set ts=2 sw=2 tw=0 fo=cnqoj
diff --git a/backend/app/api/app.go b/backend/app/api/app.go
index 854c4e5..5d285d3 100644
--- a/backend/app/api/app.go
+++ b/backend/app/api/app.go
@@ -1,23 +1,21 @@
package main
import (
- "time"
-
"github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/config"
"github.com/hay-kot/homebox/backend/pkgs/mailer"
- "github.com/hay-kot/homebox/backend/pkgs/server"
)
type app struct {
conf *config.Config
mailer mailer.Mailer
db *ent.Client
- server *server.Server
repos *repo.AllRepos
services *services.AllServices
+ bus *eventbus.EventBus
}
func new(conf *config.Config) *app {
@@ -35,10 +33,3 @@ func new(conf *config.Config) *app {
return s
}
-
-func (a *app) startBgTask(t time.Duration, fn func()) {
- for {
- a.server.Background(fn)
- time.Sleep(t)
- }
-}
diff --git a/backend/app/api/bgrunner.go b/backend/app/api/bgrunner.go
new file mode 100644
index 0000000..ce4b7cc
--- /dev/null
+++ b/backend/app/api/bgrunner.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+ "context"
+ "time"
+)
+
+type BackgroundTask struct {
+ name string
+ Interval time.Duration
+ Fn func(context.Context)
+}
+
+func (tsk *BackgroundTask) Name() string {
+ return tsk.name
+}
+
+func NewTask(name string, interval time.Duration, fn func(context.Context)) *BackgroundTask {
+ return &BackgroundTask{
+ Interval: interval,
+ Fn: fn,
+ }
+}
+
+func (tsk *BackgroundTask) Start(ctx context.Context) error {
+ timer := time.NewTimer(tsk.Interval)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-timer.C:
+ timer.Reset(tsk.Interval)
+ tsk.Fn(ctx)
+ }
+ }
+}
diff --git a/backend/app/api/demo.go b/backend/app/api/demo.go
index 538655d..183e0e0 100644
--- a/backend/app/api/demo.go
+++ b/backend/app/api/demo.go
@@ -2,57 +2,61 @@ package main
import (
"context"
- "encoding/csv"
"strings"
+ "time"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/rs/zerolog/log"
)
func (a *app) SetupDemo() {
- csvText := `Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Model Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes
+ csvText := `HB.import_ref,HB.location,HB.labels,HB.quantity,HB.name,HB.description,HB.insured,HB.serial_number,HB.model_number,HB.manufacturer,HB.notes,HB.purchase_from,HB.purchase_price,HB.purchase_time,HB.lifetime_warranty,HB.warranty_expires,HB.warranty_details,HB.sold_to,HB.sold_price,HB.sold_time,HB.sold_notes
,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,"Zooz 700 Series Z-Wave Universal Relay ZEN17 for Awnings, Garage Doors, Sprinklers, and More | 2 NO-C-NC Relays (20A, 10A) | Signal Repeater | Hub Required (Compatible with SmartThings and Hubitat)",,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,,,,,,
,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,"Zooz Z-Wave Plus S2 Motion Sensor ZSE18 with Magnetic Mount, Works with Vera and SmartThings",,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,,,,,,
,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,"Zooz Z-Wave Plus Power Switch ZEN15 for 110V AC Units, Sump Pumps, Humidifiers, and More",,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,,,,,,
,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,"Ecolink Z-Wave PIR Motion Detector Pet Immune, White (PIRZWAVE2.5-ECO)",,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,,,,,,
,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,"Yale Security YRD226-ZW2-619 YRD226ZW2619 Touchscreen Deadbolt, Satin Nickel",,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,,,,,,
-,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,"UltraPro Z-Wave Smart Rocker Light Dimmer with QuickFit and SimpleWire, 3-Way Ready, Compatible with Alexa, Google Assistant, ZWave Hub Required, Repeater/Range Extender, White Paddle Only, 39351",,,39351,Honeywell,,Amazon,65.98,09/30/0202,,,,,,,
+,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,"UltraPro Z-Wave Smart Rocker Light Dimmer with QuickFit and SimpleWire, 3-Way Ready, Compatible with Alexa, Google Assistant, ZWave Hub Required, Repeater/Range Extender, White Paddle Only, 39351",,,39351,Honeywell,,Amazon,65.98,09/30/0202,,,,,,,
`
- var (
- registration = services.UserRegistration{
- Email: "demo@example.com",
- Name: "Demo",
- Password: "demo",
- }
- )
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ registration := services.UserRegistration{
+ Email: "demo@example.com",
+ Name: "Demo",
+ Password: "demo",
+ }
// First check if we've already setup a demo user and skip if so
- _, err := a.services.User.Login(context.Background(), registration.Email, registration.Password)
+ log.Debug().Msg("Checking if demo user already exists")
+ _, err := a.services.User.Login(ctx, registration.Email, registration.Password, false)
if err == nil {
+ log.Info().Msg("Demo user already exists, skipping setup")
return
}
- _, err = a.services.User.RegisterUser(context.Background(), registration)
+ log.Debug().Msg("Demo user does not exist, setting up demo")
+ _, err = a.services.User.RegisterUser(ctx, registration)
if err != nil {
log.Err(err).Msg("Failed to register demo user")
log.Fatal().Msg("Failed to setup demo")
}
- token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password)
- self, _ := a.services.User.GetSelf(context.Background(), token.Raw)
-
- // Read CSV Text
- reader := csv.NewReader(strings.NewReader(csvText))
- reader.Comma = ','
-
- records, err := reader.ReadAll()
+ token, err := a.services.User.Login(ctx, registration.Email, registration.Password, false)
if err != nil {
- log.Err(err).Msg("Failed to read CSV")
+ log.Err(err).Msg("Failed to login demo user")
log.Fatal().Msg("Failed to setup demo")
+ return
+ }
+ self, err := a.services.User.GetSelf(ctx, token.Raw)
+ if err != nil {
+ log.Err(err).Msg("Failed to get self")
+ log.Fatal().Msg("Failed to setup demo")
+ return
}
- _, err = a.services.Items.CsvImport(context.Background(), self.GroupID, records)
+ _, err = a.services.Items.CsvImport(ctx, self.GroupID, strings.NewReader(csvText))
if err != nil {
log.Err(err).Msg("Failed to import CSV")
log.Fatal().Msg("Failed to setup demo")
diff --git a/backend/app/api/handlers/debughandlers/debug.go b/backend/app/api/handlers/debughandlers/debug.go
index ffba624..5f66fed 100644
--- a/backend/app/api/handlers/debughandlers/debug.go
+++ b/backend/app/api/handlers/debughandlers/debug.go
@@ -1,3 +1,4 @@
+// Package debughandlers provides handlers for debugging.
package debughandlers
import (
diff --git a/backend/app/api/handlers/v1/assets/QRIcon.png b/backend/app/api/handlers/v1/assets/QRIcon.png
new file mode 100644
index 0000000..016ceaf
Binary files /dev/null and b/backend/app/api/handlers/v1/assets/QRIcon.png differ
diff --git a/backend/app/api/handlers/v1/controller.go b/backend/app/api/handlers/v1/controller.go
index 4f7a73e..eb60212 100644
--- a/backend/app/api/handlers/v1/controller.go
+++ b/backend/app/api/handlers/v1/controller.go
@@ -1,13 +1,38 @@
+// Package v1 provides the API handlers for version 1 of the API.
package v1
import (
+ "encoding/json"
"net/http"
+ "time"
+ "github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+ "github.com/rs/zerolog/log"
+
+ "github.com/olahol/melody"
)
+type Results[T any] struct {
+ Items []T `json:"items"`
+}
+
+func WrapResults[T any](items []T) Results[T] {
+ return Results[T]{Items: items}
+}
+
+type Wrapped struct {
+ Item interface{} `json:"item"`
+}
+
+func Wrap(v any) Wrapped {
+ return Wrapped{Item: v}
+}
+
func WithMaxUploadSize(maxUploadSize int64) func(*V1Controller) {
return func(ctrl *V1Controller) {
ctrl.maxUploadSize = maxUploadSize
@@ -26,12 +51,20 @@ func WithRegistration(allowRegistration bool) func(*V1Controller) {
}
}
+func WithSecureCookies(secure bool) func(*V1Controller) {
+ return func(ctrl *V1Controller) {
+ ctrl.cookieSecure = secure
+ }
+}
+
type V1Controller struct {
+ cookieSecure bool
repo *repo.AllRepos
svc *services.AllServices
maxUploadSize int64
isDemo bool
allowRegistration bool
+ bus *eventbus.EventBus
}
type (
@@ -43,27 +76,29 @@ type (
BuildTime string `json:"buildTime"`
}
- ApiSummary struct {
- Healthy bool `json:"health"`
- Versions []string `json:"versions"`
- Title string `json:"title"`
- Message string `json:"message"`
- Build Build `json:"build"`
- Demo bool `json:"demo"`
+ APISummary struct {
+ Healthy bool `json:"health"`
+ Versions []string `json:"versions"`
+ Title string `json:"title"`
+ Message string `json:"message"`
+ Build Build `json:"build"`
+ Demo bool `json:"demo"`
+ AllowRegistration bool `json:"allowRegistration"`
}
)
-func BaseUrlFunc(prefix string) func(s string) string {
+func BaseURLFunc(prefix string) func(s string) string {
return func(s string) string {
return prefix + "/v1" + s
}
}
-func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ...func(*V1Controller)) *V1Controller {
+func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, bus *eventbus.EventBus, options ...func(*V1Controller)) *V1Controller {
ctrl := &V1Controller{
repo: repos,
svc: svc,
allowRegistration: true,
+ bus: bus,
}
for _, opt := range options {
@@ -74,19 +109,105 @@ func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ..
}
// HandleBase godoc
-// @Summary Retrieves the basic information about the API
-// @Tags Base
-// @Produce json
-// @Success 200 {object} ApiSummary
-// @Router /v1/status [GET]
-func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) server.HandlerFunc {
+//
+// @Summary Application Info
+// @Tags Base
+// @Produce json
+// @Success 200 {object} APISummary
+// @Router /v1/status [GET]
+func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
- return server.Respond(w, http.StatusOK, ApiSummary{
- Healthy: ready(),
- Title: "Go API Template",
- Message: "Welcome to the Go API Template Application!",
- Build: build,
- Demo: ctrl.isDemo,
+ return server.JSON(w, http.StatusOK, APISummary{
+ Healthy: ready(),
+ Title: "Homebox",
+ Message: "Track, Manage, and Organize your Things",
+ Build: build,
+ Demo: ctrl.isDemo,
+ AllowRegistration: ctrl.allowRegistration,
})
}
}
+
+// HandleCurrency godoc
+//
+// @Summary Currency
+// @Tags Base
+// @Produce json
+// @Success 200 {object} currencies.Currency
+// @Router /v1/currency [GET]
+func (ctrl *V1Controller) HandleCurrency() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ // Set Cache for 10 Minutes
+ w.Header().Set("Cache-Control", "max-age=600")
+
+ return server.JSON(w, http.StatusOK, ctrl.svc.Currencies.Slice())
+ }
+}
+
+func (ctrl *V1Controller) HandleCacheWS() errchain.HandlerFunc {
+ type eventMsg struct {
+ Event string `json:"event"`
+ }
+
+ m := melody.New()
+
+ m.HandleConnect(func(s *melody.Session) {
+ auth := services.NewContext(s.Request.Context())
+ s.Set("gid", auth.GID)
+ })
+
+ factory := func(e string) func(data any) {
+ return func(data any) {
+ eventData, ok := data.(eventbus.GroupMutationEvent)
+ if !ok {
+ log.Log().Msgf("invalid event data: %v", data)
+ return
+ }
+
+ msg := &eventMsg{Event: e}
+
+ jsonBytes, err := json.Marshal(msg)
+ if err != nil {
+ log.Log().Msgf("error marshling event data %v: %v", data, err)
+ return
+ }
+
+ _ = m.BroadcastFilter(jsonBytes, func(s *melody.Session) bool {
+ groupIDStr, ok := s.Get("gid")
+ if !ok {
+ return false
+ }
+
+ GID := groupIDStr.(uuid.UUID)
+ return GID == eventData.GID
+ })
+ }
+ }
+
+ ctrl.bus.Subscribe(eventbus.EventLabelMutation, factory("label.mutation"))
+ ctrl.bus.Subscribe(eventbus.EventLocationMutation, factory("location.mutation"))
+ ctrl.bus.Subscribe(eventbus.EventItemMutation, factory("item.mutation"))
+
+ // Persistent asynchronous ticker that keeps all websocket connections alive with periodic pings.
+ go func() {
+ const interval = 10 * time.Second
+
+ ping := time.NewTicker(interval)
+ defer ping.Stop()
+
+ for range ping.C {
+ msg := &eventMsg{Event: "ping"}
+
+ pingBytes, err := json.Marshal(msg)
+ if err != nil {
+ log.Log().Msgf("error marshaling ping: %v", err)
+ } else {
+ _ = m.Broadcast(pingBytes)
+ }
+ }
+ }()
+
+ return func(w http.ResponseWriter, r *http.Request) error {
+ return m.HandleRequest(w, r)
+ }
+}
diff --git a/backend/app/api/handlers/v1/partials.go b/backend/app/api/handlers/v1/partials.go
index 763805f..5c81ad5 100644
--- a/backend/app/api/handlers/v1/partials.go
+++ b/backend/app/api/handlers/v1/partials.go
@@ -21,7 +21,7 @@ func (ctrl *V1Controller) routeID(r *http.Request) (uuid.UUID, error) {
func (ctrl *V1Controller) routeUUID(r *http.Request, key string) (uuid.UUID, error) {
ID, err := uuid.Parse(chi.URLParam(r, key))
if err != nil {
- return uuid.Nil, validate.NewInvalidRouteKeyError(key)
+ return uuid.Nil, validate.NewRouteKeyError(key)
}
return ID, nil
}
diff --git a/backend/app/api/handlers/v1/query_params.go b/backend/app/api/handlers/v1/query_params.go
new file mode 100644
index 0000000..0ac84d8
--- /dev/null
+++ b/backend/app/api/handlers/v1/query_params.go
@@ -0,0 +1,36 @@
+package v1
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/google/uuid"
+)
+
+func queryUUIDList(params url.Values, key string) []uuid.UUID {
+ var ids []uuid.UUID
+ for _, id := range params[key] {
+ uid, err := uuid.Parse(id)
+ if err != nil {
+ continue
+ }
+ ids = append(ids, uid)
+ }
+ return ids
+}
+
+func queryIntOrNegativeOne(s string) int {
+ i, err := strconv.Atoi(s)
+ if err != nil {
+ return -1
+ }
+ return i
+}
+
+func queryBool(s string) bool {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return false
+ }
+ return b
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_actions.go b/backend/app/api/handlers/v1/v1_ctrl_actions.go
new file mode 100644
index 0000000..75f39a5
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_actions.go
@@ -0,0 +1,83 @@
+package v1
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/sys/validate"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+ "github.com/rs/zerolog/log"
+)
+
+type ActionAmountResult struct {
+ Completed int `json:"completed"`
+}
+
+func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+
+ totalCompleted, err := fn(ctx, ctx.GID)
+ if err != nil {
+ log.Err(err).Str("action_ref", ref).Msg("failed to run action")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+
+ return server.JSON(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted})
+ }
+}
+
+// HandleEnsureAssetID godoc
+//
+// @Summary Ensure Asset IDs
+// @Description Ensures all items in the database have an asset ID
+// @Tags Actions
+// @Produce json
+// @Success 200 {object} ActionAmountResult
+// @Router /v1/actions/ensure-asset-ids [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleEnsureAssetID() errchain.HandlerFunc {
+ return actionHandlerFactory("ensure asset IDs", ctrl.svc.Items.EnsureAssetID)
+}
+
+// HandleEnsureImportRefs godoc
+//
+// @Summary Ensures Import Refs
+// @Description Ensures all items in the database have an import ref
+// @Tags Actions
+// @Produce json
+// @Success 200 {object} ActionAmountResult
+// @Router /v1/actions/ensure-import-refs [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleEnsureImportRefs() errchain.HandlerFunc {
+ return actionHandlerFactory("ensure import refs", ctrl.svc.Items.EnsureImportRef)
+}
+
+// HandleItemDateZeroOut godoc
+//
+// @Summary Zero Out Time Fields
+// @Description Resets all item date fields to the beginning of the day
+// @Tags Actions
+// @Produce json
+// @Success 200 {object} ActionAmountResult
+// @Router /v1/actions/zero-item-time-fields [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemDateZeroOut() errchain.HandlerFunc {
+ return actionHandlerFactory("zero out date time", ctrl.repo.Items.ZeroOutTimeFields)
+}
+
+// HandleSetPrimaryPhotos godoc
+//
+// @Summary Set Primary Photos
+// @Description Sets the first photo of each item as the primary photo
+// @Tags Actions
+// @Produce json
+// @Success 200 {object} ActionAmountResult
+// @Router /v1/actions/set-primary-photos [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleSetPrimaryPhotos() errchain.HandlerFunc {
+ return actionHandlerFactory("ensure asset IDs", ctrl.repo.Items.SetPrimaryPhotos)
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_assets.go b/backend/app/api/handlers/v1/v1_ctrl_assets.go
new file mode 100644
index 0000000..91e9a3c
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_assets.go
@@ -0,0 +1,62 @@
+package v1
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/sys/validate"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+
+ "github.com/rs/zerolog/log"
+)
+
+// HandleAssetGet godoc
+//
+// @Summary Get Item by Asset ID
+// @Tags Items
+// @Produce json
+// @Param id path string true "Asset ID"
+// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
+// @Router /v1/assets/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleAssetGet() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+ assetIDParam := chi.URLParam(r, "id")
+ assetIDParam = strings.ReplaceAll(assetIDParam, "-", "") // Remove dashes
+ // Convert the asset ID to an int64
+ assetID, err := strconv.ParseInt(assetIDParam, 10, 64)
+ if err != nil {
+ return err
+ }
+ pageParam := r.URL.Query().Get("page")
+ var page int64 = -1
+ if pageParam != "" {
+ page, err = strconv.ParseInt(pageParam, 10, 64)
+ if err != nil {
+ return server.JSON(w, http.StatusBadRequest, "Invalid page number")
+ }
+ }
+
+ pageSizeParam := r.URL.Query().Get("pageSize")
+ var pageSize int64 = -1
+ if pageSizeParam != "" {
+ pageSize, err = strconv.ParseInt(pageSizeParam, 10, 64)
+ if err != nil {
+ return server.JSON(w, http.StatusBadRequest, "Invalid page size")
+ }
+ }
+
+ items, err := ctrl.repo.Items.QueryByAssetID(r.Context(), ctx.GID, repo.AssetID(assetID), int(page), int(pageSize))
+ if err != nil {
+ log.Err(err).Msg("failed to get item")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ return server.JSON(w, http.StatusOK, items)
+ }
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_auth.go b/backend/app/api/handlers/v1/v1_ctrl_auth.go
index b005a9d..47b69fd 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_auth.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_auth.go
@@ -3,92 +3,140 @@ package v1
import (
"errors"
"net/http"
+ "strconv"
+ "strings"
"time"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
+const (
+ cookieNameToken = "hb.auth.token"
+ cookieNameRemember = "hb.auth.remember"
+ cookieNameSession = "hb.auth.session"
+)
+
type (
TokenResponse struct {
- Token string `json:"token"`
- ExpiresAt time.Time `json:"expiresAt"`
+ Token string `json:"token"`
+ ExpiresAt time.Time `json:"expiresAt"`
+ AttachmentToken string `json:"attachmentToken"`
}
LoginForm struct {
- Username string `json:"username"`
- Password string `json:"password"`
+ Username string `json:"username"`
+ Password string `json:"password"`
+ StayLoggedIn bool `json:"stayLoggedIn"`
}
)
+type CookieContents struct {
+ Token string
+ ExpiresAt time.Time
+ Remember bool
+}
+
+func GetCookies(r *http.Request) (*CookieContents, error) {
+ cookie, err := r.Cookie(cookieNameToken)
+ if err != nil {
+ return nil, errors.New("authorization cookie is required")
+ }
+
+ rememberCookie, err := r.Cookie(cookieNameRemember)
+ if err != nil {
+ return nil, errors.New("remember cookie is required")
+ }
+
+ return &CookieContents{
+ Token: cookie.Value,
+ ExpiresAt: cookie.Expires,
+ Remember: rememberCookie.Value == "true",
+ }, nil
+}
+
+// AuthProvider is an interface that can be implemented by any authentication provider
+// to extend authentication methods for the API.
+type AuthProvider interface {
+	// Name returns the name of the authentication provider. This should be a unique name
+	// that is URL friendly.
+ //
+ // Example: "local", "ldap"
+ Name() string
+ // Authenticate is called when a user attempts to login to the API. The implementation
+	// should return an error if the user cannot be authenticated. If an error is returned,
+ // the API controller will return a vague error message to the user.
+ //
+ // Authenticate should do the following:
+ //
+ // 1. Ensure that the user exists within the database (either create, or get)
+ // 2. On successful authentication, they must set the user cookies.
+ Authenticate(w http.ResponseWriter, r *http.Request) (services.UserAuthTokenDetail, error)
+}
+
// HandleAuthLogin godoc
-// @Summary User Login
-// @Tags Authentication
-// @Accept x-www-form-urlencoded
-// @Accept application/json
-// @Param username formData string false "string" example(admin@admin.com)
-// @Param password formData string false "string" example(admin)
-// @Produce json
-// @Success 200 {object} TokenResponse
-// @Router /v1/users/login [POST]
-func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
+//
+// @Summary User Login
+// @Tags Authentication
+// @Accept x-www-form-urlencoded
+// @Accept application/json
+// @Param username formData string false "string" example(admin@admin.com)
+// @Param password formData string false "string" example(admin)
+// @Param payload body LoginForm true "Login Data"
+// @Param provider query string false "auth provider"
+// @Produce json
+// @Success 200 {object} TokenResponse
+// @Router /v1/users/login [POST]
+func (ctrl *V1Controller) HandleAuthLogin(ps ...AuthProvider) errchain.HandlerFunc {
+ if len(ps) == 0 {
+ panic("no auth providers provided")
+ }
+
+ providers := make(map[string]AuthProvider)
+ for _, p := range ps {
+ log.Info().Str("name", p.Name()).Msg("registering auth provider")
+ providers[p.Name()] = p
+ }
+
return func(w http.ResponseWriter, r *http.Request) error {
- loginForm := &LoginForm{}
-
- switch r.Header.Get("Content-Type") {
- case server.ContentFormUrlEncoded:
- err := r.ParseForm()
- if err != nil {
- return server.Respond(w, http.StatusBadRequest, server.Wrap(err))
- }
-
- loginForm.Username = r.PostFormValue("username")
- loginForm.Password = r.PostFormValue("password")
- case server.ContentJSON:
- err := server.Decode(r, loginForm)
-
- if err != nil {
- log.Err(err).Msg("failed to decode login form")
- }
- default:
- return server.Respond(w, http.StatusBadRequest, errors.New("invalid content type"))
+ // Extract provider query
+ provider := r.URL.Query().Get("provider")
+ if provider == "" {
+ provider = "local"
}
- if loginForm.Username == "" || loginForm.Password == "" {
- return validate.NewFieldErrors(
- validate.FieldError{
- Field: "username",
- Error: "username or password is empty",
- },
- validate.FieldError{
- Field: "password",
- Error: "username or password is empty",
- },
- )
+ // Get the provider
+ p, ok := providers[provider]
+ if !ok {
+ return validate.NewRequestError(errors.New("invalid auth provider"), http.StatusBadRequest)
}
- newToken, err := ctrl.svc.User.Login(r.Context(), loginForm.Username, loginForm.Password)
-
+ newToken, err := p.Authenticate(w, r)
if err != nil {
- return validate.NewRequestError(errors.New("authentication failed"), http.StatusInternalServerError)
+ log.Err(err).Msg("failed to authenticate")
+ return server.JSON(w, http.StatusInternalServerError, err.Error())
}
- return server.Respond(w, http.StatusOK, TokenResponse{
- Token: "Bearer " + newToken.Raw,
- ExpiresAt: newToken.ExpiresAt,
+ ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, true)
+ return server.JSON(w, http.StatusOK, TokenResponse{
+ Token: "Bearer " + newToken.Raw,
+ ExpiresAt: newToken.ExpiresAt,
+ AttachmentToken: newToken.AttachmentToken,
})
}
}
// HandleAuthLogout godoc
-// @Summary User Logout
-// @Tags Authentication
-// @Success 204
-// @Router /v1/users/logout [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
+//
+// @Summary User Logout
+// @Tags Authentication
+// @Success 204
+// @Router /v1/users/logout [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleAuthLogout() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
token := services.UseTokenCtx(r.Context())
if token == "" {
@@ -100,19 +148,21 @@ func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ ctrl.unsetCookies(w, noPort(r.Host))
+ return server.JSON(w, http.StatusNoContent, nil)
}
}
-// HandleAuthLogout godoc
-// @Summary User Token Refresh
-// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token.
-// @Description This does not validate that the user still exists within the database.
-// @Tags Authentication
-// @Success 200
-// @Router /v1/users/refresh [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
+// HandleAuthRefresh godoc
+//
+// @Summary User Token Refresh
+// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token.
+// @Description This does not validate that the user still exists within the database.
+// @Tags Authentication
+// @Success 200
+// @Router /v1/users/refresh [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleAuthRefresh() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
requestToken := services.UseTokenCtx(r.Context())
if requestToken == "" {
@@ -124,6 +174,78 @@ func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
return validate.NewUnauthorizedError()
}
- return server.Respond(w, http.StatusOK, newToken)
+ ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, false)
+ return server.JSON(w, http.StatusOK, newToken)
}
}
+
+func noPort(host string) string {
+ return strings.Split(host, ":")[0]
+}
+
+func (ctrl *V1Controller) setCookies(w http.ResponseWriter, domain, token string, expires time.Time, remember bool) {
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameRemember,
+ Value: strconv.FormatBool(remember),
+ Expires: expires,
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: true,
+ Path: "/",
+ })
+
+ // Set HTTP only cookie
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameToken,
+ Value: token,
+ Expires: expires,
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: true,
+ Path: "/",
+ })
+
+ // Set Fake Session cookie
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameSession,
+ Value: "true",
+ Expires: expires,
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: false,
+ Path: "/",
+ })
+}
+
+func (ctrl *V1Controller) unsetCookies(w http.ResponseWriter, domain string) {
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameToken,
+ Value: "",
+ Expires: time.Unix(0, 0),
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: true,
+ Path: "/",
+ })
+
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameRemember,
+ Value: "false",
+ Expires: time.Unix(0, 0),
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: true,
+ Path: "/",
+ })
+
+ // Set Fake Session cookie
+ http.SetCookie(w, &http.Cookie{
+ Name: cookieNameSession,
+ Value: "false",
+ Expires: time.Unix(0, 0),
+ Domain: domain,
+ Secure: ctrl.cookieSecure,
+ HttpOnly: false,
+ Path: "/",
+ })
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_group.go b/backend/app/api/handlers/v1/v1_ctrl_group.go
index b27622d..69bc024 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_group.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_group.go
@@ -7,13 +7,13 @@ import (
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
- "github.com/rs/zerolog/log"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
)
type (
GroupInvitationCreate struct {
- Uses int `json:"uses"`
+ Uses int `json:"uses" validate:"required,min=1,max=100"`
ExpiresAt time.Time `json:"expiresAt"`
}
@@ -25,113 +25,73 @@ type (
)
// HandleGroupGet godoc
-// @Summary Get the current user's group
-// @Tags Group
-// @Produce json
-// @Success 200 {object} repo.GroupStatistics
-// @Router /v1/groups/statistics [Get]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupStatistics() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- ctx := services.NewContext(r.Context())
-
- stats, err := ctrl.repo.Groups.GroupStatistics(ctx, ctx.GID)
- if err != nil {
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusOK, stats)
+//
+// @Summary Get Group
+// @Tags Group
+// @Produce json
+// @Success 200 {object} repo.Group
+// @Router /v1/groups [Get]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupGet() errchain.HandlerFunc {
+ fn := func(r *http.Request) (repo.Group, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Groups.GroupByID(auth, auth.GID)
}
-}
-// HandleGroupGet godoc
-// @Summary Get the current user's group
-// @Tags Group
-// @Produce json
-// @Success 200 {object} repo.Group
-// @Router /v1/groups [Get]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupGet() server.HandlerFunc {
- return ctrl.handleGroupGeneral()
+ return adapters.Command(fn, http.StatusOK)
}
// HandleGroupUpdate godoc
-// @Summary Updates some fields of the current users group
-// @Tags Group
-// @Produce json
-// @Param payload body repo.GroupUpdate true "User Data"
-// @Success 200 {object} repo.Group
-// @Router /v1/groups [Put]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupUpdate() server.HandlerFunc {
- return ctrl.handleGroupGeneral()
-}
+//
+// @Summary Update Group
+// @Tags Group
+// @Produce json
+// @Param payload body repo.GroupUpdate true "User Data"
+// @Success 200 {object} repo.Group
+// @Router /v1/groups [Put]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupUpdate() errchain.HandlerFunc {
+ fn := func(r *http.Request, body repo.GroupUpdate) (repo.Group, error) {
+ auth := services.NewContext(r.Context())
-func (ctrl *V1Controller) handleGroupGeneral() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- ctx := services.NewContext(r.Context())
-
- switch r.Method {
- case http.MethodGet:
- group, err := ctrl.repo.Groups.GroupByID(ctx, ctx.GID)
- if err != nil {
- log.Err(err).Msg("failed to get group")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusOK, group)
-
- case http.MethodPut:
- data := repo.GroupUpdate{}
- if err := server.Decode(r, &data); err != nil {
- return validate.NewRequestError(err, http.StatusBadRequest)
- }
-
- group, err := ctrl.svc.Group.UpdateGroup(ctx, data)
- if err != nil {
- log.Err(err).Msg("failed to update group")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusOK, group)
+ ok := ctrl.svc.Currencies.IsSupported(body.Currency)
+ if !ok {
+ return repo.Group{}, validate.NewFieldErrors(
+ validate.NewFieldError("currency", "currency '"+body.Currency+"' is not supported"),
+ )
}
- return nil
+ return ctrl.svc.Group.UpdateGroup(auth, body)
}
+
+ return adapters.Action(fn, http.StatusOK)
}
// HandleGroupInvitationsCreate godoc
-// @Summary Get the current user
-// @Tags Group
-// @Produce json
-// @Param payload body GroupInvitationCreate true "User Data"
-// @Success 200 {object} GroupInvitation
-// @Router /v1/groups/invitations [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupInvitationsCreate() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- data := GroupInvitationCreate{}
- if err := server.Decode(r, &data); err != nil {
- log.Err(err).Msg("failed to decode user registration data")
- return validate.NewRequestError(err, http.StatusBadRequest)
+//
+// @Summary Create Group Invitation
+// @Tags Group
+// @Produce json
+// @Param payload body GroupInvitationCreate true "User Data"
+// @Success 201 {object} GroupInvitation
+// @Router /v1/groups/invitations [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupInvitationsCreate() errchain.HandlerFunc {
+ fn := func(r *http.Request, body GroupInvitationCreate) (GroupInvitation, error) {
+ if body.ExpiresAt.IsZero() {
+ body.ExpiresAt = time.Now().Add(time.Hour * 24)
}
- if data.ExpiresAt.IsZero() {
- data.ExpiresAt = time.Now().Add(time.Hour * 24)
- }
+ auth := services.NewContext(r.Context())
- ctx := services.NewContext(r.Context())
+ token, err := ctrl.svc.Group.NewInvitation(auth, body.Uses, body.ExpiresAt)
- token, err := ctrl.svc.Group.NewInvitation(ctx, data.Uses, data.ExpiresAt)
- if err != nil {
- log.Err(err).Msg("failed to create new token")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusCreated, GroupInvitation{
+ return GroupInvitation{
Token: token,
- ExpiresAt: data.ExpiresAt,
- Uses: data.Uses,
- })
+ ExpiresAt: body.ExpiresAt,
+ Uses: body.Uses,
+ }, err
}
+
+ return adapters.Action(fn, http.StatusCreated)
}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_items.go b/backend/app/api/handlers/v1/v1_ctrl_items.go
index 732600f..6a25663 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_items.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_items.go
@@ -1,201 +1,293 @@
package v1
import (
+ "database/sql"
"encoding/csv"
+ "errors"
"net/http"
- "net/url"
- "strconv"
+ "strings"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
// HandleItemsGetAll godoc
-// @Summary Get All Items
-// @Tags Items
-// @Produce json
-// @Param q query string false "search string"
-// @Param page query int false "page number"
-// @Param pageSize query int false "items per page"
-// @Param labels query []string false "label Ids" collectionFormat(multi)
-// @Param locations query []string false "location Ids" collectionFormat(multi)
-// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
-// @Router /v1/items [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
- uuidList := func(params url.Values, key string) []uuid.UUID {
- var ids []uuid.UUID
- for _, id := range params[key] {
- uid, err := uuid.Parse(id)
- if err != nil {
- continue
- }
- ids = append(ids, uid)
- }
- return ids
- }
-
- intOrNegativeOne := func(s string) int {
- i, err := strconv.Atoi(s)
- if err != nil {
- return -1
- }
- return i
- }
-
- getBool := func(s string) bool {
- b, err := strconv.ParseBool(s)
- if err != nil {
- return false
- }
- return b
- }
-
+//
+// @Summary Query All Items
+// @Tags Items
+// @Produce json
+// @Param q query string false "search string"
+// @Param page query int false "page number"
+// @Param pageSize query int false "items per page"
+// @Param labels query []string false "label Ids" collectionFormat(multi)
+// @Param locations query []string false "location Ids" collectionFormat(multi)
+// @Param parentIds query []string false "parent Ids" collectionFormat(multi)
+// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
+// @Router /v1/items [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsGetAll() errchain.HandlerFunc {
extractQuery := func(r *http.Request) repo.ItemQuery {
params := r.URL.Query()
- return repo.ItemQuery{
- Page: intOrNegativeOne(params.Get("page")),
- PageSize: intOrNegativeOne(params.Get("perPage")),
- Search: params.Get("q"),
- LocationIDs: uuidList(params, "locations"),
- LabelIDs: uuidList(params, "labels"),
- IncludeArchived: getBool(params.Get("includeArchived")),
+ filterFieldItems := func(raw []string) []repo.FieldQuery {
+ var items []repo.FieldQuery
+
+ for _, v := range raw {
+ parts := strings.SplitN(v, "=", 2)
+ if len(parts) == 2 {
+ items = append(items, repo.FieldQuery{
+ Name: parts[0],
+ Value: parts[1],
+ })
+ }
+ }
+
+ return items
}
+
+ v := repo.ItemQuery{
+ Page: queryIntOrNegativeOne(params.Get("page")),
+ PageSize: queryIntOrNegativeOne(params.Get("pageSize")),
+ Search: params.Get("q"),
+ LocationIDs: queryUUIDList(params, "locations"),
+ LabelIDs: queryUUIDList(params, "labels"),
+ ParentItemIDs: queryUUIDList(params, "parentIds"),
+ IncludeArchived: queryBool(params.Get("includeArchived")),
+ Fields: filterFieldItems(params["fields"]),
+ OrderBy: params.Get("orderBy"),
+ }
+
+ if strings.HasPrefix(v.Search, "#") {
+ aidStr := strings.TrimPrefix(v.Search, "#")
+
+ aid, ok := repo.ParseAssetID(aidStr)
+ if ok {
+ v.Search = ""
+ v.AssetID = aid
+ }
+ }
+
+ return v
}
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
+
items, err := ctrl.repo.Items.QueryByGroup(ctx, ctx.GID, extractQuery(r))
if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return server.JSON(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{
+ Items: []repo.ItemSummary{},
+ })
+ }
log.Err(err).Msg("failed to get items")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusOK, items)
+ return server.JSON(w, http.StatusOK, items)
}
}
+// HandleItemFullPath godoc
+//
+// @Summary Get the full path of an item
+// @Tags Items
+// @Produce json
+// @Param id path string true "Item ID"
+// @Success 200 {object} []repo.ItemPath
+// @Router /v1/items/{id}/path [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemFullPath() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) ([]repo.ItemPath, error) {
+ auth := services.NewContext(r.Context())
+ item, err := ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
+ if err != nil {
+ return nil, err
+ }
+
+ paths, err := ctrl.repo.Locations.PathForLoc(auth, auth.GID, item.Location.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ if item.Parent != nil {
+ paths = append(paths, repo.ItemPath{
+ Type: repo.ItemTypeItem,
+ ID: item.Parent.ID,
+ Name: item.Parent.Name,
+ })
+ }
+
+ paths = append(paths, repo.ItemPath{
+ Type: repo.ItemTypeItem,
+ ID: item.ID,
+ Name: item.Name,
+ })
+
+ return paths, nil
+ }
+
+ return adapters.CommandID("id", fn, http.StatusOK)
+}
+
// HandleItemsCreate godoc
-// @Summary Create a new item
-// @Tags Items
-// @Produce json
-// @Param payload body repo.ItemCreate true "Item Data"
-// @Success 200 {object} repo.ItemSummary
-// @Router /v1/items [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsCreate() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- createData := repo.ItemCreate{}
- if err := server.Decode(r, &createData); err != nil {
- log.Err(err).Msg("failed to decode request body")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- user := services.UseUserCtx(r.Context())
- item, err := ctrl.repo.Items.Create(r.Context(), user.GroupID, createData)
- if err != nil {
- log.Err(err).Msg("failed to create item")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusCreated, item)
+//
+// @Summary Create Item
+// @Tags Items
+// @Produce json
+// @Param payload body repo.ItemCreate true "Item Data"
+// @Success 201 {object} repo.ItemOut
+// @Router /v1/items [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsCreate() errchain.HandlerFunc {
+ fn := func(r *http.Request, body repo.ItemCreate) (repo.ItemOut, error) {
+ return ctrl.svc.Items.Create(services.NewContext(r.Context()), body)
}
+
+ return adapters.Action(fn, http.StatusCreated)
}
// HandleItemGet godocs
-// @Summary Gets a item and fields
-// @Tags Items
-// @Produce json
-// @Param id path string true "Item ID"
-// @Success 200 {object} repo.ItemOut
-// @Router /v1/items/{id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemGet() server.HandlerFunc {
- return ctrl.handleItemsGeneral()
+//
+// @Summary Get Item
+// @Tags Items
+// @Produce json
+// @Param id path string true "Item ID"
+// @Success 200 {object} repo.ItemOut
+// @Router /v1/items/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemGet() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (repo.ItemOut, error) {
+ auth := services.NewContext(r.Context())
+
+ return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
+ }
+
+ return adapters.CommandID("id", fn, http.StatusOK)
}
// HandleItemDelete godocs
-// @Summary deletes a item
-// @Tags Items
-// @Produce json
-// @Param id path string true "Item ID"
-// @Success 204
-// @Router /v1/items/{id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemDelete() server.HandlerFunc {
- return ctrl.handleItemsGeneral()
+//
+// @Summary Delete Item
+// @Tags Items
+// @Produce json
+// @Param id path string true "Item ID"
+// @Success 204
+// @Router /v1/items/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemDelete() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (any, error) {
+ auth := services.NewContext(r.Context())
+ err := ctrl.repo.Items.DeleteByGroup(auth, auth.GID, ID)
+ return nil, err
+ }
+
+ return adapters.CommandID("id", fn, http.StatusNoContent)
}
// HandleItemUpdate godocs
-// @Summary updates a item
-// @Tags Items
-// @Produce json
-// @Param id path string true "Item ID"
-// @Param payload body repo.ItemUpdate true "Item Data"
-// @Success 200 {object} repo.ItemOut
-// @Router /v1/items/{id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemUpdate() server.HandlerFunc {
- return ctrl.handleItemsGeneral()
+//
+// @Summary Update Item
+// @Tags Items
+// @Produce json
+// @Param id path string true "Item ID"
+// @Param payload body repo.ItemUpdate true "Item Data"
+// @Success 200 {object} repo.ItemOut
+// @Router /v1/items/{id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemUpdate() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, body repo.ItemUpdate) (repo.ItemOut, error) {
+ auth := services.NewContext(r.Context())
+
+ body.ID = ID
+ return ctrl.repo.Items.UpdateByGroup(auth, auth.GID, body)
+ }
+
+ return adapters.ActionID("id", fn, http.StatusOK)
}
-func (ctrl *V1Controller) handleItemsGeneral() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- ctx := services.NewContext(r.Context())
- ID, err := ctrl.routeID(r)
+// HandleItemPatch godocs
+//
+// @Summary Patch Item
+// @Tags Items
+// @Produce json
+// @Param id path string true "Item ID"
+// @Param payload body repo.ItemPatch true "Item Data"
+// @Success 200 {object} repo.ItemOut
+// @Router /v1/items/{id} [PATCH]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemPatch() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, body repo.ItemPatch) (repo.ItemOut, error) {
+ auth := services.NewContext(r.Context())
+
+ body.ID = ID
+ err := ctrl.repo.Items.Patch(auth, auth.GID, ID, body)
if err != nil {
- return err
+ return repo.ItemOut{}, err
}
- switch r.Method {
- case http.MethodGet:
- items, err := ctrl.repo.Items.GetOneByGroup(r.Context(), ctx.GID, ID)
- if err != nil {
- log.Err(err).Msg("failed to get item")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, items)
- case http.MethodDelete:
- err = ctrl.repo.Items.DeleteByGroup(r.Context(), ctx.GID, ID)
- if err != nil {
- log.Err(err).Msg("failed to delete item")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusNoContent, nil)
- case http.MethodPut:
- body := repo.ItemUpdate{}
- if err := server.Decode(r, &body); err != nil {
- log.Err(err).Msg("failed to decode request body")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- body.ID = ID
- result, err := ctrl.repo.Items.UpdateByGroup(r.Context(), ctx.GID, body)
- if err != nil {
- log.Err(err).Msg("failed to update item")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, result)
- }
-
- return nil
+ return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
}
+
+ return adapters.ActionID("id", fn, http.StatusOK)
+}
+
+// HandleGetAllCustomFieldNames godocs
+//
+// @Summary Get All Custom Field Names
+// @Tags Items
+// @Produce json
+// @Success 200
+// @Router /v1/items/fields [GET]
+// @Success 200 {object} []string
+// @Security Bearer
+func (ctrl *V1Controller) HandleGetAllCustomFieldNames() errchain.HandlerFunc {
+ fn := func(r *http.Request) ([]string, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Items.GetAllCustomFieldNames(auth, auth.GID)
+ }
+
+ return adapters.Command(fn, http.StatusOK)
+}
+
+// HandleGetAllCustomFieldValues godocs
+//
+// @Summary Get All Custom Field Values
+// @Tags Items
+// @Produce json
+// @Success 200
+// @Router /v1/items/fields/values [GET]
+// @Success 200 {object} []string
+// @Security Bearer
+func (ctrl *V1Controller) HandleGetAllCustomFieldValues() errchain.HandlerFunc {
+ type query struct {
+ Field string `schema:"field" validate:"required"`
+ }
+
+ fn := func(r *http.Request, q query) ([]string, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Items.GetAllCustomFieldValues(auth, auth.GID, q.Field)
+ }
+
+ return adapters.Query(fn, http.StatusOK)
}
// HandleItemsImport godocs
-// @Summary imports items into the database
-// @Tags Items
-// @Produce json
-// @Success 204
-// @Param csv formData file true "Image to upload"
-// @Router /v1/items/import [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
+//
+// @Summary Import Items
+// @Tags Items
+// @Produce json
+// @Success 204
+// @Param csv formData file true "CSV file to import"
+// @Router /v1/items/import [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsImport() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
-
err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
if err != nil {
log.Err(err).Msg("failed to parse multipart form")
@@ -208,21 +300,40 @@ func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- reader := csv.NewReader(file)
- data, err := reader.ReadAll()
- if err != nil {
- log.Err(err).Msg("failed to read csv")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
user := services.UseUserCtx(r.Context())
- _, err = ctrl.svc.Items.CsvImport(r.Context(), user.GroupID, data)
+ _, err = ctrl.svc.Items.CsvImport(r.Context(), user.GroupID, file)
if err != nil {
log.Err(err).Msg("failed to import items")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ return server.JSON(w, http.StatusNoContent, nil)
+ }
+}
+
+// HandleItemsExport godocs
+//
+// @Summary Export Items
+// @Tags Items
+// @Success 200 {string} string "text/tsv"
+// @Router /v1/items/export [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsExport() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+
+ csvData, err := ctrl.svc.Items.ExportTSV(r.Context(), ctx.GID)
+ if err != nil {
+ log.Err(err).Msg("failed to export items")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+
+ w.Header().Set("Content-Type", "text/tsv")
+ w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")
+
+ writer := csv.NewWriter(w)
+ writer.Comma = '\t'
+ return writer.WriteAll(csvData)
}
}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go b/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go
index e776e9a..ae2782a 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go
@@ -2,14 +2,16 @@ package v1
import (
"errors"
- "fmt"
"net/http"
+ "path/filepath"
+ "strings"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -19,25 +21,25 @@ type (
}
)
-// HandleItemsImport godocs
-// @Summary imports items into the database
-// @Tags Items Attachments
-// @Produce json
-// @Param id path string true "Item ID"
-// @Param file formData file true "File attachment"
-// @Param type formData string true "Type of file"
-// @Param name formData string true "name of the file including extension"
-// @Success 200 {object} repo.ItemOut
-// @Failure 422 {object} server.ErrorResponse
-// @Router /v1/items/{id}/attachments [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
+// HandleItemAttachmentCreate godocs
+//
+// @Summary Create Item Attachment
+// @Tags Items Attachments
+// @Produce json
+// @Param id path string true "Item ID"
+// @Param file formData file true "File attachment"
+// @Param type formData string true "Type of file"
+// @Param name formData string true "name of the file including extension"
+// @Success 201 {object} repo.ItemOut
+// @Failure 422 {object} validate.ErrorResponse
+// @Router /v1/items/{id}/attachments [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentCreate() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
if err != nil {
log.Err(err).Msg("failed to parse multipart form")
return validate.NewRequestError(errors.New("failed to parse multipart form"), http.StatusBadRequest)
-
}
errs := validate.NewFieldErrors()
@@ -61,12 +63,20 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
}
if !errs.Nil() {
- return server.Respond(w, http.StatusUnprocessableEntity, errs)
+ return server.JSON(w, http.StatusUnprocessableEntity, errs)
}
attachmentType := r.FormValue("type")
if attachmentType == "" {
- attachmentType = attachment.TypeAttachment.String()
+ // Attempt to auto-detect the type of the file
+ ext := filepath.Ext(attachmentName)
+
+ switch strings.ToLower(ext) {
+ case ".jpg", ".jpeg", ".png", ".webp", ".gif", ".bmp", ".tiff":
+ attachmentType = attachment.TypePhoto.String()
+ default:
+ attachmentType = attachment.TypeAttachment.String()
+ }
}
id, err := ctrl.routeID(r)
@@ -83,78 +93,53 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
attachment.Type(attachmentType),
file,
)
-
if err != nil {
log.Err(err).Msg("failed to add attachment")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusCreated, item)
+ return server.JSON(w, http.StatusCreated, item)
}
}
// HandleItemAttachmentGet godocs
-// @Summary retrieves an attachment for an item
-// @Tags Items Attachments
-// @Produce application/octet-stream
-// @Param id path string true "Item ID"
-// @Param token query string true "Attachment token"
-// @Success 200
-// @Router /v1/items/{id}/attachments/download [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentDownload() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- token := server.GetParam(r, "token", "")
-
- doc, err := ctrl.svc.Items.AttachmentPath(r.Context(), token)
-
- if err != nil {
- log.Err(err).Msg("failed to get attachment")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", doc.Title))
- w.Header().Set("Content-Type", "application/octet-stream")
- http.ServeFile(w, r, doc.Path)
- return nil
- }
-}
-
-// HandleItemAttachmentToken godocs
-// @Summary retrieves an attachment for an item
-// @Tags Items Attachments
-// @Produce application/octet-stream
-// @Param id path string true "Item ID"
-// @Param attachment_id path string true "Attachment ID"
-// @Success 200 {object} ItemAttachmentToken
-// @Router /v1/items/{id}/attachments/{attachment_id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentToken() server.HandlerFunc {
+//
+// @Summary Get Item Attachment
+// @Tags Items Attachments
+// @Produce application/octet-stream
+// @Param id path string true "Item ID"
+// @Param attachment_id path string true "Attachment ID"
+// @Success 200 {string} string "application/octet-stream"
+// @Router /v1/items/{id}/attachments/{attachment_id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentGet() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
// HandleItemAttachmentDelete godocs
-// @Summary retrieves an attachment for an item
-// @Tags Items Attachments
-// @Param id path string true "Item ID"
-// @Param attachment_id path string true "Attachment ID"
-// @Success 204
-// @Router /v1/items/{id}/attachments/{attachment_id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentDelete() server.HandlerFunc {
+//
+// @Summary Delete Item Attachment
+// @Tags Items Attachments
+// @Param id path string true "Item ID"
+// @Param attachment_id path string true "Attachment ID"
+// @Success 204
+// @Router /v1/items/{id}/attachments/{attachment_id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentDelete() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
// HandleItemAttachmentUpdate godocs
-// @Summary retrieves an attachment for an item
-// @Tags Items Attachments
-// @Param id path string true "Item ID"
-// @Param attachment_id path string true "Attachment ID"
-// @Param payload body repo.ItemAttachmentUpdate true "Attachment Update"
-// @Success 200 {object} repo.ItemOut
-// @Router /v1/items/{id}/attachments/{attachment_id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentUpdate() server.HandlerFunc {
+//
+// @Summary Update Item Attachment
+// @Tags Items Attachments
+// @Param id path string true "Item ID"
+// @Param attachment_id path string true "Attachment ID"
+// @Param payload body repo.ItemAttachmentUpdate true "Attachment Update"
+// @Success 200 {object} repo.ItemOut
+// @Router /v1/items/{id}/attachments/{attachment_id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentUpdate() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
@@ -171,33 +156,15 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
ctx := services.NewContext(r.Context())
switch r.Method {
- // Token Handler
case http.MethodGet:
- token, err := ctrl.svc.Items.AttachmentToken(ctx, ID, attachmentID)
+ doc, err := ctrl.svc.Items.AttachmentPath(r.Context(), attachmentID)
if err != nil {
- switch err {
- case services.ErrNotFound:
- log.Err(err).
- Str("id", attachmentID.String()).
- Msg("failed to find attachment with id")
-
- return validate.NewRequestError(err, http.StatusNotFound)
-
- case services.ErrFileNotFound:
- log.Err(err).
- Str("id", attachmentID.String()).
- Msg("failed to find file path for attachment with id")
- log.Warn().Msg("attachment with no file path removed from database")
-
- return validate.NewRequestError(err, http.StatusNotFound)
-
- default:
- log.Err(err).Msg("failed to get attachment")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
+ log.Err(err).Msg("failed to get attachment path")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusOK, ItemAttachmentToken{Token: token})
+ http.ServeFile(w, r, doc.Path)
+ return nil
// Delete Attachment Handler
case http.MethodDelete:
@@ -207,7 +174,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ return server.JSON(w, http.StatusNoContent, nil)
// Update Attachment Handler
case http.MethodPut:
@@ -225,7 +192,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusOK, val)
+ return server.JSON(w, http.StatusOK, val)
}
return nil
diff --git a/backend/app/api/handlers/v1/v1_ctrl_labels.go b/backend/app/api/handlers/v1/v1_ctrl_labels.go
index 2551b46..dae23db 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_labels.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_labels.go
@@ -3,141 +3,100 @@ package v1
import (
"net/http"
+ "github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
- "github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
- "github.com/rs/zerolog/log"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
)
// HandleLabelsGetAll godoc
-// @Summary Get All Labels
-// @Tags Labels
-// @Produce json
-// @Success 200 {object} server.Results{items=[]repo.LabelOut}
-// @Router /v1/labels [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLabelsGetAll() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- user := services.UseUserCtx(r.Context())
- labels, err := ctrl.repo.Labels.GetAll(r.Context(), user.GroupID)
- if err != nil {
- log.Err(err).Msg("error getting labels")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, server.Results{Items: labels})
+//
+// @Summary Get All Labels
+// @Tags Labels
+// @Produce json
+// @Success 200 {object} []repo.LabelSummary
+// @Router /v1/labels [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLabelsGetAll() errchain.HandlerFunc {
+ fn := func(r *http.Request) ([]repo.LabelSummary, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Labels.GetAll(auth, auth.GID)
}
+
+ return adapters.Command(fn, http.StatusOK)
}
// HandleLabelsCreate godoc
-// @Summary Create a new label
-// @Tags Labels
-// @Produce json
-// @Param payload body repo.LabelCreate true "Label Data"
-// @Success 200 {object} repo.LabelSummary
-// @Router /v1/labels [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLabelsCreate() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- createData := repo.LabelCreate{}
- if err := server.Decode(r, &createData); err != nil {
- log.Err(err).Msg("error decoding label create data")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- user := services.UseUserCtx(r.Context())
- label, err := ctrl.repo.Labels.Create(r.Context(), user.GroupID, createData)
- if err != nil {
- log.Err(err).Msg("error creating label")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusCreated, label)
+//
+// @Summary Create Label
+// @Tags Labels
+// @Produce json
+// @Param payload body repo.LabelCreate true "Label Data"
+// @Success 201 {object} repo.LabelOut
+// @Router /v1/labels [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLabelsCreate() errchain.HandlerFunc {
+ fn := func(r *http.Request, data repo.LabelCreate) (repo.LabelOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Labels.Create(auth, auth.GID, data)
}
+
+ return adapters.Action(fn, http.StatusCreated)
}
// HandleLabelDelete godocs
-// @Summary deletes a label
-// @Tags Labels
-// @Produce json
-// @Param id path string true "Label ID"
-// @Success 204
-// @Router /v1/labels/{id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLabelDelete() server.HandlerFunc {
- return ctrl.handleLabelsGeneral()
+//
+// @Summary Delete Label
+// @Tags Labels
+// @Produce json
+// @Param id path string true "Label ID"
+// @Success 204
+// @Router /v1/labels/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLabelDelete() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (any, error) {
+ auth := services.NewContext(r.Context())
+ err := ctrl.repo.Labels.DeleteByGroup(auth, auth.GID, ID)
+ return nil, err
+ }
+
+ return adapters.CommandID("id", fn, http.StatusNoContent)
}
// HandleLabelGet godocs
-// @Summary Gets a label and fields
-// @Tags Labels
-// @Produce json
-// @Param id path string true "Label ID"
-// @Success 200 {object} repo.LabelOut
-// @Router /v1/labels/{id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLabelGet() server.HandlerFunc {
- return ctrl.handleLabelsGeneral()
+//
+// @Summary Get Label
+// @Tags Labels
+// @Produce json
+// @Param id path string true "Label ID"
+// @Success 200 {object} repo.LabelOut
+// @Router /v1/labels/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLabelGet() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (repo.LabelOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Labels.GetOneByGroup(auth, auth.GID, ID)
+ }
+
+ return adapters.CommandID("id", fn, http.StatusOK)
}
// HandleLabelUpdate godocs
-// @Summary updates a label
-// @Tags Labels
-// @Produce json
-// @Param id path string true "Label ID"
-// @Success 200 {object} repo.LabelOut
-// @Router /v1/labels/{id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLabelUpdate() server.HandlerFunc {
- return ctrl.handleLabelsGeneral()
-}
-
-func (ctrl *V1Controller) handleLabelsGeneral() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- ctx := services.NewContext(r.Context())
- ID, err := ctrl.routeID(r)
- if err != nil {
- return err
- }
-
- switch r.Method {
- case http.MethodGet:
- labels, err := ctrl.repo.Labels.GetOneByGroup(r.Context(), ctx.GID, ID)
- if err != nil {
- if ent.IsNotFound(err) {
- log.Err(err).
- Str("id", ID.String()).
- Msg("label not found")
- return validate.NewRequestError(err, http.StatusNotFound)
- }
- log.Err(err).Msg("error getting label")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, labels)
-
- case http.MethodDelete:
- err = ctrl.repo.Labels.DeleteByGroup(ctx, ctx.GID, ID)
- if err != nil {
- log.Err(err).Msg("error deleting label")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusNoContent, nil)
-
- case http.MethodPut:
- body := repo.LabelUpdate{}
- if err := server.Decode(r, &body); err != nil {
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- body.ID = ID
- result, err := ctrl.repo.Labels.UpdateByGroup(ctx, ctx.GID, body)
- if err != nil {
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, result)
- }
-
- return nil
+//
+// @Summary Update Label
+// @Tags Labels
+// @Produce json
+// @Param id path string true "Label ID"
+// @Success 200 {object} repo.LabelOut
+// @Router /v1/labels/{id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLabelUpdate() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, data repo.LabelUpdate) (repo.LabelOut, error) {
+ auth := services.NewContext(r.Context())
+ data.ID = ID
+ return ctrl.repo.Labels.UpdateByGroup(auth, auth.GID, data)
}
+
+ return adapters.ActionID("id", fn, http.StatusOK)
}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_locations.go b/backend/app/api/handlers/v1/v1_ctrl_locations.go
index 5e85766..d84ce31 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_locations.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_locations.go
@@ -3,146 +3,120 @@ package v1
import (
"net/http"
+ "github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
- "github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
- "github.com/rs/zerolog/log"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
)
-// HandleLocationGetAll godoc
-// @Summary Get All Locations
-// @Tags Locations
-// @Produce json
-// @Success 200 {object} server.Results{items=[]repo.LocationOutCount}
-// @Router /v1/locations [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLocationGetAll() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- user := services.UseUserCtx(r.Context())
- locations, err := ctrl.repo.Locations.GetAll(r.Context(), user.GroupID)
- if err != nil {
- log.Err(err).Msg("failed to get locations")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusOK, server.Results{Items: locations})
+// HandleLocationTreeQuery godoc
+//
+// @Summary Get Locations Tree
+// @Tags Locations
+// @Produce json
+// @Param withItems query bool false "include items in response tree"
+// @Success 200 {object} []repo.TreeItem
+// @Router /v1/locations/tree [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationTreeQuery() errchain.HandlerFunc {
+ fn := func(r *http.Request, query repo.TreeQuery) ([]repo.TreeItem, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Locations.Tree(auth, auth.GID, query)
}
+
+ return adapters.Query(fn, http.StatusOK)
+}
+
+// HandleLocationGetAll godoc
+//
+// @Summary Get All Locations
+// @Tags Locations
+// @Produce json
+// @Param filterChildren query bool false "Filter locations with parents"
+// @Success 200 {object} []repo.LocationOutCount
+// @Router /v1/locations [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationGetAll() errchain.HandlerFunc {
+ fn := func(r *http.Request, q repo.LocationQuery) ([]repo.LocationOutCount, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Locations.GetAll(auth, auth.GID, q)
+ }
+
+ return adapters.Query(fn, http.StatusOK)
}
// HandleLocationCreate godoc
-// @Summary Create a new location
-// @Tags Locations
-// @Produce json
-// @Param payload body repo.LocationCreate true "Location Data"
-// @Success 200 {object} repo.LocationSummary
-// @Router /v1/locations [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLocationCreate() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- createData := repo.LocationCreate{}
- if err := server.Decode(r, &createData); err != nil {
- log.Err(err).Msg("failed to decode location create data")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- user := services.UseUserCtx(r.Context())
- location, err := ctrl.repo.Locations.Create(r.Context(), user.GroupID, createData)
- if err != nil {
- log.Err(err).Msg("failed to create location")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- return server.Respond(w, http.StatusCreated, location)
+//
+// @Summary Create Location
+// @Tags Locations
+// @Produce json
+// @Param payload body repo.LocationCreate true "Location Data"
+// @Success 200 {object} repo.LocationSummary
+// @Router /v1/locations [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationCreate() errchain.HandlerFunc {
+ fn := func(r *http.Request, createData repo.LocationCreate) (repo.LocationOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Locations.Create(auth, auth.GID, createData)
}
+
+ return adapters.Action(fn, http.StatusCreated)
}
-// HandleLocationDelete godocs
-// @Summary deletes a location
-// @Tags Locations
-// @Produce json
-// @Param id path string true "Location ID"
-// @Success 204
-// @Router /v1/locations/{id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLocationDelete() server.HandlerFunc {
- return ctrl.handleLocationGeneral()
-}
-
-// HandleLocationGet godocs
-// @Summary Gets a location and fields
-// @Tags Locations
-// @Produce json
-// @Param id path string true "Location ID"
-// @Success 200 {object} repo.LocationOut
-// @Router /v1/locations/{id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLocationGet() server.HandlerFunc {
- return ctrl.handleLocationGeneral()
-}
-
-// HandleLocationUpdate godocs
-// @Summary updates a location
-// @Tags Locations
-// @Produce json
-// @Param id path string true "Location ID"
-// @Param payload body repo.LocationUpdate true "Location Data"
-// @Success 200 {object} repo.LocationOut
-// @Router /v1/locations/{id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleLocationUpdate() server.HandlerFunc {
- return ctrl.handleLocationGeneral()
-}
-
-func (ctrl *V1Controller) handleLocationGeneral() server.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- ctx := services.NewContext(r.Context())
- ID, err := ctrl.routeID(r)
- if err != nil {
- return err
- }
-
- switch r.Method {
- case http.MethodGet:
- location, err := ctrl.repo.Locations.GetOneByGroup(r.Context(), ctx.GID, ID)
- if err != nil {
- l := log.Err(err).
- Str("ID", ID.String()).
- Str("GID", ctx.GID.String())
-
- if ent.IsNotFound(err) {
- l.Msg("location not found")
- return validate.NewRequestError(err, http.StatusNotFound)
- }
-
- l.Msg("failed to get location")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, location)
- case http.MethodPut:
- body := repo.LocationUpdate{}
- if err := server.Decode(r, &body); err != nil {
- log.Err(err).Msg("failed to decode location update data")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
-
- body.ID = ID
-
- result, err := ctrl.repo.Locations.UpdateOneByGroup(r.Context(), ctx.GID, ID, body)
- if err != nil {
- log.Err(err).Msg("failed to update location")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusOK, result)
- case http.MethodDelete:
- err = ctrl.repo.Locations.DeleteByGroup(r.Context(), ctx.GID, ID)
- if err != nil {
- log.Err(err).Msg("failed to delete location")
- return validate.NewRequestError(err, http.StatusInternalServerError)
- }
- return server.Respond(w, http.StatusNoContent, nil)
- }
- return nil
+// HandleLocationDelete godoc
+//
+// @Summary Delete Location
+// @Tags Locations
+// @Produce json
+// @Param id path string true "Location ID"
+// @Success 204
+// @Router /v1/locations/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationDelete() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (any, error) {
+ auth := services.NewContext(r.Context())
+ err := ctrl.repo.Locations.DeleteByGroup(auth, auth.GID, ID)
+ return nil, err
}
+
+ return adapters.CommandID("id", fn, http.StatusNoContent)
+}
+
+// HandleLocationGet godoc
+//
+// @Summary Get Location
+// @Tags Locations
+// @Produce json
+// @Param id path string true "Location ID"
+// @Success 200 {object} repo.LocationOut
+// @Router /v1/locations/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationGet() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (repo.LocationOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Locations.GetOneByGroup(auth, auth.GID, ID)
+ }
+
+ return adapters.CommandID("id", fn, http.StatusOK)
+}
+
+// HandleLocationUpdate godoc
+//
+// @Summary Update Location
+// @Tags Locations
+// @Produce json
+// @Param id path string true "Location ID"
+// @Param payload body repo.LocationUpdate true "Location Data"
+// @Success 200 {object} repo.LocationOut
+// @Router /v1/locations/{id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleLocationUpdate() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, body repo.LocationUpdate) (repo.LocationOut, error) {
+ auth := services.NewContext(r.Context())
+ body.ID = ID
+ return ctrl.repo.Locations.UpdateByGroup(auth, auth.GID, ID, body)
+ }
+
+ return adapters.ActionID("id", fn, http.StatusOK)
}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go b/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go
new file mode 100644
index 0000000..e94c12a
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go
@@ -0,0 +1,82 @@
+package v1
+
+import (
+ "net/http"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
+)
+
+// HandleMaintenanceLogGet godoc
+//
+// @Summary Get Maintenance Log
+// @Tags Maintenance
+// @Produce json
+// @Success 200 {object} repo.MaintenanceLog
+// @Router /v1/items/{id}/maintenance [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleMaintenanceLogGet() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, q repo.MaintenanceLogQuery) (repo.MaintenanceLog, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.MaintEntry.GetLog(auth, auth.GID, ID, q)
+ }
+
+ return adapters.QueryID("id", fn, http.StatusOK)
+}
+
+// HandleMaintenanceEntryCreate godoc
+//
+// @Summary Create Maintenance Entry
+// @Tags Maintenance
+// @Produce json
+// @Param payload body repo.MaintenanceEntryCreate true "Entry Data"
+// @Success 201 {object} repo.MaintenanceEntry
+// @Router /v1/items/{id}/maintenance [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleMaintenanceEntryCreate() errchain.HandlerFunc {
+ fn := func(r *http.Request, itemID uuid.UUID, body repo.MaintenanceEntryCreate) (repo.MaintenanceEntry, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.MaintEntry.Create(auth, itemID, body)
+ }
+
+ return adapters.ActionID("id", fn, http.StatusCreated)
+}
+
+// HandleMaintenanceEntryDelete godoc
+//
+// @Summary Delete Maintenance Entry
+// @Tags Maintenance
+// @Produce json
+// @Success 204
+// @Router /v1/items/{id}/maintenance/{entry_id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleMaintenanceEntryDelete() errchain.HandlerFunc {
+ fn := func(r *http.Request, entryID uuid.UUID) (any, error) {
+ auth := services.NewContext(r.Context())
+ err := ctrl.repo.MaintEntry.Delete(auth, entryID)
+ return nil, err
+ }
+
+ return adapters.CommandID("entry_id", fn, http.StatusNoContent)
+}
+
+// HandleMaintenanceEntryUpdate godoc
+//
+// @Summary Update Maintenance Entry
+// @Tags Maintenance
+// @Produce json
+// @Param payload body repo.MaintenanceEntryUpdate true "Entry Data"
+// @Success 200 {object} repo.MaintenanceEntry
+// @Router /v1/items/{id}/maintenance/{entry_id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() errchain.HandlerFunc {
+ fn := func(r *http.Request, entryID uuid.UUID, body repo.MaintenanceEntryUpdate) (repo.MaintenanceEntry, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.MaintEntry.Update(auth, entryID, body)
+ }
+
+ return adapters.ActionID("entry_id", fn, http.StatusOK)
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_notifiers.go b/backend/app/api/handlers/v1/v1_ctrl_notifiers.go
new file mode 100644
index 0000000..3c64dc7
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_notifiers.go
@@ -0,0 +1,105 @@
+package v1
+
+import (
+ "net/http"
+
+ "github.com/containrrr/shoutrrr"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
+)
+
+// HandleGetUserNotifiers godoc
+//
+// @Summary Get Notifiers
+// @Tags Notifiers
+// @Produce json
+// @Success 200 {object} []repo.NotifierOut
+// @Router /v1/notifiers [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGetUserNotifiers() errchain.HandlerFunc {
+ fn := func(r *http.Request, _ struct{}) ([]repo.NotifierOut, error) {
+ user := services.UseUserCtx(r.Context())
+ return ctrl.repo.Notifiers.GetByUser(r.Context(), user.ID)
+ }
+
+ return adapters.Query(fn, http.StatusOK)
+}
+
+// HandleCreateNotifier godoc
+//
+// @Summary Create Notifier
+// @Tags Notifiers
+// @Produce json
+// @Param payload body repo.NotifierCreate true "Notifier Data"
+// @Success 200 {object} repo.NotifierOut
+// @Router /v1/notifiers [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleCreateNotifier() errchain.HandlerFunc {
+ fn := func(r *http.Request, in repo.NotifierCreate) (repo.NotifierOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Notifiers.Create(auth, auth.GID, auth.UID, in)
+ }
+
+ return adapters.Action(fn, http.StatusCreated)
+}
+
+// HandleDeleteNotifier godocs
+//
+// @Summary Delete a Notifier
+// @Tags Notifiers
+// @Param id path string true "Notifier ID"
+// @Success 204
+// @Router /v1/notifiers/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleDeleteNotifier() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID) (any, error) {
+ auth := services.NewContext(r.Context())
+ return nil, ctrl.repo.Notifiers.Delete(auth, auth.UID, ID)
+ }
+
+ return adapters.CommandID("id", fn, http.StatusNoContent)
+}
+
+// HandleUpdateNotifier godocs
+//
+// @Summary Update Notifier
+// @Tags Notifiers
+// @Param id path string true "Notifier ID"
+// @Param payload body repo.NotifierUpdate true "Notifier Data"
+// @Success 200 {object} repo.NotifierOut
+// @Router /v1/notifiers/{id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleUpdateNotifier() errchain.HandlerFunc {
+ fn := func(r *http.Request, ID uuid.UUID, in repo.NotifierUpdate) (repo.NotifierOut, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Notifiers.Update(auth, auth.UID, ID, in)
+ }
+
+ return adapters.ActionID("id", fn, http.StatusOK)
+}
+
+// HandlerNotifierTest godoc
+//
+// @Summary Test Notifier
+// @Tags Notifiers
+// @Produce json
+// @Param id path string true "Notifier ID"
+// @Param url query string true "URL"
+// @Success 204
+// @Router /v1/notifiers/test [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandlerNotifierTest() errchain.HandlerFunc {
+ type body struct {
+ URL string `json:"url" validate:"required"`
+ }
+
+ fn := func(r *http.Request, q body) (any, error) {
+ err := shoutrrr.Send(q.URL, "Test message from Homebox")
+ return nil, err
+ }
+
+ return adapters.Action(fn, http.StatusOK)
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_qrcode.go b/backend/app/api/handlers/v1/v1_ctrl_qrcode.go
new file mode 100644
index 0000000..25f7c75
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_qrcode.go
@@ -0,0 +1,72 @@
+package v1
+
+import (
+ "bytes"
+ "image/png"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/yeqown/go-qrcode/v2"
+ "github.com/yeqown/go-qrcode/writer/standard"
+
+ _ "embed"
+)
+
+//go:embed assets/QRIcon.png
+var qrcodeLogo []byte
+
+// HandleGenerateQRCode godoc
+//
+// @Summary Create QR Code
+// @Tags Items
+// @Produce json
+// @Param data query string false "data to be encoded into qrcode"
+// @Success 200 {string} string "image/jpeg"
+// @Router /v1/qrcode [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGenerateQRCode() errchain.HandlerFunc {
+ type query struct {
+ // 4,296 characters is the maximum length of a QR code
+ Data string `schema:"data" validate:"required,max=4296"`
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) error {
+ q, err := adapters.DecodeQuery[query](r)
+ if err != nil {
+ return err
+ }
+
+ image, err := png.Decode(bytes.NewReader(qrcodeLogo))
+ if err != nil {
+ panic(err)
+ }
+
+ decodedStr, err := url.QueryUnescape(q.Data)
+ if err != nil {
+ return err
+ }
+
+ qrc, err := qrcode.New(decodedStr)
+ if err != nil {
+ return err
+ }
+
+ toWriteCloser := struct {
+ io.Writer
+ io.Closer
+ }{
+ Writer: w,
+ Closer: io.NopCloser(nil),
+ }
+
+ qrwriter := standard.NewWithWriter(toWriteCloser, standard.WithLogoImage(image))
+
+ // Return the QR code as a jpeg image
+ w.Header().Set("Content-Type", "image/jpeg")
+ w.Header().Set("Content-Disposition", "attachment; filename=qrcode.jpg")
+ return qrc.Save(qrwriter)
+ }
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_reporting.go b/backend/app/api/handlers/v1/v1_ctrl_reporting.go
new file mode 100644
index 0000000..40f0d22
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_reporting.go
@@ -0,0 +1,32 @@
+package v1
+
+import (
+ "net/http"
+
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/httpkit/errchain"
+)
+
+// HandleBillOfMaterialsExport godoc
+//
+// @Summary Export Bill of Materials
+// @Tags Reporting
+// @Produce json
+// @Success 200 {string} string "text/csv"
+// @Router /v1/reporting/bill-of-materials [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleBillOfMaterialsExport() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ actor := services.UseUserCtx(r.Context())
+
+ csv, err := ctrl.svc.Items.ExportBillOfMaterialsTSV(r.Context(), actor.GroupID)
+ if err != nil {
+ return err
+ }
+
+ w.Header().Set("Content-Type", "text/tsv")
+ w.Header().Set("Content-Disposition", "attachment; filename=bill-of-materials.tsv")
+ _, err = w.Write(csv)
+ return err
+ }
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_statistics.go b/backend/app/api/handlers/v1/v1_ctrl_statistics.go
new file mode 100644
index 0000000..0a5a319
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_statistics.go
@@ -0,0 +1,104 @@
+package v1
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/sys/validate"
+ "github.com/hay-kot/homebox/backend/internal/web/adapters"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+)
+
+// HandleGroupStatisticsLocations godoc
+//
+// @Summary Get Location Statistics
+// @Tags Statistics
+// @Produce json
+// @Success 200 {object} []repo.TotalsByOrganizer
+// @Router /v1/groups/statistics/locations [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupStatisticsLocations() errchain.HandlerFunc {
+ fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Groups.StatsLocationsByPurchasePrice(auth, auth.GID)
+ }
+
+ return adapters.Command(fn, http.StatusOK)
+}
+
+// HandleGroupStatisticsLabels godoc
+//
+// @Summary Get Label Statistics
+// @Tags Statistics
+// @Produce json
+// @Success 200 {object} []repo.TotalsByOrganizer
+// @Router /v1/groups/statistics/labels [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupStatisticsLabels() errchain.HandlerFunc {
+ fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Groups.StatsLabelsByPurchasePrice(auth, auth.GID)
+ }
+
+ return adapters.Command(fn, http.StatusOK)
+}
+
+// HandleGroupStatistics godoc
+//
+// @Summary Get Group Statistics
+// @Tags Statistics
+// @Produce json
+// @Success 200 {object} repo.GroupStatistics
+// @Router /v1/groups/statistics [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupStatistics() errchain.HandlerFunc {
+ fn := func(r *http.Request) (repo.GroupStatistics, error) {
+ auth := services.NewContext(r.Context())
+ return ctrl.repo.Groups.StatsGroup(auth, auth.GID)
+ }
+
+ return adapters.Command(fn, http.StatusOK)
+}
+
+// HandleGroupStatisticsPriceOverTime godoc
+//
+// @Summary Get Purchase Price Statistics
+// @Tags Statistics
+// @Produce json
+// @Success 200 {object} repo.ValueOverTime
+// @Param start query string false "start date"
+// @Param end query string false "end date"
+// @Router /v1/groups/statistics/purchase-price [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() errchain.HandlerFunc {
+ parseDate := func(datestr string, defaultDate time.Time) (time.Time, error) {
+ if datestr == "" {
+ return defaultDate, nil
+ }
+ return time.Parse("2006-01-02", datestr)
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+
+ startDate, err := parseDate(r.URL.Query().Get("start"), time.Now().AddDate(0, -1, 0))
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusBadRequest)
+ }
+
+ endDate, err := parseDate(r.URL.Query().Get("end"), time.Now())
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusBadRequest)
+ }
+
+ stats, err := ctrl.repo.Groups.StatsPurchasePrice(ctx, ctx.GID, startDate, endDate)
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+
+ return server.JSON(w, http.StatusOK, stats)
+ }
+}
diff --git a/backend/app/api/handlers/v1/v1_ctrl_user.go b/backend/app/api/handlers/v1/v1_ctrl_user.go
index 565a723..8708d24 100644
--- a/backend/app/api/handlers/v1/v1_ctrl_user.go
+++ b/backend/app/api/handlers/v1/v1_ctrl_user.go
@@ -1,24 +1,27 @@
package v1
import (
+ "fmt"
"net/http"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
-// HandleUserSelf godoc
-// @Summary Get the current user
-// @Tags User
-// @Produce json
-// @Param payload body services.UserRegistration true "User Data"
-// @Success 204
-// @Router /v1/users/register [Post]
-func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
+// HandleUserRegistration godoc
+//
+// @Summary Register New User
+// @Tags User
+// @Produce json
+// @Param payload body services.UserRegistration true "User Data"
+// @Success 204
+// @Router /v1/users/register [Post]
+func (ctrl *V1Controller) HandleUserRegistration() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
regData := services.UserRegistration{}
@@ -28,7 +31,7 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
}
if !ctrl.allowRegistration && regData.GroupToken == "" {
- return validate.NewRequestError(nil, http.StatusForbidden)
+ return validate.NewRequestError(fmt.Errorf("user registration disabled"), http.StatusForbidden)
}
_, err := ctrl.svc.User.RegisterUser(r.Context(), regData)
@@ -37,18 +40,19 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ return server.JSON(w, http.StatusNoContent, nil)
}
}
// HandleUserSelf godoc
-// @Summary Get the current user
-// @Tags User
-// @Produce json
-// @Success 200 {object} server.Result{item=repo.UserOut}
-// @Router /v1/users/self [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
+//
+// @Summary Get User Self
+// @Tags User
+// @Produce json
+// @Success 200 {object} Wrapped{item=repo.UserOut}
+// @Router /v1/users/self [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleUserSelf() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
token := services.UseTokenCtx(r.Context())
usr, err := ctrl.svc.User.GetSelf(r.Context(), token)
@@ -57,19 +61,20 @@ func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusOK, server.Wrap(usr))
+ return server.JSON(w, http.StatusOK, Wrap(usr))
}
}
// HandleUserSelfUpdate godoc
-// @Summary Update the current user
-// @Tags User
-// @Produce json
-// @Param payload body repo.UserUpdate true "User Data"
-// @Success 200 {object} server.Result{item=repo.UserUpdate}
-// @Router /v1/users/self [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
+//
+// @Summary Update Account
+// @Tags User
+// @Produce json
+// @Param payload body repo.UserUpdate true "User Data"
+// @Success 200 {object} Wrapped{item=repo.UserUpdate}
+// @Router /v1/users/self [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleUserSelfUpdate() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
updateData := repo.UserUpdate{}
if err := server.Decode(r, &updateData); err != nil {
@@ -79,23 +84,23 @@ func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
actor := services.UseUserCtx(r.Context())
newData, err := ctrl.svc.User.UpdateSelf(r.Context(), actor.ID, updateData)
-
if err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusOK, server.Wrap(newData))
+ return server.JSON(w, http.StatusOK, Wrap(newData))
}
}
// HandleUserSelfDelete godoc
-// @Summary Deletes the user account
-// @Tags User
-// @Produce json
-// @Success 204
-// @Router /v1/users/self [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
+//
+// @Summary Delete Account
+// @Tags User
+// @Produce json
+// @Success 204
+// @Router /v1/users/self [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleUserSelfDelete() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
if ctrl.isDemo {
return validate.NewRequestError(nil, http.StatusForbidden)
@@ -106,7 +111,7 @@ func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ return server.JSON(w, http.StatusNoContent, nil)
}
}
@@ -118,13 +123,14 @@ type (
)
// HandleUserSelfChangePassword godoc
-// @Summary Updates the users password
-// @Tags User
-// @Success 204
-// @Param payload body ChangePassword true "Password Payload"
-// @Router /v1/users/change-password [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
+//
+// @Summary Change Password
+// @Tags User
+// @Success 204
+// @Param payload body ChangePassword true "Password Payload"
+// @Router /v1/users/change-password [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleUserSelfChangePassword() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
if ctrl.isDemo {
return validate.NewRequestError(nil, http.StatusForbidden)
@@ -143,6 +149,6 @@ func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
- return server.Respond(w, http.StatusNoContent, nil)
+ return server.JSON(w, http.StatusNoContent, nil)
}
}
diff --git a/backend/app/api/logger.go b/backend/app/api/logger.go
index ddc574f..34659c6 100644
--- a/backend/app/api/logger.go
+++ b/backend/app/api/logger.go
@@ -2,7 +2,6 @@ package main
import (
"os"
- "strings"
"github.com/hay-kot/homebox/backend/internal/sys/config"
"github.com/rs/zerolog"
@@ -18,24 +17,8 @@ func (a *app) setupLogger() {
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger()
}
- log.Level(getLevel(a.conf.Log.Level))
-}
-
-func getLevel(l string) zerolog.Level {
- switch strings.ToLower(l) {
- case "debug":
- return zerolog.DebugLevel
- case "info":
- return zerolog.InfoLevel
- case "warn":
- return zerolog.WarnLevel
- case "error":
- return zerolog.ErrorLevel
- case "fatal":
- return zerolog.FatalLevel
- case "panic":
- return zerolog.PanicLevel
- default:
- return zerolog.InfoLevel
+ level, err := zerolog.ParseLevel(a.conf.Log.Level)
+ if err == nil {
+ zerolog.SetGlobalLevel(level)
}
}
diff --git a/backend/app/api/main.go b/backend/app/api/main.go
index c62d5df..4811bfa 100644
--- a/backend/app/api/main.go
+++ b/backend/app/api/main.go
@@ -1,7 +1,9 @@
package main
import (
+ "bytes"
"context"
+ "fmt"
"net/http"
"os"
"path/filepath"
@@ -9,16 +11,24 @@ import (
atlas "ariga.io/atlas/sql/migrate"
"entgo.io/ent/dialect/sql/schema"
- "github.com/hay-kot/homebox/backend/app/api/static/docs"
+ "github.com/go-chi/chi/v5"
+ "github.com/go-chi/chi/v5/middleware"
+
+ "github.com/hay-kot/homebox/backend/internal/core/currencies"
"github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/migrations"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/config"
"github.com/hay-kot/homebox/backend/internal/web/mid"
- "github.com/hay-kot/homebox/backend/pkgs/server"
- _ "github.com/mattn/go-sqlite3"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/graceful"
+ "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
+ "github.com/rs/zerolog/pkgerrors"
+
+ _ "github.com/hay-kot/homebox/backend/pkgs/cgofreesqlite"
)
var (
@@ -27,24 +37,32 @@ var (
buildTime = "now"
)
-// @title Go API Templates
+func build() string {
+ short := commit
+ if len(short) > 7 {
+ short = short[:7]
+ }
+
+ return fmt.Sprintf("%s, commit %s, built at %s", version, short, buildTime)
+}
+
+// @title Homebox API
// @version 1.0
-// @description This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.
+// @description Track, Manage, and Organize your Things.
// @contact.name Don't
-// @license.name MIT
// @BasePath /api
// @securityDefinitions.apikey Bearer
// @in header
// @name Authorization
// @description "Type 'Bearer TOKEN' to correctly set the API Key"
func main() {
- cfg, err := config.New()
+ zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
+
+ cfg, err := config.New(build(), "Homebox inventory management system")
if err != nil {
panic(err)
}
- docs.SwaggerInfo.Host = cfg.Swagger.Host
-
if err := run(cfg); err != nil {
panic(err)
}
@@ -57,17 +75,17 @@ func run(cfg *config.Config) error {
// =========================================================================
// Initialize Database & Repos
- err := os.MkdirAll(cfg.Storage.Data, 0755)
+ err := os.MkdirAll(cfg.Storage.Data, 0o755)
if err != nil {
log.Fatal().Err(err).Msg("failed to create data directory")
}
- c, err := ent.Open("sqlite3", cfg.Storage.SqliteUrl)
+ c, err := ent.Open("sqlite3", cfg.Storage.SqliteURL)
if err != nil {
log.Fatal().
Err(err).
Str("driver", "sqlite").
- Str("url", cfg.Storage.SqliteUrl).
+ Str("url", cfg.Storage.SqliteURL).
Msg("failed opening connection to sqlite")
}
defer func(c *ent.Client) {
@@ -100,7 +118,7 @@ func run(cfg *config.Config) error {
log.Fatal().
Err(err).
Str("driver", "sqlite").
- Str("url", cfg.Storage.SqliteUrl).
+ Str("url", cfg.Storage.SqliteURL).
Msg("failed creating schema resources")
}
@@ -110,67 +128,145 @@ func run(cfg *config.Config) error {
return err
}
- app.db = c
- app.repos = repo.New(c, cfg.Storage.Data)
- app.services = services.New(app.repos)
-
- // =========================================================================
- // Start Server\
- logger := log.With().Caller().Logger()
-
- mwLogger := mid.Logger(logger)
- if app.conf.Mode == config.ModeDevelopment {
- mwLogger = mid.SugarLogger(logger)
+ collectFuncs := []currencies.CollectorFunc{
+ currencies.CollectDefaults(),
}
- app.server = server.NewServer(
- server.WithHost(app.conf.Web.Host),
- server.WithPort(app.conf.Web.Port),
- server.WithMiddleware(
- mwLogger,
- mid.Errors(logger),
- mid.Panic(app.conf.Mode == config.ModeDevelopment),
- ),
+ if cfg.Options.CurrencyConfig != "" {
+ log.Info().
+ Str("path", cfg.Options.CurrencyConfig).
+ Msg("loading currency config file")
+
+ content, err := os.ReadFile(cfg.Options.CurrencyConfig)
+ if err != nil {
+ log.Fatal().
+ Err(err).
+ Str("path", cfg.Options.CurrencyConfig).
+ Msg("failed to read currency config file")
+ }
+
+ collectFuncs = append(collectFuncs, currencies.CollectJSON(bytes.NewReader(content)))
+ }
+
+ currencies, err := currencies.CollectionCurrencies(collectFuncs...)
+ if err != nil {
+ log.Fatal().
+ Err(err).
+ Msg("failed to collect currencies")
+ }
+
+ app.bus = eventbus.New()
+ app.db = c
+ app.repos = repo.New(c, app.bus, cfg.Storage.Data)
+ app.services = services.New(
+ app.repos,
+ services.WithAutoIncrementAssetID(cfg.Options.AutoIncrementAssetID),
+ services.WithCurrencies(currencies),
)
- app.mountRoutes(app.repos)
+ // =========================================================================
+ // Start Server
- log.Info().Msgf("Starting HTTP Server on %s:%s", app.server.Host, app.server.Port)
+ logger := log.With().Caller().Logger()
+
+ router := chi.NewMux()
+ router.Use(
+ middleware.RequestID,
+ middleware.RealIP,
+ mid.Logger(logger),
+ middleware.Recoverer,
+ middleware.StripSlashes,
+ )
+
+ chain := errchain.New(mid.Errors(logger))
+
+ app.mountRoutes(router, chain, app.repos)
+
+ runner := graceful.NewRunner()
+
+ runner.AddFunc("server", func(ctx context.Context) error {
+ httpserver := http.Server{
+ Addr: fmt.Sprintf("%s:%s", cfg.Web.Host, cfg.Web.Port),
+ Handler: router,
+ ReadTimeout: cfg.Web.ReadTimeout,
+ WriteTimeout: cfg.Web.WriteTimeout,
+ IdleTimeout: cfg.Web.IdleTimeout,
+ }
+
+ go func() {
+ <-ctx.Done()
+ _ = httpserver.Shutdown(context.Background())
+ }()
+
+ log.Info().Msgf("Server is running on %s:%s", cfg.Web.Host, cfg.Web.Port)
+ return httpserver.ListenAndServe()
+ })
// =========================================================================
// Start Reoccurring Tasks
- go app.startBgTask(time.Duration(24)*time.Hour, func() {
- _, err := app.repos.AuthTokens.PurgeExpiredTokens(context.Background())
+ runner.AddFunc("eventbus", app.bus.Run)
+
+ runner.AddFunc("seed_database", func(ctx context.Context) error {
+ // TODO: Remove through external API that does setup
+ if cfg.Demo {
+ log.Info().Msg("Running in demo mode, creating demo data")
+ app.SetupDemo()
+ }
+ return nil
+ })
+
+ runner.AddPlugin(NewTask("purge-tokens", time.Duration(24)*time.Hour, func(ctx context.Context) {
+ _, err := app.repos.AuthTokens.PurgeExpiredTokens(ctx)
if err != nil {
log.Error().
Err(err).
Msg("failed to purge expired tokens")
}
- })
- go app.startBgTask(time.Duration(24)*time.Hour, func() {
- _, err := app.repos.Groups.InvitationPurge(context.Background())
+ }))
+
+ runner.AddPlugin(NewTask("purge-invitations", time.Duration(24)*time.Hour, func(ctx context.Context) {
+ _, err := app.repos.Groups.InvitationPurge(ctx)
if err != nil {
log.Error().
Err(err).
Msg("failed to purge expired invitations")
}
- })
+ }))
- // TODO: Remove through external API that does setup
- if cfg.Demo {
- log.Info().Msg("Running in demo mode, creating demo data")
- app.SetupDemo()
- }
+ runner.AddPlugin(NewTask("send-notifications", time.Duration(1)*time.Hour, func(ctx context.Context) {
+ now := time.Now()
+
+ if now.Hour() == 8 {
+ fmt.Println("run notifiers")
+ err := app.services.BackgroundService.SendNotifiersToday(context.Background())
+ if err != nil {
+ log.Error().
+ Err(err).
+ Msg("failed to send notifiers")
+ }
+ }
+ }))
if cfg.Debug.Enabled {
- debugrouter := app.debugRouter()
- go func() {
- if err := http.ListenAndServe(":"+cfg.Debug.Port, debugrouter); err != nil {
- log.Fatal().Err(err).Msg("failed to start debug server")
+ runner.AddFunc("debug", func(ctx context.Context) error {
+ debugserver := http.Server{
+ Addr: fmt.Sprintf("%s:%s", cfg.Web.Host, cfg.Debug.Port),
+ Handler: app.debugRouter(),
+ ReadTimeout: cfg.Web.ReadTimeout,
+ WriteTimeout: cfg.Web.WriteTimeout,
+ IdleTimeout: cfg.Web.IdleTimeout,
}
- }()
+
+ go func() {
+ <-ctx.Done()
+ _ = debugserver.Shutdown(context.Background())
+ }()
+
+ log.Info().Msgf("Debug server is running on %s:%s", cfg.Web.Host, cfg.Debug.Port)
+ return debugserver.ListenAndServe()
+ })
}
- return app.server.Start()
+ return runner.Start(context.Background())
}
diff --git a/backend/app/api/middleware.go b/backend/app/api/middleware.go
index 505ba40..02b3a6c 100644
--- a/backend/app/api/middleware.go
+++ b/backend/app/api/middleware.go
@@ -1,31 +1,151 @@
package main
import (
+ "context"
"errors"
"net/http"
+ "net/url"
"strings"
+ v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
"github.com/hay-kot/homebox/backend/internal/core/services"
+ "github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
)
+type tokenHasKey struct {
+ key string
+}
+
+var hashedToken = tokenHasKey{key: "hashedToken"}
+
+type RoleMode int
+
+const (
+ RoleModeOr RoleMode = 0
+ RoleModeAnd RoleMode = 1
+)
+
+// mwRoles is a middleware that will validate the required roles are met. All roles
+// are required to be met for the request to be allowed. If the user does not have
+// the required roles, a 403 Forbidden will be returned.
+//
+// WARNING: This middleware _MUST_ be called after mwAuthToken or else it will panic
+func (a *app) mwRoles(rm RoleMode, required ...string) errchain.Middleware {
+ return func(next errchain.Handler) errchain.Handler {
+ return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+ ctx := r.Context()
+
+ maybeToken := ctx.Value(hashedToken)
+ if maybeToken == nil {
+ panic("mwRoles: token not found in context, you must call mwAuthToken before mwRoles")
+ }
+
+ token := maybeToken.(string)
+
+ roles, err := a.repos.AuthTokens.GetRoles(r.Context(), token)
+ if err != nil {
+ return err
+ }
+
+ outer:
+ switch rm {
+ case RoleModeOr:
+ for _, role := range required {
+ if roles.Contains(role) {
+ break outer
+ }
+ }
+ return validate.NewRequestError(errors.New("Forbidden"), http.StatusForbidden)
+ case RoleModeAnd:
+ for _, req := range required {
+ if !roles.Contains(req) {
+ return validate.NewRequestError(errors.New("Unauthorized"), http.StatusForbidden)
+ }
+ }
+ }
+
+ return next.ServeHTTP(w, r)
+ })
+ }
+}
+
+type KeyFunc func(r *http.Request) (string, error)
+
+func getBearer(r *http.Request) (string, error) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return "", errors.New("authorization header is required")
+ }
+
+ return auth, nil
+}
+
+func getQuery(r *http.Request) (string, error) {
+ token := r.URL.Query().Get("access_token")
+ if token == "" {
+ return "", errors.New("access_token query is required")
+ }
+
+ token, err := url.QueryUnescape(token)
+ if err != nil {
+ return "", errors.New("access_token query is required")
+ }
+
+ return token, nil
+}
+
// mwAuthToken is a middleware that will check the database for a stateful token
-// and attach it to the request context with the user, or return a 401 if it doesn't exist.
-func (a *app) mwAuthToken(next server.Handler) server.Handler {
- return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- requestToken := r.Header.Get("Authorization")
+// and attach its user to the request context, or return an appropriate error.
+// Authorization support is by token via Headers or Query Parameter
+//
+// Example:
+// - header = "Bearer 1234567890"
+// - query = "?access_token=1234567890"
+func (a *app) mwAuthToken(next errchain.Handler) errchain.Handler {
+ return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+ var requestToken string
+
+ // We ignore the error to allow the next strategy to be attempted
+ {
+ cookies, _ := v1.GetCookies(r)
+ if cookies != nil {
+ requestToken = cookies.Token
+ }
+ }
if requestToken == "" {
- return validate.NewRequestError(errors.New("Authorization header is required"), http.StatusUnauthorized)
+ keyFuncs := [...]KeyFunc{
+ getBearer,
+ getQuery,
+ }
+
+ for _, keyFunc := range keyFuncs {
+ token, err := keyFunc(r)
+ if err == nil {
+ requestToken = token
+ break
+ }
+ }
+ }
+
+ if requestToken == "" {
+ return validate.NewRequestError(errors.New("authorization header or query is required"), http.StatusUnauthorized)
}
requestToken = strings.TrimPrefix(requestToken, "Bearer ")
- usr, err := a.services.User.GetSelf(r.Context(), requestToken)
+ r = r.WithContext(context.WithValue(r.Context(), hashedToken, requestToken))
+
+ usr, err := a.services.User.GetSelf(r.Context(), requestToken)
// Check the database for the token
if err != nil {
- return validate.NewRequestError(errors.New("Authorization header is required"), http.StatusUnauthorized)
+ if ent.IsNotFound(err) {
+ return validate.NewRequestError(errors.New("valid authorization token is required"), http.StatusUnauthorized)
+ }
+
+ return err
}
r = r.WithContext(services.SetUserCtx(r.Context(), &usr, requestToken))
diff --git a/backend/app/api/providers/doc.go b/backend/app/api/providers/doc.go
new file mode 100644
index 0000000..f58615d
--- /dev/null
+++ b/backend/app/api/providers/doc.go
@@ -0,0 +1,2 @@
+// Package providers provides an authentication abstraction for the backend.
+package providers
diff --git a/backend/app/api/providers/extractors.go b/backend/app/api/providers/extractors.go
new file mode 100644
index 0000000..bc042a4
--- /dev/null
+++ b/backend/app/api/providers/extractors.go
@@ -0,0 +1,55 @@
+package providers
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/hay-kot/homebox/backend/internal/sys/validate"
+ "github.com/hay-kot/httpkit/server"
+ "github.com/rs/zerolog/log"
+)
+
+type LoginForm struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+ StayLoggedIn bool `json:"stayLoggedIn"`
+}
+
+func getLoginForm(r *http.Request) (LoginForm, error) {
+ loginForm := LoginForm{}
+
+ switch r.Header.Get("Content-Type") {
+ case "application/x-www-form-urlencoded":
+ err := r.ParseForm()
+ if err != nil {
+ return loginForm, errors.New("failed to parse form")
+ }
+
+ loginForm.Username = r.PostFormValue("username")
+ loginForm.Password = r.PostFormValue("password")
+ loginForm.StayLoggedIn = r.PostFormValue("stayLoggedIn") == "true"
+ case "application/json":
+ err := server.Decode(r, &loginForm)
+ if err != nil {
+ log.Err(err).Msg("failed to decode login form")
+ return loginForm, errors.New("failed to decode login form")
+ }
+ default:
+ return loginForm, errors.New("invalid content type")
+ }
+
+ if loginForm.Username == "" || loginForm.Password == "" {
+ return loginForm, validate.NewFieldErrors(
+ validate.FieldError{
+ Field: "username",
+ Error: "username or password is empty",
+ },
+ validate.FieldError{
+ Field: "password",
+ Error: "username or password is empty",
+ },
+ )
+ }
+
+ return loginForm, nil
+}
diff --git a/backend/app/api/providers/local.go b/backend/app/api/providers/local.go
new file mode 100644
index 0000000..991f51a
--- /dev/null
+++ b/backend/app/api/providers/local.go
@@ -0,0 +1,30 @@
+package providers
+
+import (
+ "net/http"
+
+ "github.com/hay-kot/homebox/backend/internal/core/services"
+)
+
+type LocalProvider struct {
+ service *services.UserService
+}
+
+func NewLocalProvider(service *services.UserService) *LocalProvider {
+ return &LocalProvider{
+ service: service,
+ }
+}
+
+func (p *LocalProvider) Name() string {
+ return "local"
+}
+
+func (p *LocalProvider) Authenticate(w http.ResponseWriter, r *http.Request) (services.UserAuthTokenDetail, error) {
+ loginForm, err := getLoginForm(r)
+ if err != nil {
+ return services.UserAuthTokenDetail{}, err
+ }
+
+ return p.service.Login(r.Context(), loginForm.Username, loginForm.Password, loginForm.StayLoggedIn)
+}
diff --git a/backend/app/api/routes.go b/backend/app/api/routes.go
index 992e70e..de10942 100644
--- a/backend/app/api/routes.go
+++ b/backend/app/api/routes.go
@@ -3,19 +3,21 @@ package main
import (
"embed"
"errors"
- "fmt"
"io"
"mime"
"net/http"
"path"
"path/filepath"
+ "github.com/go-chi/chi/v5"
"github.com/hay-kot/homebox/backend/app/api/handlers/debughandlers"
v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
+ "github.com/hay-kot/homebox/backend/app/api/providers"
_ "github.com/hay-kot/homebox/backend/app/api/static/docs"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/hay-kot/homebox/backend/pkgs/server"
- httpSwagger "github.com/swaggo/http-swagger" // http-swagger middleware
+ "github.com/hay-kot/httpkit/errchain"
+ httpSwagger "github.com/swaggo/http-swagger/v2" // http-swagger middleware
)
const prefix = "/api"
@@ -35,78 +37,133 @@ func (a *app) debugRouter() *http.ServeMux {
}
// registerRoutes registers all the routes for the API
-func (a *app) mountRoutes(repos *repo.AllRepos) {
+func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllRepos) {
registerMimes()
- a.server.Get("/swagger/*", server.ToHandler(httpSwagger.Handler(
- httpSwagger.URL(fmt.Sprintf("%s://%s/swagger/doc.json", a.conf.Swagger.Scheme, a.conf.Swagger.Host)),
- )))
+ r.Get("/swagger/*", httpSwagger.Handler(
+ httpSwagger.URL("/swagger/doc.json"),
+ ))
// =========================================================================
// API Version 1
- v1Base := v1.BaseUrlFunc(prefix)
+ v1Base := v1.BaseURLFunc(prefix)
v1Ctrl := v1.NewControllerV1(
a.services,
a.repos,
+ a.bus,
v1.WithMaxUploadSize(a.conf.Web.MaxUploadSize),
- v1.WithRegistration(a.conf.AllowRegistration),
+ v1.WithRegistration(a.conf.Options.AllowRegistration),
v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode
)
- a.server.Get(v1Base("/status"), v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
+ r.Get(v1Base("/status"), chain.ToHandlerFunc(v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
Version: version,
Commit: commit,
BuildTime: buildTime,
- }))
+ })))
- a.server.Post(v1Base("/users/register"), v1Ctrl.HandleUserRegistration())
- a.server.Post(v1Base("/users/login"), v1Ctrl.HandleAuthLogin())
+ r.Get(v1Base("/currencies"), chain.ToHandlerFunc(v1Ctrl.HandleCurrency()))
- // Attachment download URl needs a `token` query param to be passed in the request.
- // and also needs to be outside of the `auth` middleware.
- a.server.Get(v1Base("/items/{id}/attachments/download"), v1Ctrl.HandleItemAttachmentDownload())
+ providers := []v1.AuthProvider{
+ providers.NewLocalProvider(a.services.User),
+ }
- a.server.Get(v1Base("/users/self"), v1Ctrl.HandleUserSelf(), a.mwAuthToken)
- a.server.Put(v1Base("/users/self"), v1Ctrl.HandleUserSelfUpdate(), a.mwAuthToken)
- a.server.Delete(v1Base("/users/self"), v1Ctrl.HandleUserSelfDelete(), a.mwAuthToken)
- a.server.Post(v1Base("/users/logout"), v1Ctrl.HandleAuthLogout(), a.mwAuthToken)
- a.server.Get(v1Base("/users/refresh"), v1Ctrl.HandleAuthRefresh(), a.mwAuthToken)
- a.server.Put(v1Base("/users/self/change-password"), v1Ctrl.HandleUserSelfChangePassword(), a.mwAuthToken)
+ r.Post(v1Base("/users/register"), chain.ToHandlerFunc(v1Ctrl.HandleUserRegistration()))
+ r.Post(v1Base("/users/login"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogin(providers...)))
- a.server.Post(v1Base("/groups/invitations"), v1Ctrl.HandleGroupInvitationsCreate(), a.mwAuthToken)
- a.server.Get(v1Base("/groups/statistics"), v1Ctrl.HandleGroupStatistics(), a.mwAuthToken)
+ userMW := []errchain.Middleware{
+ a.mwAuthToken,
+ a.mwRoles(RoleModeOr, authroles.RoleUser.String()),
+ }
+
+ r.Get(v1Base("/ws/events"), chain.ToHandlerFunc(v1Ctrl.HandleCacheWS(), userMW...))
+ r.Get(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelf(), userMW...))
+ r.Put(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfUpdate(), userMW...))
+ r.Delete(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfDelete(), userMW...))
+ r.Post(v1Base("/users/logout"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogout(), userMW...))
+ r.Get(v1Base("/users/refresh"), chain.ToHandlerFunc(v1Ctrl.HandleAuthRefresh(), userMW...))
+ r.Put(v1Base("/users/self/change-password"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfChangePassword(), userMW...))
+
+ r.Post(v1Base("/groups/invitations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsCreate(), userMW...))
+ r.Get(v1Base("/groups/statistics"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatistics(), userMW...))
+ r.Get(v1Base("/groups/statistics/purchase-price"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...))
+ r.Get(v1Base("/groups/statistics/locations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLocations(), userMW...))
+ r.Get(v1Base("/groups/statistics/labels"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLabels(), userMW...))
// TODO: I don't like /groups being the URL for users
- a.server.Get(v1Base("/groups"), v1Ctrl.HandleGroupGet(), a.mwAuthToken)
- a.server.Put(v1Base("/groups"), v1Ctrl.HandleGroupUpdate(), a.mwAuthToken)
+ r.Get(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupGet(), userMW...))
+ r.Put(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupUpdate(), userMW...))
- a.server.Get(v1Base("/locations"), v1Ctrl.HandleLocationGetAll(), a.mwAuthToken)
- a.server.Post(v1Base("/locations"), v1Ctrl.HandleLocationCreate(), a.mwAuthToken)
- a.server.Get(v1Base("/locations/{id}"), v1Ctrl.HandleLocationGet(), a.mwAuthToken)
- a.server.Put(v1Base("/locations/{id}"), v1Ctrl.HandleLocationUpdate(), a.mwAuthToken)
- a.server.Delete(v1Base("/locations/{id}"), v1Ctrl.HandleLocationDelete(), a.mwAuthToken)
+ r.Post(v1Base("/actions/ensure-asset-ids"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureAssetID(), userMW...))
+ r.Post(v1Base("/actions/zero-item-time-fields"), chain.ToHandlerFunc(v1Ctrl.HandleItemDateZeroOut(), userMW...))
+ r.Post(v1Base("/actions/ensure-import-refs"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureImportRefs(), userMW...))
+ r.Post(v1Base("/actions/set-primary-photos"), chain.ToHandlerFunc(v1Ctrl.HandleSetPrimaryPhotos(), userMW...))
- a.server.Get(v1Base("/labels"), v1Ctrl.HandleLabelsGetAll(), a.mwAuthToken)
- a.server.Post(v1Base("/labels"), v1Ctrl.HandleLabelsCreate(), a.mwAuthToken)
- a.server.Get(v1Base("/labels/{id}"), v1Ctrl.HandleLabelGet(), a.mwAuthToken)
- a.server.Put(v1Base("/labels/{id}"), v1Ctrl.HandleLabelUpdate(), a.mwAuthToken)
- a.server.Delete(v1Base("/labels/{id}"), v1Ctrl.HandleLabelDelete(), a.mwAuthToken)
+ r.Get(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGetAll(), userMW...))
+ r.Post(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationCreate(), userMW...))
+ r.Get(v1Base("/locations/tree"), chain.ToHandlerFunc(v1Ctrl.HandleLocationTreeQuery(), userMW...))
+ r.Get(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGet(), userMW...))
+ r.Put(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationUpdate(), userMW...))
+ r.Delete(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationDelete(), userMW...))
- a.server.Get(v1Base("/items"), v1Ctrl.HandleItemsGetAll(), a.mwAuthToken)
- a.server.Post(v1Base("/items/import"), v1Ctrl.HandleItemsImport(), a.mwAuthToken)
- a.server.Post(v1Base("/items"), v1Ctrl.HandleItemsCreate(), a.mwAuthToken)
- a.server.Get(v1Base("/items/{id}"), v1Ctrl.HandleItemGet(), a.mwAuthToken)
- a.server.Put(v1Base("/items/{id}"), v1Ctrl.HandleItemUpdate(), a.mwAuthToken)
- a.server.Delete(v1Base("/items/{id}"), v1Ctrl.HandleItemDelete(), a.mwAuthToken)
+ r.Get(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsGetAll(), userMW...))
+ r.Post(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsCreate(), userMW...))
+ r.Get(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelGet(), userMW...))
+ r.Put(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelUpdate(), userMW...))
+ r.Delete(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelDelete(), userMW...))
- a.server.Post(v1Base("/items/{id}/attachments"), v1Ctrl.HandleItemAttachmentCreate(), a.mwAuthToken)
- a.server.Get(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentToken(), a.mwAuthToken)
- a.server.Put(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentUpdate(), a.mwAuthToken)
- a.server.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentDelete(), a.mwAuthToken)
+ r.Get(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsGetAll(), userMW...))
+ r.Post(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsCreate(), userMW...))
+ r.Post(v1Base("/items/import"), chain.ToHandlerFunc(v1Ctrl.HandleItemsImport(), userMW...))
+ r.Get(v1Base("/items/export"), chain.ToHandlerFunc(v1Ctrl.HandleItemsExport(), userMW...))
+ r.Get(v1Base("/items/fields"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldNames(), userMW...))
+ r.Get(v1Base("/items/fields/values"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldValues(), userMW...))
- a.server.NotFound(notFoundHandler())
+ r.Get(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemGet(), userMW...))
+ r.Get(v1Base("/items/{id}/path"), chain.ToHandlerFunc(v1Ctrl.HandleItemFullPath(), userMW...))
+ r.Put(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemUpdate(), userMW...))
+ r.Patch(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemPatch(), userMW...))
+ r.Delete(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemDelete(), userMW...))
+
+ r.Post(v1Base("/items/{id}/attachments"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentCreate(), userMW...))
+ r.Put(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentUpdate(), userMW...))
+ r.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentDelete(), userMW...))
+
+ r.Get(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceLogGet(), userMW...))
+ r.Post(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryCreate(), userMW...))
+ r.Put(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...))
+ r.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryDelete(), userMW...))
+
+ r.Get(v1Base("/assets/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...))
+
+ // Notifiers
+ r.Get(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleGetUserNotifiers(), userMW...))
+ r.Post(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleCreateNotifier(), userMW...))
+ r.Put(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleUpdateNotifier(), userMW...))
+ r.Delete(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleDeleteNotifier(), userMW...))
+ r.Post(v1Base("/notifiers/test"), chain.ToHandlerFunc(v1Ctrl.HandlerNotifierTest(), userMW...))
+
+ // Asset-Like endpoints
+ assetMW := []errchain.Middleware{
+ a.mwAuthToken,
+ a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
+ }
+
+ r.Get(
+ v1Base("/qrcode"),
+ chain.ToHandlerFunc(v1Ctrl.HandleGenerateQRCode(), assetMW...),
+ )
+ r.Get(
+ v1Base("/items/{id}/attachments/{attachment_id}"),
+ chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentGet(), assetMW...),
+ )
+
+ // Reporting Services
+ r.Get(v1Base("/reporting/bill-of-materials"), chain.ToHandlerFunc(v1Ctrl.HandleBillOfMaterialsExport(), userMW...))
+
+ r.NotFound(chain.ToHandlerFunc(notFoundHandler()))
}
func registerMimes() {
@@ -123,13 +180,13 @@ func registerMimes() {
// notFoundHandler perform the main logic around handling the internal SPA embed and ensuring that
// the client side routing is handled correctly.
-func notFoundHandler() server.HandlerFunc {
+func notFoundHandler() errchain.HandlerFunc {
tryRead := func(fs embed.FS, prefix, requestedPath string, w http.ResponseWriter) error {
f, err := fs.Open(path.Join(prefix, requestedPath))
if err != nil {
return err
}
- defer f.Close()
+ defer func() { _ = f.Close() }()
stat, _ := f.Stat()
if stat.IsDir() {
diff --git a/backend/app/api/static/docs/docs.go b/backend/app/api/static/docs/docs.go
index a4105a1..7c9a748 100644
--- a/backend/app/api/static/docs/docs.go
+++ b/backend/app/api/static/docs/docs.go
@@ -1,5 +1,4 @@
-// Package docs GENERATED BY SWAG; DO NOT EDIT
-// This file was generated by swaggo/swag
+// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
@@ -13,14 +12,163 @@ const docTemplate = `{
"contact": {
"name": "Don't"
},
- "license": {
- "name": "MIT"
- },
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
+ "/v1/actions/ensure-asset-ids": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an asset ID",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensure Asset IDs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/ensure-import-refs": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an import ref",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensures Import Refs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/set-primary-photos": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Sets the first photo of each item as the primary photo",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Set Primary Photos",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/zero-item-time-fields": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Resets all item date fields to the beginning of the day",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Zero Out Time Fields",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/assets/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get Item by Asset ID",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Asset ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/currency": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "Currency",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/currencies.Currency"
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -34,7 +182,7 @@ const docTemplate = `{
"tags": [
"Group"
],
- "summary": "Get the current user's group",
+ "summary": "Get Group",
"responses": {
"200": {
"description": "OK",
@@ -56,7 +204,7 @@ const docTemplate = `{
"tags": [
"Group"
],
- "summary": "Updates some fields of the current users group",
+ "summary": "Update Group",
"parameters": [
{
"description": "User Data",
@@ -91,7 +239,7 @@ const docTemplate = `{
"tags": [
"Group"
],
- "summary": "Get the current user",
+ "summary": "Create Group Invitation",
"parameters": [
{
"description": "User Data",
@@ -124,9 +272,9 @@ const docTemplate = `{
"application/json"
],
"tags": [
- "Group"
+ "Statistics"
],
- "summary": "Get the current user's group",
+ "summary": "Get Group Statistics",
"responses": {
"200": {
"description": "OK",
@@ -137,6 +285,98 @@ const docTemplate = `{
}
}
},
+ "/v1/groups/statistics/labels": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Label Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/locations": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Location Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/purchase-price": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Purchase Price Statistics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "start date",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "end date",
+ "name": "end",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ValueOverTime"
+ }
+ }
+ }
+ }
+ },
"/v1/items": {
"get": {
"security": [
@@ -150,7 +390,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "Get All Items",
+ "summary": "Query All Items",
"parameters": [
{
"type": "string",
@@ -189,6 +429,16 @@ const docTemplate = `{
"description": "location Ids",
"name": "locations",
"in": "query"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "parent Ids",
+ "name": "parentIds",
+ "in": "query"
}
],
"responses": {
@@ -212,7 +462,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "Create a new item",
+ "summary": "Create Item",
"parameters": [
{
"description": "Item Data",
@@ -224,11 +474,86 @@ const docTemplate = `{
}
}
],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/export": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Export Items",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Names",
"responses": {
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/repo.ItemSummary"
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields/values": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Values",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
}
}
@@ -247,7 +572,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "imports items into the database",
+ "summary": "Import Items",
"parameters": [
{
"type": "file",
@@ -277,7 +602,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "Gets a item and fields",
+ "summary": "Get Item",
"parameters": [
{
"type": "string",
@@ -308,7 +633,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "updates a item",
+ "summary": "Update Item",
"parameters": [
{
"type": "string",
@@ -348,7 +673,7 @@ const docTemplate = `{
"tags": [
"Items"
],
- "summary": "deletes a item",
+ "summary": "Delete Item",
"parameters": [
{
"type": "string",
@@ -363,6 +688,46 @@ const docTemplate = `{
"description": "No Content"
}
}
+ },
+ "patch": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Update Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Item Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemPatch"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
}
},
"/v1/items/{id}/attachments": {
@@ -378,7 +743,7 @@ const docTemplate = `{
"tags": [
"Items Attachments"
],
- "summary": "imports items into the database",
+ "summary": "Create Item Attachment",
"parameters": [
{
"type": "string",
@@ -419,49 +784,12 @@ const docTemplate = `{
"422": {
"description": "Unprocessable Entity",
"schema": {
- "$ref": "#/definitions/server.ErrorResponse"
+ "$ref": "#/definitions/validate.ErrorResponse"
}
}
}
}
},
- "/v1/items/{id}/attachments/download": {
- "get": {
- "security": [
- {
- "Bearer": []
- }
- ],
- "produces": [
- "application/octet-stream"
- ],
- "tags": [
- "Items Attachments"
- ],
- "summary": "retrieves an attachment for an item",
- "parameters": [
- {
- "type": "string",
- "description": "Item ID",
- "name": "id",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "Attachment token",
- "name": "token",
- "in": "query",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "OK"
- }
- }
- }
- },
"/v1/items/{id}/attachments/{attachment_id}": {
"get": {
"security": [
@@ -475,7 +803,7 @@ const docTemplate = `{
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Get Item Attachment",
"parameters": [
{
"type": "string",
@@ -510,7 +838,7 @@ const docTemplate = `{
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Update Item Attachment",
"parameters": [
{
"type": "string",
@@ -554,7 +882,7 @@ const docTemplate = `{
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Delete Item Attachment",
"parameters": [
{
"type": "string",
@@ -578,6 +906,153 @@ const docTemplate = `{
}
}
},
+ "/v1/items/{id}/maintenance": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Get Maintenance Log",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceLog"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Create Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryCreate"
+ }
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/maintenance/{entry_id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Update Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Delete Maintenance Entry",
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/path": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get the full path of an item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemPath"
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/labels": {
"get": {
"security": [
@@ -596,22 +1071,10 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
- "allOf": [
- {
- "$ref": "#/definitions/server.Results"
- },
- {
- "type": "object",
- "properties": {
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.LabelOut"
- }
- }
- }
- }
- ]
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LabelOut"
+ }
}
}
}
@@ -628,7 +1091,7 @@ const docTemplate = `{
"tags": [
"Labels"
],
- "summary": "Create a new label",
+ "summary": "Create Label",
"parameters": [
{
"description": "Label Data",
@@ -663,7 +1126,7 @@ const docTemplate = `{
"tags": [
"Labels"
],
- "summary": "Gets a label and fields",
+ "summary": "Get Label",
"parameters": [
{
"type": "string",
@@ -694,7 +1157,7 @@ const docTemplate = `{
"tags": [
"Labels"
],
- "summary": "updates a label",
+ "summary": "Update Label",
"parameters": [
{
"type": "string",
@@ -725,7 +1188,7 @@ const docTemplate = `{
"tags": [
"Labels"
],
- "summary": "deletes a label",
+ "summary": "Delete Label",
"parameters": [
{
"type": "string",
@@ -756,26 +1219,22 @@ const docTemplate = `{
"Locations"
],
"summary": "Get All Locations",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "Filter locations with parents",
+ "name": "filterChildren",
+ "in": "query"
+ }
+ ],
"responses": {
"200": {
"description": "OK",
"schema": {
- "allOf": [
- {
- "$ref": "#/definitions/server.Results"
- },
- {
- "type": "object",
- "properties": {
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.LocationOutCount"
- }
- }
- }
- }
- ]
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LocationOutCount"
+ }
}
}
}
@@ -792,7 +1251,7 @@ const docTemplate = `{
"tags": [
"Locations"
],
- "summary": "Create a new location",
+ "summary": "Create Location",
"parameters": [
{
"description": "Location Data",
@@ -814,6 +1273,41 @@ const docTemplate = `{
}
}
},
+ "/v1/locations/tree": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Get Locations Tree",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "include items in response tree",
+ "name": "withItems",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/locations/{id}": {
"get": {
"security": [
@@ -827,7 +1321,7 @@ const docTemplate = `{
"tags": [
"Locations"
],
- "summary": "Gets a location and fields",
+ "summary": "Get Location",
"parameters": [
{
"type": "string",
@@ -858,7 +1352,7 @@ const docTemplate = `{
"tags": [
"Locations"
],
- "summary": "updates a location",
+ "summary": "Update Location",
"parameters": [
{
"type": "string",
@@ -898,7 +1392,7 @@ const docTemplate = `{
"tags": [
"Locations"
],
- "summary": "deletes a location",
+ "summary": "Delete Location",
"parameters": [
{
"type": "string",
@@ -915,6 +1409,223 @@ const docTemplate = `{
}
}
},
+ "/v1/notifiers": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Get Notifiers",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Create Notifier",
+ "parameters": [
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/notifiers/test": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Test Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "URL",
+ "name": "url",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/notifiers/{id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Update Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Delete a Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/qrcode": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Create QR Code",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "data to be encoded into qrcode",
+ "name": "data",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "image/jpeg",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/reporting/bill-of-materials": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Reporting"
+ ],
+ "summary": "Export Bill of Materials",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
"/v1/status": {
"get": {
"produces": [
@@ -923,12 +1634,12 @@ const docTemplate = `{
"tags": [
"Base"
],
- "summary": "Retrieves the basic information about the API",
+ "summary": "Application Info",
"responses": {
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/v1.ApiSummary"
+ "$ref": "#/definitions/v1.APISummary"
}
}
}
@@ -944,7 +1655,7 @@ const docTemplate = `{
"tags": [
"User"
],
- "summary": "Updates the users password",
+ "summary": "Change Password",
"parameters": [
{
"description": "Password Payload",
@@ -990,6 +1701,21 @@ const docTemplate = `{
"description": "string",
"name": "password",
"in": "formData"
+ },
+ {
+ "description": "Login Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.LoginForm"
+ }
+ },
+ {
+ "type": "string",
+ "description": "auth provider",
+ "name": "provider",
+ "in": "query"
}
],
"responses": {
@@ -1047,7 +1773,7 @@ const docTemplate = `{
"tags": [
"User"
],
- "summary": "Get the current user",
+ "summary": "Register New User",
"parameters": [
{
"description": "User Data",
@@ -1079,14 +1805,14 @@ const docTemplate = `{
"tags": [
"User"
],
- "summary": "Get the current user",
+ "summary": "Get User Self",
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
- "$ref": "#/definitions/server.Result"
+ "$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1113,7 +1839,7 @@ const docTemplate = `{
"tags": [
"User"
],
- "summary": "Update the current user",
+ "summary": "Update Account",
"parameters": [
{
"description": "User Data",
@@ -1131,7 +1857,7 @@ const docTemplate = `{
"schema": {
"allOf": [
{
- "$ref": "#/definitions/server.Result"
+ "$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1158,7 +1884,7 @@ const docTemplate = `{
"tags": [
"User"
],
- "summary": "Deletes the user account",
+ "summary": "Delete Account",
"responses": {
"204": {
"description": "No Content"
@@ -1168,6 +1894,23 @@ const docTemplate = `{
}
},
"definitions": {
+ "currencies.Currency": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string"
+ },
+ "local": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "symbol": {
+ "type": "string"
+ }
+ }
+ },
"repo.DocumentOut": {
"type": "object",
"properties": {
@@ -1205,6 +1948,9 @@ const docTemplate = `{
"repo.GroupStatistics": {
"type": "object",
"properties": {
+ "totalItemPrice": {
+ "type": "number"
+ },
"totalItems": {
"type": "integer"
},
@@ -1216,6 +1962,9 @@ const docTemplate = `{
},
"totalUsers": {
"type": "integer"
+ },
+ "totalWithWarranty": {
+ "type": "integer"
}
}
},
@@ -1242,6 +1991,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
+ "primary": {
+ "type": "boolean"
+ },
"type": {
"type": "string"
},
@@ -1253,6 +2005,9 @@ const docTemplate = `{
"repo.ItemAttachmentUpdate": {
"type": "object",
"properties": {
+ "primary": {
+ "type": "boolean"
+ },
"title": {
"type": "string"
},
@@ -1263,9 +2018,13 @@ const docTemplate = `{
},
"repo.ItemCreate": {
"type": "object",
+ "required": [
+ "name"
+ ],
"properties": {
"description": {
- "type": "string"
+ "type": "string",
+ "maxLength": 1000
},
"labelIds": {
"type": "array",
@@ -1278,7 +2037,9 @@ const docTemplate = `{
"type": "string"
},
"name": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
},
"parentId": {
"type": "string",
@@ -1304,9 +2065,6 @@ const docTemplate = `{
"textValue": {
"type": "string"
},
- "timeValue": {
- "type": "string"
- },
"type": {
"type": "string"
}
@@ -1318,18 +2076,16 @@ const docTemplate = `{
"archived": {
"type": "boolean"
},
+ "assetId": {
+ "type": "string",
+ "example": "0"
+ },
"attachments": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemAttachment"
}
},
- "children": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"createdAt": {
"type": "string"
},
@@ -1345,6 +2101,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
+ "imageId": {
+ "type": "string"
+ },
"insured": {
"type": "boolean"
},
@@ -1360,9 +2119,13 @@ const docTemplate = `{
},
"location": {
"description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.LocationSummary"
+ "x-omitempty": true
},
"manufacturer": {
"type": "string"
@@ -1378,9 +2141,13 @@ const docTemplate = `{
"type": "string"
},
"parent": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.ItemSummary"
+ "x-omitempty": true
},
"purchaseFrom": {
"type": "string"
@@ -1424,6 +2191,33 @@ const docTemplate = `{
}
}
},
+ "repo.ItemPatch": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "quantity": {
+ "type": "integer",
+ "x-nullable": true,
+ "x-omitempty": true
+ }
+ }
+ },
+ "repo.ItemPath": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "$ref": "#/definitions/repo.ItemType"
+ }
+ }
+ },
"repo.ItemSummary": {
"type": "object",
"properties": {
@@ -1439,6 +2233,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
+ "imageId": {
+ "type": "string"
+ },
"insured": {
"type": "boolean"
},
@@ -1450,13 +2247,21 @@ const docTemplate = `{
},
"location": {
"description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.LocationSummary"
+ "x-omitempty": true
},
"name": {
"type": "string"
},
+ "purchasePrice": {
+ "type": "string",
+ "example": "0"
+ },
"quantity": {
"type": "integer"
},
@@ -1465,12 +2270,26 @@ const docTemplate = `{
}
}
},
+ "repo.ItemType": {
+ "type": "string",
+ "enum": [
+ "location",
+ "item"
+ ],
+ "x-enum-varnames": [
+ "ItemTypeLocation",
+ "ItemTypeItem"
+ ]
+ },
"repo.ItemUpdate": {
"type": "object",
"properties": {
"archived": {
"type": "boolean"
},
+ "assetId": {
+ "type": "string"
+ },
"description": {
"type": "string"
},
@@ -1560,15 +2379,21 @@ const docTemplate = `{
},
"repo.LabelCreate": {
"type": "object",
+ "required": [
+ "name"
+ ],
"properties": {
"color": {
"type": "string"
},
"description": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255
},
"name": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
}
}
},
@@ -1584,12 +2409,6 @@ const docTemplate = `{
"id": {
"type": "string"
},
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"name": {
"type": "string"
},
@@ -1626,6 +2445,10 @@ const docTemplate = `{
},
"name": {
"type": "string"
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true
}
}
},
@@ -1647,12 +2470,6 @@ const docTemplate = `{
"id": {
"type": "string"
},
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"name": {
"type": "string"
},
@@ -1725,6 +2542,161 @@ const docTemplate = `{
}
}
},
+ "repo.MaintenanceEntry": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryCreate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryUpdate": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceLog": {
+ "type": "object",
+ "properties": {
+ "costAverage": {
+ "type": "number"
+ },
+ "costTotal": {
+ "type": "number"
+ },
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ },
+ "itemId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierCreate": {
+ "type": "object",
+ "required": [
+ "name",
+ "url"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierOut": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ },
+ "userId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierUpdate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
"repo.PaginationResult-repo_ItemSummary": {
"type": "object",
"properties": {
@@ -1745,6 +2717,40 @@ const docTemplate = `{
}
}
},
+ "repo.TotalsByOrganizer": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "total": {
+ "type": "number"
+ }
+ }
+ },
+ "repo.TreeItem": {
+ "type": "object",
+ "properties": {
+ "children": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
"repo.UserOut": {
"type": "object",
"properties": {
@@ -1782,37 +2788,41 @@ const docTemplate = `{
}
}
},
- "server.ErrorResponse": {
+ "repo.ValueOverTime": {
"type": "object",
"properties": {
- "error": {
+ "end": {
"type": "string"
},
- "fields": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ValueOverTimeEntry"
}
- }
- }
- },
- "server.Result": {
- "type": "object",
- "properties": {
- "details": {},
- "error": {
- "type": "boolean"
},
- "item": {},
- "message": {
+ "start": {
"type": "string"
+ },
+ "valueAtEnd": {
+ "type": "number"
+ },
+ "valueAtStart": {
+ "type": "number"
}
}
},
- "server.Results": {
+ "repo.ValueOverTimeEntry": {
"type": "object",
"properties": {
- "items": {}
+ "date": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "number"
+ }
}
},
"services.UserRegistration": {
@@ -1832,9 +2842,12 @@ const docTemplate = `{
}
}
},
- "v1.ApiSummary": {
+ "v1.APISummary": {
"type": "object",
"properties": {
+ "allowRegistration": {
+ "type": "boolean"
+ },
"build": {
"$ref": "#/definitions/v1.Build"
},
@@ -1858,6 +2871,14 @@ const docTemplate = `{
}
}
},
+ "v1.ActionAmountResult": {
+ "type": "object",
+ "properties": {
+ "completed": {
+ "type": "integer"
+ }
+ }
+ },
"v1.Build": {
"type": "object",
"properties": {
@@ -1899,12 +2920,17 @@ const docTemplate = `{
},
"v1.GroupInvitationCreate": {
"type": "object",
+ "required": [
+ "uses"
+ ],
"properties": {
"expiresAt": {
"type": "string"
},
"uses": {
- "type": "integer"
+ "type": "integer",
+ "maximum": 100,
+ "minimum": 1
}
}
},
@@ -1916,9 +2942,26 @@ const docTemplate = `{
}
}
},
+ "v1.LoginForm": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "stayLoggedIn": {
+ "type": "boolean"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
"v1.TokenResponse": {
"type": "object",
"properties": {
+ "attachmentToken": {
+ "type": "string"
+ },
"expiresAt": {
"type": "string"
},
@@ -1926,6 +2969,23 @@ const docTemplate = `{
"type": "string"
}
}
+ },
+ "v1.Wrapped": {
+ "type": "object",
+ "properties": {
+ "item": {}
+ }
+ },
+ "validate.ErrorResponse": {
+ "type": "object",
+ "properties": {
+ "error": {
+ "type": "string"
+ },
+ "fields": {
+ "type": "string"
+ }
+ }
}
},
"securityDefinitions": {
@@ -1944,10 +3004,12 @@ var SwaggerInfo = &swag.Spec{
Host: "",
BasePath: "/api",
Schemes: []string{},
- Title: "Go API Templates",
- Description: "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.",
+ Title: "Homebox API",
+ Description: "Track, Manage, and Organize your Things.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
+ LeftDelim: "{{",
+ RightDelim: "}}",
}
func init() {
diff --git a/backend/app/api/static/docs/swagger.json b/backend/app/api/static/docs/swagger.json
index 911f8f2..b10c93a 100644
--- a/backend/app/api/static/docs/swagger.json
+++ b/backend/app/api/static/docs/swagger.json
@@ -1,18 +1,167 @@
{
"swagger": "2.0",
"info": {
- "description": "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.",
- "title": "Go API Templates",
+ "description": "Track, Manage, and Organize your Things.",
+ "title": "Homebox API",
"contact": {
"name": "Don't"
},
- "license": {
- "name": "MIT"
- },
"version": "1.0"
},
"basePath": "/api",
"paths": {
+ "/v1/actions/ensure-asset-ids": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an asset ID",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensure Asset IDs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/ensure-import-refs": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an import ref",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensures Import Refs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/set-primary-photos": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Sets the first photo of each item as the primary photo",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Set Primary Photos",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/zero-item-time-fields": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Resets all item date fields to the beginning of the day",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Zero Out Time Fields",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/assets/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get Item by Asset ID",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Asset ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/currency": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "Currency",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/currencies.Currency"
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -26,7 +175,7 @@
"tags": [
"Group"
],
- "summary": "Get the current user's group",
+ "summary": "Get Group",
"responses": {
"200": {
"description": "OK",
@@ -48,7 +197,7 @@
"tags": [
"Group"
],
- "summary": "Updates some fields of the current users group",
+ "summary": "Update Group",
"parameters": [
{
"description": "User Data",
@@ -83,7 +232,7 @@
"tags": [
"Group"
],
- "summary": "Get the current user",
+ "summary": "Create Group Invitation",
"parameters": [
{
"description": "User Data",
@@ -116,9 +265,9 @@
"application/json"
],
"tags": [
- "Group"
+ "Statistics"
],
- "summary": "Get the current user's group",
+ "summary": "Get Group Statistics",
"responses": {
"200": {
"description": "OK",
@@ -129,6 +278,98 @@
}
}
},
+ "/v1/groups/statistics/labels": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Label Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/locations": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Location Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/purchase-price": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Purchase Price Statistics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "start date",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "end date",
+ "name": "end",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ValueOverTime"
+ }
+ }
+ }
+ }
+ },
"/v1/items": {
"get": {
"security": [
@@ -142,7 +383,7 @@
"tags": [
"Items"
],
- "summary": "Get All Items",
+ "summary": "Query All Items",
"parameters": [
{
"type": "string",
@@ -181,6 +422,16 @@
"description": "location Ids",
"name": "locations",
"in": "query"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "parent Ids",
+ "name": "parentIds",
+ "in": "query"
}
],
"responses": {
@@ -204,7 +455,7 @@
"tags": [
"Items"
],
- "summary": "Create a new item",
+ "summary": "Create Item",
"parameters": [
{
"description": "Item Data",
@@ -216,11 +467,86 @@
}
}
],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/export": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Export Items",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Names",
"responses": {
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/repo.ItemSummary"
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields/values": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Values",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
}
}
@@ -239,7 +565,7 @@
"tags": [
"Items"
],
- "summary": "imports items into the database",
+ "summary": "Import Items",
"parameters": [
{
"type": "file",
@@ -269,7 +595,7 @@
"tags": [
"Items"
],
- "summary": "Gets a item and fields",
+ "summary": "Get Item",
"parameters": [
{
"type": "string",
@@ -300,7 +626,7 @@
"tags": [
"Items"
],
- "summary": "updates a item",
+ "summary": "Update Item",
"parameters": [
{
"type": "string",
@@ -340,7 +666,7 @@
"tags": [
"Items"
],
- "summary": "deletes a item",
+ "summary": "Delete Item",
"parameters": [
{
"type": "string",
@@ -355,6 +681,46 @@
"description": "No Content"
}
}
+ },
+ "patch": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Update Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Item Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemPatch"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
}
},
"/v1/items/{id}/attachments": {
@@ -370,7 +736,7 @@
"tags": [
"Items Attachments"
],
- "summary": "imports items into the database",
+ "summary": "Create Item Attachment",
"parameters": [
{
"type": "string",
@@ -411,49 +777,12 @@
"422": {
"description": "Unprocessable Entity",
"schema": {
- "$ref": "#/definitions/server.ErrorResponse"
+ "$ref": "#/definitions/validate.ErrorResponse"
}
}
}
}
},
- "/v1/items/{id}/attachments/download": {
- "get": {
- "security": [
- {
- "Bearer": []
- }
- ],
- "produces": [
- "application/octet-stream"
- ],
- "tags": [
- "Items Attachments"
- ],
- "summary": "retrieves an attachment for an item",
- "parameters": [
- {
- "type": "string",
- "description": "Item ID",
- "name": "id",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "Attachment token",
- "name": "token",
- "in": "query",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "OK"
- }
- }
- }
- },
"/v1/items/{id}/attachments/{attachment_id}": {
"get": {
"security": [
@@ -467,7 +796,7 @@
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Get Item Attachment",
"parameters": [
{
"type": "string",
@@ -502,7 +831,7 @@
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Update Item Attachment",
"parameters": [
{
"type": "string",
@@ -546,7 +875,7 @@
"tags": [
"Items Attachments"
],
- "summary": "retrieves an attachment for an item",
+ "summary": "Delete Item Attachment",
"parameters": [
{
"type": "string",
@@ -570,6 +899,153 @@
}
}
},
+ "/v1/items/{id}/maintenance": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Get Maintenance Log",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceLog"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Create Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryCreate"
+ }
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/maintenance/{entry_id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Update Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Delete Maintenance Entry",
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/path": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get the full path of an item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemPath"
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/labels": {
"get": {
"security": [
@@ -588,22 +1064,10 @@
"200": {
"description": "OK",
"schema": {
- "allOf": [
- {
- "$ref": "#/definitions/server.Results"
- },
- {
- "type": "object",
- "properties": {
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.LabelOut"
- }
- }
- }
- }
- ]
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LabelOut"
+ }
}
}
}
@@ -620,7 +1084,7 @@
"tags": [
"Labels"
],
- "summary": "Create a new label",
+ "summary": "Create Label",
"parameters": [
{
"description": "Label Data",
@@ -655,7 +1119,7 @@
"tags": [
"Labels"
],
- "summary": "Gets a label and fields",
+ "summary": "Get Label",
"parameters": [
{
"type": "string",
@@ -686,7 +1150,7 @@
"tags": [
"Labels"
],
- "summary": "updates a label",
+ "summary": "Update Label",
"parameters": [
{
"type": "string",
@@ -717,7 +1181,7 @@
"tags": [
"Labels"
],
- "summary": "deletes a label",
+ "summary": "Delete Label",
"parameters": [
{
"type": "string",
@@ -748,26 +1212,22 @@
"Locations"
],
"summary": "Get All Locations",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "Filter locations with parents",
+ "name": "filterChildren",
+ "in": "query"
+ }
+ ],
"responses": {
"200": {
"description": "OK",
"schema": {
- "allOf": [
- {
- "$ref": "#/definitions/server.Results"
- },
- {
- "type": "object",
- "properties": {
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.LocationOutCount"
- }
- }
- }
- }
- ]
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LocationOutCount"
+ }
}
}
}
@@ -784,7 +1244,7 @@
"tags": [
"Locations"
],
- "summary": "Create a new location",
+ "summary": "Create Location",
"parameters": [
{
"description": "Location Data",
@@ -806,6 +1266,41 @@
}
}
},
+ "/v1/locations/tree": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Get Locations Tree",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "include items in response tree",
+ "name": "withItems",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/locations/{id}": {
"get": {
"security": [
@@ -819,7 +1314,7 @@
"tags": [
"Locations"
],
- "summary": "Gets a location and fields",
+ "summary": "Get Location",
"parameters": [
{
"type": "string",
@@ -850,7 +1345,7 @@
"tags": [
"Locations"
],
- "summary": "updates a location",
+ "summary": "Update Location",
"parameters": [
{
"type": "string",
@@ -890,7 +1385,7 @@
"tags": [
"Locations"
],
- "summary": "deletes a location",
+ "summary": "Delete Location",
"parameters": [
{
"type": "string",
@@ -907,6 +1402,223 @@
}
}
},
+ "/v1/notifiers": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Get Notifiers",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Create Notifier",
+ "parameters": [
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/notifiers/test": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Test Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "URL",
+ "name": "url",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/notifiers/{id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Update Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Delete a Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/qrcode": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Create QR Code",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "data to be encoded into qrcode",
+ "name": "data",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "image/jpeg",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/reporting/bill-of-materials": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Reporting"
+ ],
+ "summary": "Export Bill of Materials",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
"/v1/status": {
"get": {
"produces": [
@@ -915,12 +1627,12 @@
"tags": [
"Base"
],
- "summary": "Retrieves the basic information about the API",
+ "summary": "Application Info",
"responses": {
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/v1.ApiSummary"
+ "$ref": "#/definitions/v1.APISummary"
}
}
}
@@ -936,7 +1648,7 @@
"tags": [
"User"
],
- "summary": "Updates the users password",
+ "summary": "Change Password",
"parameters": [
{
"description": "Password Payload",
@@ -982,6 +1694,21 @@
"description": "string",
"name": "password",
"in": "formData"
+ },
+ {
+ "description": "Login Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.LoginForm"
+ }
+ },
+ {
+ "type": "string",
+ "description": "auth provider",
+ "name": "provider",
+ "in": "query"
}
],
"responses": {
@@ -1039,7 +1766,7 @@
"tags": [
"User"
],
- "summary": "Get the current user",
+ "summary": "Register New User",
"parameters": [
{
"description": "User Data",
@@ -1071,14 +1798,14 @@
"tags": [
"User"
],
- "summary": "Get the current user",
+ "summary": "Get User Self",
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
- "$ref": "#/definitions/server.Result"
+ "$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1105,7 +1832,7 @@
"tags": [
"User"
],
- "summary": "Update the current user",
+ "summary": "Update Account",
"parameters": [
{
"description": "User Data",
@@ -1123,7 +1850,7 @@
"schema": {
"allOf": [
{
- "$ref": "#/definitions/server.Result"
+ "$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1150,7 +1877,7 @@
"tags": [
"User"
],
- "summary": "Deletes the user account",
+ "summary": "Delete Account",
"responses": {
"204": {
"description": "No Content"
@@ -1160,6 +1887,23 @@
}
},
"definitions": {
+ "currencies.Currency": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string"
+ },
+ "local": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "symbol": {
+ "type": "string"
+ }
+ }
+ },
"repo.DocumentOut": {
"type": "object",
"properties": {
@@ -1197,6 +1941,9 @@
"repo.GroupStatistics": {
"type": "object",
"properties": {
+ "totalItemPrice": {
+ "type": "number"
+ },
"totalItems": {
"type": "integer"
},
@@ -1208,6 +1955,9 @@
},
"totalUsers": {
"type": "integer"
+ },
+ "totalWithWarranty": {
+ "type": "integer"
}
}
},
@@ -1234,6 +1984,9 @@
"id": {
"type": "string"
},
+ "primary": {
+ "type": "boolean"
+ },
"type": {
"type": "string"
},
@@ -1245,6 +1998,9 @@
"repo.ItemAttachmentUpdate": {
"type": "object",
"properties": {
+ "primary": {
+ "type": "boolean"
+ },
"title": {
"type": "string"
},
@@ -1255,9 +2011,13 @@
},
"repo.ItemCreate": {
"type": "object",
+ "required": [
+ "name"
+ ],
"properties": {
"description": {
- "type": "string"
+ "type": "string",
+ "maxLength": 1000
},
"labelIds": {
"type": "array",
@@ -1270,7 +2030,9 @@
"type": "string"
},
"name": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
},
"parentId": {
"type": "string",
@@ -1296,9 +2058,6 @@
"textValue": {
"type": "string"
},
- "timeValue": {
- "type": "string"
- },
"type": {
"type": "string"
}
@@ -1310,18 +2069,16 @@
"archived": {
"type": "boolean"
},
+ "assetId": {
+ "type": "string",
+ "example": "0"
+ },
"attachments": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemAttachment"
}
},
- "children": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"createdAt": {
"type": "string"
},
@@ -1337,6 +2094,9 @@
"id": {
"type": "string"
},
+ "imageId": {
+ "type": "string"
+ },
"insured": {
"type": "boolean"
},
@@ -1352,9 +2112,13 @@
},
"location": {
"description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.LocationSummary"
+ "x-omitempty": true
},
"manufacturer": {
"type": "string"
@@ -1370,9 +2134,13 @@
"type": "string"
},
"parent": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.ItemSummary"
+ "x-omitempty": true
},
"purchaseFrom": {
"type": "string"
@@ -1416,6 +2184,33 @@
}
}
},
+ "repo.ItemPatch": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "quantity": {
+ "type": "integer",
+ "x-nullable": true,
+ "x-omitempty": true
+ }
+ }
+ },
+ "repo.ItemPath": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "$ref": "#/definitions/repo.ItemType"
+ }
+ }
+ },
"repo.ItemSummary": {
"type": "object",
"properties": {
@@ -1431,6 +2226,9 @@
"id": {
"type": "string"
},
+ "imageId": {
+ "type": "string"
+ },
"insured": {
"type": "boolean"
},
@@ -1442,13 +2240,21 @@
},
"location": {
"description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
"x-nullable": true,
- "x-omitempty": true,
- "$ref": "#/definitions/repo.LocationSummary"
+ "x-omitempty": true
},
"name": {
"type": "string"
},
+ "purchasePrice": {
+ "type": "string",
+ "example": "0"
+ },
"quantity": {
"type": "integer"
},
@@ -1457,12 +2263,26 @@
}
}
},
+ "repo.ItemType": {
+ "type": "string",
+ "enum": [
+ "location",
+ "item"
+ ],
+ "x-enum-varnames": [
+ "ItemTypeLocation",
+ "ItemTypeItem"
+ ]
+ },
"repo.ItemUpdate": {
"type": "object",
"properties": {
"archived": {
"type": "boolean"
},
+ "assetId": {
+ "type": "string"
+ },
"description": {
"type": "string"
},
@@ -1552,15 +2372,21 @@
},
"repo.LabelCreate": {
"type": "object",
+ "required": [
+ "name"
+ ],
"properties": {
"color": {
"type": "string"
},
"description": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255
},
"name": {
- "type": "string"
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
}
}
},
@@ -1576,12 +2402,6 @@
"id": {
"type": "string"
},
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"name": {
"type": "string"
},
@@ -1618,6 +2438,10 @@
},
"name": {
"type": "string"
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true
}
}
},
@@ -1639,12 +2463,6 @@
"id": {
"type": "string"
},
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/repo.ItemSummary"
- }
- },
"name": {
"type": "string"
},
@@ -1717,6 +2535,161 @@
}
}
},
+ "repo.MaintenanceEntry": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryCreate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryUpdate": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceLog": {
+ "type": "object",
+ "properties": {
+ "costAverage": {
+ "type": "number"
+ },
+ "costTotal": {
+ "type": "number"
+ },
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ },
+ "itemId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierCreate": {
+ "type": "object",
+ "required": [
+ "name",
+ "url"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierOut": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ },
+ "userId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierUpdate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
"repo.PaginationResult-repo_ItemSummary": {
"type": "object",
"properties": {
@@ -1737,6 +2710,40 @@
}
}
},
+ "repo.TotalsByOrganizer": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "total": {
+ "type": "number"
+ }
+ }
+ },
+ "repo.TreeItem": {
+ "type": "object",
+ "properties": {
+ "children": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
"repo.UserOut": {
"type": "object",
"properties": {
@@ -1774,37 +2781,41 @@
}
}
},
- "server.ErrorResponse": {
+ "repo.ValueOverTime": {
"type": "object",
"properties": {
- "error": {
+ "end": {
"type": "string"
},
- "fields": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ValueOverTimeEntry"
}
- }
- }
- },
- "server.Result": {
- "type": "object",
- "properties": {
- "details": {},
- "error": {
- "type": "boolean"
},
- "item": {},
- "message": {
+ "start": {
"type": "string"
+ },
+ "valueAtEnd": {
+ "type": "number"
+ },
+ "valueAtStart": {
+ "type": "number"
}
}
},
- "server.Results": {
+ "repo.ValueOverTimeEntry": {
"type": "object",
"properties": {
- "items": {}
+ "date": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "number"
+ }
}
},
"services.UserRegistration": {
@@ -1824,9 +2835,12 @@
}
}
},
- "v1.ApiSummary": {
+ "v1.APISummary": {
"type": "object",
"properties": {
+ "allowRegistration": {
+ "type": "boolean"
+ },
"build": {
"$ref": "#/definitions/v1.Build"
},
@@ -1850,6 +2864,14 @@
}
}
},
+ "v1.ActionAmountResult": {
+ "type": "object",
+ "properties": {
+ "completed": {
+ "type": "integer"
+ }
+ }
+ },
"v1.Build": {
"type": "object",
"properties": {
@@ -1891,12 +2913,17 @@
},
"v1.GroupInvitationCreate": {
"type": "object",
+ "required": [
+ "uses"
+ ],
"properties": {
"expiresAt": {
"type": "string"
},
"uses": {
- "type": "integer"
+ "type": "integer",
+ "maximum": 100,
+ "minimum": 1
}
}
},
@@ -1908,9 +2935,26 @@
}
}
},
+ "v1.LoginForm": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "stayLoggedIn": {
+ "type": "boolean"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
"v1.TokenResponse": {
"type": "object",
"properties": {
+ "attachmentToken": {
+ "type": "string"
+ },
"expiresAt": {
"type": "string"
},
@@ -1918,6 +2962,23 @@
"type": "string"
}
}
+ },
+ "v1.Wrapped": {
+ "type": "object",
+ "properties": {
+ "item": {}
+ }
+ },
+ "validate.ErrorResponse": {
+ "type": "object",
+ "properties": {
+ "error": {
+ "type": "string"
+ },
+ "fields": {
+ "type": "string"
+ }
+ }
}
},
"securityDefinitions": {
diff --git a/backend/app/api/static/docs/swagger.yaml b/backend/app/api/static/docs/swagger.yaml
index b4f8c78..dbb31e6 100644
--- a/backend/app/api/static/docs/swagger.yaml
+++ b/backend/app/api/static/docs/swagger.yaml
@@ -1,5 +1,16 @@
basePath: /api
definitions:
+ currencies.Currency:
+ properties:
+ code:
+ type: string
+ local:
+ type: string
+ name:
+ type: string
+ symbol:
+ type: string
+ type: object
repo.DocumentOut:
properties:
id:
@@ -24,6 +35,8 @@ definitions:
type: object
repo.GroupStatistics:
properties:
+ totalItemPrice:
+ type: number
totalItems:
type: integer
totalLabels:
@@ -32,6 +45,8 @@ definitions:
type: integer
totalUsers:
type: integer
+ totalWithWarranty:
+ type: integer
type: object
repo.GroupUpdate:
properties:
@@ -48,6 +63,8 @@ definitions:
$ref: '#/definitions/repo.DocumentOut'
id:
type: string
+ primary:
+ type: boolean
type:
type: string
updatedAt:
@@ -55,6 +72,8 @@ definitions:
type: object
repo.ItemAttachmentUpdate:
properties:
+ primary:
+ type: boolean
title:
type: string
type:
@@ -63,6 +82,7 @@ definitions:
repo.ItemCreate:
properties:
description:
+ maxLength: 1000
type: string
labelIds:
items:
@@ -72,10 +92,14 @@ definitions:
description: Edges
type: string
name:
+ maxLength: 255
+ minLength: 1
type: string
parentId:
type: string
x-nullable: true
+ required:
+ - name
type: object
repo.ItemField:
properties:
@@ -89,8 +113,6 @@ definitions:
type: integer
textValue:
type: string
- timeValue:
- type: string
type:
type: string
type: object
@@ -98,14 +120,13 @@ definitions:
properties:
archived:
type: boolean
+ assetId:
+ example: "0"
+ type: string
attachments:
items:
$ref: '#/definitions/repo.ItemAttachment'
type: array
- children:
- items:
- $ref: '#/definitions/repo.ItemSummary'
- type: array
createdAt:
type: string
description:
@@ -116,6 +137,8 @@ definitions:
type: array
id:
type: string
+ imageId:
+ type: string
insured:
type: boolean
labels:
@@ -126,7 +149,8 @@ definitions:
description: Warranty
type: boolean
location:
- $ref: '#/definitions/repo.LocationSummary'
+ allOf:
+ - $ref: '#/definitions/repo.LocationSummary'
description: Edges
x-nullable: true
x-omitempty: true
@@ -140,7 +164,8 @@ definitions:
description: Extras
type: string
parent:
- $ref: '#/definitions/repo.ItemSummary'
+ allOf:
+ - $ref: '#/definitions/repo.ItemSummary'
x-nullable: true
x-omitempty: true
purchaseFrom:
@@ -172,6 +197,24 @@ definitions:
warrantyExpires:
type: string
type: object
+ repo.ItemPatch:
+ properties:
+ id:
+ type: string
+ quantity:
+ type: integer
+ x-nullable: true
+ x-omitempty: true
+ type: object
+ repo.ItemPath:
+ properties:
+ id:
+ type: string
+ name:
+ type: string
+ type:
+ $ref: '#/definitions/repo.ItemType'
+ type: object
repo.ItemSummary:
properties:
archived:
@@ -182,6 +225,8 @@ definitions:
type: string
id:
type: string
+ imageId:
+ type: string
insured:
type: boolean
labels:
@@ -189,21 +234,35 @@ definitions:
$ref: '#/definitions/repo.LabelSummary'
type: array
location:
- $ref: '#/definitions/repo.LocationSummary'
+ allOf:
+ - $ref: '#/definitions/repo.LocationSummary'
description: Edges
x-nullable: true
x-omitempty: true
name:
type: string
+ purchasePrice:
+ example: "0"
+ type: string
quantity:
type: integer
updatedAt:
type: string
type: object
+ repo.ItemType:
+ enum:
+ - location
+ - item
+ type: string
+ x-enum-varnames:
+ - ItemTypeLocation
+ - ItemTypeItem
repo.ItemUpdate:
properties:
archived:
type: boolean
+ assetId:
+ type: string
description:
type: string
fields:
@@ -270,9 +329,14 @@ definitions:
color:
type: string
description:
+ maxLength: 255
type: string
name:
+ maxLength: 255
+ minLength: 1
type: string
+ required:
+ - name
type: object
repo.LabelOut:
properties:
@@ -282,10 +346,6 @@ definitions:
type: string
id:
type: string
- items:
- items:
- $ref: '#/definitions/repo.ItemSummary'
- type: array
name:
type: string
updatedAt:
@@ -310,6 +370,9 @@ definitions:
type: string
name:
type: string
+ parentId:
+ type: string
+ x-nullable: true
type: object
repo.LocationOut:
properties:
@@ -323,10 +386,6 @@ definitions:
type: string
id:
type: string
- items:
- items:
- $ref: '#/definitions/repo.ItemSummary'
- type: array
name:
type: string
parent:
@@ -374,6 +433,110 @@ definitions:
type: string
x-nullable: true
type: object
+ repo.MaintenanceEntry:
+ properties:
+ completedDate:
+ type: string
+ cost:
+ example: "0"
+ type: string
+ description:
+ type: string
+ id:
+ type: string
+ name:
+ type: string
+ scheduledDate:
+ type: string
+ type: object
+ repo.MaintenanceEntryCreate:
+ properties:
+ completedDate:
+ type: string
+ cost:
+ example: "0"
+ type: string
+ description:
+ type: string
+ name:
+ type: string
+ scheduledDate:
+ type: string
+ required:
+ - name
+ type: object
+ repo.MaintenanceEntryUpdate:
+ properties:
+ completedDate:
+ type: string
+ cost:
+ example: "0"
+ type: string
+ description:
+ type: string
+ name:
+ type: string
+ scheduledDate:
+ type: string
+ type: object
+ repo.MaintenanceLog:
+ properties:
+ costAverage:
+ type: number
+ costTotal:
+ type: number
+ entries:
+ items:
+ $ref: '#/definitions/repo.MaintenanceEntry'
+ type: array
+ itemId:
+ type: string
+ type: object
+ repo.NotifierCreate:
+ properties:
+ isActive:
+ type: boolean
+ name:
+ maxLength: 255
+ minLength: 1
+ type: string
+ url:
+ type: string
+ required:
+ - name
+ - url
+ type: object
+ repo.NotifierOut:
+ properties:
+ createdAt:
+ type: string
+ groupId:
+ type: string
+ id:
+ type: string
+ isActive:
+ type: boolean
+ name:
+ type: string
+ updatedAt:
+ type: string
+ userId:
+ type: string
+ type: object
+ repo.NotifierUpdate:
+ properties:
+ isActive:
+ type: boolean
+ name:
+ maxLength: 255
+ minLength: 1
+ type: string
+ url:
+ type: string
+ x-nullable: true
+ required:
+ - name
+ type: object
repo.PaginationResult-repo_ItemSummary:
properties:
items:
@@ -387,6 +550,28 @@ definitions:
total:
type: integer
type: object
+ repo.TotalsByOrganizer:
+ properties:
+ id:
+ type: string
+ name:
+ type: string
+ total:
+ type: number
+ type: object
+ repo.TreeItem:
+ properties:
+ children:
+ items:
+ $ref: '#/definitions/repo.TreeItem'
+ type: array
+ id:
+ type: string
+ name:
+ type: string
+ type:
+ type: string
+ type: object
repo.UserOut:
properties:
email:
@@ -411,27 +596,29 @@ definitions:
name:
type: string
type: object
- server.ErrorResponse:
+ repo.ValueOverTime:
properties:
- error:
+ end:
type: string
- fields:
- additionalProperties:
- type: string
- type: object
- type: object
- server.Result:
- properties:
- details: {}
- error:
- type: boolean
- item: {}
- message:
+ entries:
+ items:
+ $ref: '#/definitions/repo.ValueOverTimeEntry'
+ type: array
+ start:
type: string
+ valueAtEnd:
+ type: number
+ valueAtStart:
+ type: number
type: object
- server.Results:
+ repo.ValueOverTimeEntry:
properties:
- items: {}
+ date:
+ type: string
+ name:
+ type: string
+ value:
+ type: number
type: object
services.UserRegistration:
properties:
@@ -444,8 +631,10 @@ definitions:
token:
type: string
type: object
- v1.ApiSummary:
+ v1.APISummary:
properties:
+ allowRegistration:
+ type: boolean
build:
$ref: '#/definitions/v1.Build'
demo:
@@ -461,6 +650,11 @@ definitions:
type: string
type: array
type: object
+ v1.ActionAmountResult:
+ properties:
+ completed:
+ type: integer
+ type: object
v1.Build:
properties:
buildTime:
@@ -491,31 +685,145 @@ definitions:
expiresAt:
type: string
uses:
+ maximum: 100
+ minimum: 1
type: integer
+ required:
+ - uses
type: object
v1.ItemAttachmentToken:
properties:
token:
type: string
type: object
+ v1.LoginForm:
+ properties:
+ password:
+ type: string
+ stayLoggedIn:
+ type: boolean
+ username:
+ type: string
+ type: object
v1.TokenResponse:
properties:
+ attachmentToken:
+ type: string
expiresAt:
type: string
token:
type: string
type: object
+ v1.Wrapped:
+ properties:
+ item: {}
+ type: object
+ validate.ErrorResponse:
+ properties:
+ error:
+ type: string
+ fields:
+ type: string
+ type: object
info:
contact:
name: Don't
- description: This is a simple Rest API Server Template that implements some basic
- User and Authentication patterns to help you get started and bootstrap your next
- project!.
- license:
- name: MIT
- title: Go API Templates
+ description: Track, Manage, and Organize your Things.
+ title: Homebox API
version: "1.0"
paths:
+ /v1/actions/ensure-asset-ids:
+ post:
+ description: Ensures all items in the database have an asset ID
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.ActionAmountResult'
+ security:
+ - Bearer: []
+ summary: Ensure Asset IDs
+ tags:
+ - Actions
+ /v1/actions/ensure-import-refs:
+ post:
+ description: Ensures all items in the database have an import ref
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.ActionAmountResult'
+ security:
+ - Bearer: []
+ summary: Ensures Import Refs
+ tags:
+ - Actions
+ /v1/actions/set-primary-photos:
+ post:
+ description: Sets the first photo of each item as the primary photo
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.ActionAmountResult'
+ security:
+ - Bearer: []
+ summary: Set Primary Photos
+ tags:
+ - Actions
+ /v1/actions/zero-item-time-fields:
+ post:
+ description: Resets all item date fields to the beginning of the day
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.ActionAmountResult'
+ security:
+ - Bearer: []
+ summary: Zero Out Time Fields
+ tags:
+ - Actions
+ /v1/assets/{id}:
+ get:
+ parameters:
+ - description: Asset ID
+ in: path
+ name: id
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.PaginationResult-repo_ItemSummary'
+ security:
+ - Bearer: []
+ summary: Get Item by Asset ID
+ tags:
+ - Items
+ /v1/currency:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/currencies.Currency'
+ summary: Currency
+ tags:
+ - Base
/v1/groups:
get:
produces:
@@ -527,7 +835,7 @@ paths:
$ref: '#/definitions/repo.Group'
security:
- Bearer: []
- summary: Get the current user's group
+ summary: Get Group
tags:
- Group
put:
@@ -547,7 +855,7 @@ paths:
$ref: '#/definitions/repo.Group'
security:
- Bearer: []
- summary: Updates some fields of the current users group
+ summary: Update Group
tags:
- Group
/v1/groups/invitations:
@@ -568,7 +876,7 @@ paths:
$ref: '#/definitions/v1.GroupInvitation'
security:
- Bearer: []
- summary: Get the current user
+ summary: Create Group Invitation
tags:
- Group
/v1/groups/statistics:
@@ -582,9 +890,64 @@ paths:
$ref: '#/definitions/repo.GroupStatistics'
security:
- Bearer: []
- summary: Get the current user's group
+ summary: Get Group Statistics
tags:
- - Group
+ - Statistics
+ /v1/groups/statistics/labels:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ $ref: '#/definitions/repo.TotalsByOrganizer'
+ type: array
+ security:
+ - Bearer: []
+ summary: Get Label Statistics
+ tags:
+ - Statistics
+ /v1/groups/statistics/locations:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ $ref: '#/definitions/repo.TotalsByOrganizer'
+ type: array
+ security:
+ - Bearer: []
+ summary: Get Location Statistics
+ tags:
+ - Statistics
+ /v1/groups/statistics/purchase-price:
+ get:
+ parameters:
+ - description: start date
+ in: query
+ name: start
+ type: string
+ - description: end date
+ in: query
+ name: end
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.ValueOverTime'
+ security:
+ - Bearer: []
+ summary: Get Purchase Price Statistics
+ tags:
+ - Statistics
/v1/items:
get:
parameters:
@@ -614,6 +977,13 @@ paths:
type: string
name: locations
type: array
+ - collectionFormat: multi
+ description: parent Ids
+ in: query
+ items:
+ type: string
+ name: parentIds
+ type: array
produces:
- application/json
responses:
@@ -623,7 +993,7 @@ paths:
$ref: '#/definitions/repo.PaginationResult-repo_ItemSummary'
security:
- Bearer: []
- summary: Get All Items
+ summary: Query All Items
tags:
- Items
post:
@@ -637,13 +1007,13 @@ paths:
produces:
- application/json
responses:
- "200":
- description: OK
+ "201":
+ description: Created
schema:
$ref: '#/definitions/repo.ItemSummary'
security:
- Bearer: []
- summary: Create a new item
+ summary: Create Item
tags:
- Items
/v1/items/{id}:
@@ -661,7 +1031,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: deletes a item
+ summary: Delete Item
tags:
- Items
get:
@@ -680,7 +1050,32 @@ paths:
$ref: '#/definitions/repo.ItemOut'
security:
- Bearer: []
- summary: Gets a item and fields
+ summary: Get Item
+ tags:
+ - Items
+ patch:
+ parameters:
+ - description: Item ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: Item Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/repo.ItemPatch'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.ItemOut'
+ security:
+ - Bearer: []
+ summary: Update Item
tags:
- Items
put:
@@ -705,7 +1100,7 @@ paths:
$ref: '#/definitions/repo.ItemOut'
security:
- Bearer: []
- summary: updates a item
+ summary: Update Item
tags:
- Items
/v1/items/{id}/attachments:
@@ -741,10 +1136,10 @@ paths:
"422":
description: Unprocessable Entity
schema:
- $ref: '#/definitions/server.ErrorResponse'
+ $ref: '#/definitions/validate.ErrorResponse'
security:
- Bearer: []
- summary: imports items into the database
+ summary: Create Item Attachment
tags:
- Items Attachments
/v1/items/{id}/attachments/{attachment_id}:
@@ -765,7 +1160,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: retrieves an attachment for an item
+ summary: Delete Item Attachment
tags:
- Items Attachments
get:
@@ -789,7 +1184,7 @@ paths:
$ref: '#/definitions/v1.ItemAttachmentToken'
security:
- Bearer: []
- summary: retrieves an attachment for an item
+ summary: Get Item Attachment
tags:
- Items Attachments
put:
@@ -817,10 +1212,76 @@ paths:
$ref: '#/definitions/repo.ItemOut'
security:
- Bearer: []
- summary: retrieves an attachment for an item
+ summary: Update Item Attachment
tags:
- Items Attachments
- /v1/items/{id}/attachments/download:
+ /v1/items/{id}/maintenance:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.MaintenanceLog'
+ security:
+ - Bearer: []
+ summary: Get Maintenance Log
+ tags:
+ - Maintenance
+ post:
+ parameters:
+ - description: Entry Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/repo.MaintenanceEntryCreate'
+ produces:
+ - application/json
+ responses:
+ "201":
+ description: Created
+ schema:
+ $ref: '#/definitions/repo.MaintenanceEntry'
+ security:
+ - Bearer: []
+ summary: Create Maintenance Entry
+ tags:
+ - Maintenance
+ /v1/items/{id}/maintenance/{entry_id}:
+ delete:
+ produces:
+ - application/json
+ responses:
+ "204":
+ description: No Content
+ security:
+ - Bearer: []
+ summary: Delete Maintenance Entry
+ tags:
+ - Maintenance
+ put:
+ parameters:
+ - description: Entry Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/repo.MaintenanceEntryUpdate'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.MaintenanceEntry'
+ security:
+ - Bearer: []
+ summary: Update Maintenance Entry
+ tags:
+ - Maintenance
+ /v1/items/{id}/path:
get:
parameters:
- description: Item ID
@@ -828,21 +1289,64 @@ paths:
name: id
required: true
type: string
- - description: Attachment token
- in: query
- name: token
- required: true
- type: string
produces:
- - application/octet-stream
+ - application/json
responses:
"200":
description: OK
+ schema:
+ items:
+ $ref: '#/definitions/repo.ItemPath'
+ type: array
security:
- Bearer: []
- summary: retrieves an attachment for an item
+ summary: Get the full path of an item
tags:
- - Items Attachments
+ - Items
+ /v1/items/export:
+ get:
+ responses:
+ "200":
+ description: text/csv
+ schema:
+ type: string
+ security:
+ - Bearer: []
+ summary: Export Items
+ tags:
+ - Items
+ /v1/items/fields:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ type: string
+ type: array
+ security:
+ - Bearer: []
+ summary: Get All Custom Field Names
+ tags:
+ - Items
+ /v1/items/fields/values:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ type: string
+ type: array
+ security:
+ - Bearer: []
+ summary: Get All Custom Field Values
+ tags:
+ - Items
/v1/items/import:
post:
parameters:
@@ -858,7 +1362,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: imports items into the database
+ summary: Import Items
tags:
- Items
/v1/labels:
@@ -869,14 +1373,9 @@ paths:
"200":
description: OK
schema:
- allOf:
- - $ref: '#/definitions/server.Results'
- - properties:
- items:
- items:
- $ref: '#/definitions/repo.LabelOut'
- type: array
- type: object
+ items:
+ $ref: '#/definitions/repo.LabelOut'
+ type: array
security:
- Bearer: []
summary: Get All Labels
@@ -899,7 +1398,7 @@ paths:
$ref: '#/definitions/repo.LabelSummary'
security:
- Bearer: []
- summary: Create a new label
+ summary: Create Label
tags:
- Labels
/v1/labels/{id}:
@@ -917,7 +1416,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: deletes a label
+ summary: Delete Label
tags:
- Labels
get:
@@ -936,7 +1435,7 @@ paths:
$ref: '#/definitions/repo.LabelOut'
security:
- Bearer: []
- summary: Gets a label and fields
+ summary: Get Label
tags:
- Labels
put:
@@ -955,25 +1454,25 @@ paths:
$ref: '#/definitions/repo.LabelOut'
security:
- Bearer: []
- summary: updates a label
+ summary: Update Label
tags:
- Labels
/v1/locations:
get:
+ parameters:
+ - description: Filter locations with parents
+ in: query
+ name: filterChildren
+ type: boolean
produces:
- application/json
responses:
"200":
description: OK
schema:
- allOf:
- - $ref: '#/definitions/server.Results'
- - properties:
- items:
- items:
- $ref: '#/definitions/repo.LocationOutCount'
- type: array
- type: object
+ items:
+ $ref: '#/definitions/repo.LocationOutCount'
+ type: array
security:
- Bearer: []
summary: Get All Locations
@@ -996,7 +1495,7 @@ paths:
$ref: '#/definitions/repo.LocationSummary'
security:
- Bearer: []
- summary: Create a new location
+ summary: Create Location
tags:
- Locations
/v1/locations/{id}:
@@ -1014,7 +1513,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: deletes a location
+ summary: Delete Location
tags:
- Locations
get:
@@ -1033,7 +1532,7 @@ paths:
$ref: '#/definitions/repo.LocationOut'
security:
- Bearer: []
- summary: Gets a location and fields
+ summary: Get Location
tags:
- Locations
put:
@@ -1058,9 +1557,161 @@ paths:
$ref: '#/definitions/repo.LocationOut'
security:
- Bearer: []
- summary: updates a location
+ summary: Update Location
tags:
- Locations
+ /v1/locations/tree:
+ get:
+ parameters:
+ - description: include items in response tree
+ in: query
+ name: withItems
+ type: boolean
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ $ref: '#/definitions/repo.TreeItem'
+ type: array
+ security:
+ - Bearer: []
+ summary: Get Locations Tree
+ tags:
+ - Locations
+ /v1/notifiers:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ $ref: '#/definitions/repo.NotifierOut'
+ type: array
+ security:
+ - Bearer: []
+ summary: Get Notifiers
+ tags:
+ - Notifiers
+ post:
+ parameters:
+ - description: Notifier Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/repo.NotifierCreate'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.NotifierOut'
+ security:
+ - Bearer: []
+ summary: Create Notifier
+ tags:
+ - Notifiers
+ /v1/notifiers/{id}:
+ delete:
+ parameters:
+ - description: Notifier ID
+ in: path
+ name: id
+ required: true
+ type: string
+ responses:
+ "204":
+ description: No Content
+ security:
+ - Bearer: []
+ summary: Delete a Notifier
+ tags:
+ - Notifiers
+ put:
+ parameters:
+ - description: Notifier ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: Notifier Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/repo.NotifierUpdate'
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.NotifierOut'
+ security:
+ - Bearer: []
+ summary: Update Notifier
+ tags:
+ - Notifiers
+ /v1/notifiers/test:
+ post:
+ parameters:
+ - description: Notifier ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: URL
+ in: query
+ name: url
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "204":
+ description: No Content
+ security:
+ - Bearer: []
+ summary: Test Notifier
+ tags:
+ - Notifiers
+ /v1/qrcode:
+ get:
+ parameters:
+ - description: data to be encoded into qrcode
+ in: query
+ name: data
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: image/jpeg
+ schema:
+ type: string
+ security:
+ - Bearer: []
+ summary: Create QR Code
+ tags:
+ - Items
+ /v1/reporting/bill-of-materials:
+ get:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: text/csv
+ schema:
+ type: string
+ security:
+ - Bearer: []
+ summary: Export Bill of Materials
+ tags:
+ - Reporting
/v1/status:
get:
produces:
@@ -1069,8 +1720,8 @@ paths:
"200":
description: OK
schema:
- $ref: '#/definitions/v1.ApiSummary'
- summary: Retrieves the basic information about the API
+ $ref: '#/definitions/v1.APISummary'
+ summary: Application Info
tags:
- Base
/v1/users/change-password:
@@ -1087,7 +1738,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: Updates the users password
+ summary: Change Password
tags:
- User
/v1/users/login:
@@ -1106,6 +1757,16 @@ paths:
in: formData
name: password
type: string
+ - description: Login Data
+ in: body
+ name: payload
+ required: true
+ schema:
+ $ref: '#/definitions/v1.LoginForm'
+ - description: auth provider
+ in: query
+ name: provider
+ type: string
produces:
- application/json
responses:
@@ -1153,7 +1814,7 @@ paths:
responses:
"204":
description: No Content
- summary: Get the current user
+ summary: Register New User
tags:
- User
/v1/users/self:
@@ -1165,7 +1826,7 @@ paths:
description: No Content
security:
- Bearer: []
- summary: Deletes the user account
+ summary: Delete Account
tags:
- User
get:
@@ -1176,14 +1837,14 @@ paths:
description: OK
schema:
allOf:
- - $ref: '#/definitions/server.Result'
+ - $ref: '#/definitions/v1.Wrapped'
- properties:
item:
$ref: '#/definitions/repo.UserOut'
type: object
security:
- Bearer: []
- summary: Get the current user
+ summary: Get User Self
tags:
- User
put:
@@ -1201,14 +1862,14 @@ paths:
description: OK
schema:
allOf:
- - $ref: '#/definitions/server.Result'
+ - $ref: '#/definitions/v1.Wrapped'
- properties:
item:
$ref: '#/definitions/repo.UserUpdate'
type: object
security:
- Bearer: []
- summary: Update the current user
+ summary: Update Account
tags:
- User
securityDefinitions:
diff --git a/backend/app/tools/migrations/main.go b/backend/app/tools/migrations/main.go
index a2f6624..e53e7ba 100644
--- a/backend/app/tools/migrations/main.go
+++ b/backend/app/tools/migrations/main.go
@@ -2,6 +2,7 @@ package main
import (
"context"
+ "fmt"
"log"
"os"
@@ -39,4 +40,6 @@ func main() {
if err != nil {
log.Fatalf("failed generating migration file: %v", err)
}
+
+ fmt.Println("Migration file generated successfully.")
}
diff --git a/backend/app/tools/typegen/main.go b/backend/app/tools/typegen/main.go
new file mode 100644
index 0000000..5f4d8da
--- /dev/null
+++ b/backend/app/tools/typegen/main.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+)
+
+type ReReplace struct {
+ Regex *regexp.Regexp
+ Text string
+}
+
+func NewReReplace(regex string, replace string) ReReplace {
+ return ReReplace{
+ Regex: regexp.MustCompile(regex),
+ Text: replace,
+ }
+}
+
+func NewReDate(dateStr string) ReReplace {
+ return ReReplace{
+ Regex: regexp.MustCompile(fmt.Sprintf(`%s: string`, dateStr)),
+ Text: fmt.Sprintf(`%s: Date | string`, dateStr),
+ }
+}
+
+func main() {
+ if len(os.Args) != 2 {
+ fmt.Println("Please provide a file path as an argument")
+ os.Exit(1)
+ }
+
+ path := os.Args[1]
+
+ fmt.Printf("Processing %s\n", path)
+
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ fmt.Printf("File %s does not exist\n", path)
+ os.Exit(1)
+ }
+
+ text := "/* post-processed by ./scripts/process-types.go */\n"
+ data, err := os.ReadFile(path)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ text += string(data)
+
+ replaces := [...]ReReplace{
+ NewReReplace(` Repo`, " "),
+ NewReReplace(` PaginationResultRepo`, " PaginationResult"),
+ NewReReplace(` Services`, " "),
+ NewReReplace(` V1`, " "),
+ NewReReplace(`\?:`, ":"),
+ NewReReplace(`(\w+):\s(.*null.*)`, "$1?: $2"), // make null union types optional
+ NewReDate("createdAt"),
+ NewReDate("updatedAt"),
+ NewReDate("soldTime"),
+ NewReDate("purchaseTime"),
+ NewReDate("warrantyExpires"),
+ NewReDate("expiresAt"),
+ NewReDate("date"),
+ NewReDate("completedDate"),
+ NewReDate("scheduledDate"),
+ }
+
+ for _, replace := range replaces {
+ fmt.Printf("Replacing '%v' -> '%s'\n", replace.Regex, replace.Text)
+ text = replace.Regex.ReplaceAllString(text, replace.Text)
+ }
+
+ err = os.WriteFile(path, []byte(text), 0644)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ os.Exit(0)
+}
diff --git a/backend/go.mod b/backend/go.mod
index 3127969..d29a620 100644
--- a/backend/go.mod
+++ b/backend/go.mod
@@ -1,49 +1,77 @@
module github.com/hay-kot/homebox/backend
-go 1.19
+go 1.23.0
+
+toolchain go1.24.2
require (
- ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a
- entgo.io/ent v0.11.3
- github.com/ardanlabs/conf/v2 v2.2.0
- github.com/go-chi/chi/v5 v5.0.7
- github.com/go-playground/validator/v10 v10.11.1
- github.com/google/uuid v1.3.0
- github.com/mattn/go-sqlite3 v1.14.16
- github.com/rs/zerolog v1.28.0
- github.com/stretchr/testify v1.8.1
- github.com/swaggo/http-swagger v1.3.3
- github.com/swaggo/swag v1.8.7
- golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
+ ariga.io/atlas v0.32.0
+ entgo.io/ent v0.14.4
+ github.com/ardanlabs/conf/v3 v3.7.1
+ github.com/containrrr/shoutrrr v0.8.0
+ github.com/go-chi/chi/v5 v5.2.1
+ github.com/go-playground/validator/v10 v10.26.0
+ github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/schema v1.4.1
+ github.com/hay-kot/httpkit v0.0.11
+ github.com/mattn/go-sqlite3 v1.14.27
+ github.com/olahol/melody v1.2.1
+ github.com/pkg/errors v0.9.1
+ github.com/rs/zerolog v1.34.0
+ github.com/stretchr/testify v1.10.0
+ github.com/swaggo/http-swagger/v2 v2.0.2
+ github.com/swaggo/swag v1.16.4
+ github.com/yeqown/go-qrcode/v2 v2.2.5
+ github.com/yeqown/go-qrcode/writer/standard v1.2.5
+ golang.org/x/crypto v0.37.0
+ modernc.org/sqlite v1.37.0
)
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
- github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
+ github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
+ github.com/bmatcuk/doublestar v1.3.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/go-openapi/inflect v0.19.0 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.20.0 // indirect
- github.com/go-openapi/spec v0.20.7 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
- github.com/go-playground/locales v0.14.0 // indirect
- github.com/go-playground/universal-translator v0.18.0 // indirect
- github.com/google/go-cmp v0.5.9 // indirect
- github.com/hashicorp/hcl/v2 v2.14.1 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/fatih/color v1.18.0 // indirect
+ github.com/fogleman/gg v1.3.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.8 // indirect
+ github.com/go-openapi/inflect v0.21.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
+ github.com/go-playground/locales v0.14.1 // indirect
+ github.com/go-playground/universal-translator v0.18.1 // indirect
+ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/gorilla/websocket v1.5.3 // indirect
+ github.com/hashicorp/hcl/v2 v2.23.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/leodido/go-urn v1.2.1 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/leodido/go-urn v1.4.0 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+ github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
- github.com/zclconf/go-cty v1.11.0 // indirect
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
- golang.org/x/net v0.0.0-20220923203811-8be639271d50 // indirect
- golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
- golang.org/x/text v0.3.7 // indirect
- golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ github.com/swaggo/files/v2 v2.0.2 // indirect
+ github.com/yeqown/reedsolomon v1.0.0 // indirect
+ github.com/zclconf/go-cty v1.16.2 // indirect
+ github.com/zclconf/go-cty-yaml v1.1.0 // indirect
+ golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+ golang.org/x/image v0.26.0 // indirect
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/net v0.39.0 // indirect
+ golang.org/x/sync v0.13.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
+ golang.org/x/tools v0.32.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
+ modernc.org/libc v1.62.1 // indirect
+ modernc.org/mathutil v1.7.1 // indirect
+ modernc.org/memory v1.9.1 // indirect
)
diff --git a/backend/go.sum b/backend/go.sum
index a16fd64..6e0ebbf 100644
--- a/backend/go.sum
+++ b/backend/go.sum
@@ -1,151 +1,190 @@
-ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a h1:6/nt4DODfgxzHTTg3tYy7YkVzruGQGZ/kRvXpA45KUo=
-ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE=
-entgo.io/ent v0.11.3 h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=
-entgo.io/ent v0.11.3/go.mod h1:mvDhvynOzAsOe7anH7ynPPtMjA/eeXP96kAfweevyxc=
+ariga.io/atlas v0.32.0 h1:y+77nueMrExLiKlz1CcPKh/nU7VSlWfBbwCShsJyvCw=
+ariga.io/atlas v0.32.0/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
+entgo.io/ent v0.14.4 h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI=
+entgo.io/ent v0.14.4/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
-github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/ardanlabs/conf/v2 v2.2.0 h1:ar1+TYIYAh2Tdeg2DQroh7ruR56/vJR8BDfzDIrXgtk=
-github.com/ardanlabs/conf/v2 v2.2.0/go.mod h1:m37ZKdW9jwMUEhGX36jRNt8VzSQ/HVmSziLZH2p33nY=
-github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/ardanlabs/conf/v3 v3.7.1 h1:GIV7ylesF/0NexhnJdLmzsi2NIVYY2wVhR0UfvpmAeQ=
+github.com/ardanlabs/conf/v3 v3.7.1/go.mod h1:IIucqD+601gt3jfhMXVukxoT16LnoGVd2DzRC2GhHiA=
+github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
+github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
+github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec=
+github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
-github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
-github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
-github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI=
-github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
-github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
-github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
+github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
+github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
+github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-openapi/inflect v0.21.2 h1:0gClGlGcxifcJR56zwvhaOulnNgnhc4qTAkob5ObnSM=
+github.com/go-openapi/inflect v0.21.2/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
+github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
+github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1 h1:FWNFq4fM1wPfcK40yHE5UO3RUdSNPaBC+j3PokzA6OQ=
+github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1/go.mod h1:5YoVOkjYAQumqlV356Hj3xeYh4BdZuLE0/nRkf2NKkI=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
-github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
+github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
+github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
+github.com/hay-kot/httpkit v0.0.11 h1:ZdB2uqsFBSDpfUoClGK5c5orjBjQkEVSXh7fZX5FKEk=
+github.com/hay-kot/httpkit v0.0.11/go.mod h1:0kZdk5/swzdfqfg2c6pBWimcgeJ9PTyO97EbHnYl2Sw=
+github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
+github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
-github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU=
+github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/olahol/melody v1.2.1 h1:xdwRkzHxf+B0w4TKbGpUSSkV516ZucQZJIWLztOWICQ=
+github.com/olahol/melody v1.2.1/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4=
+github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY=
-github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY=
-github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w=
-github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc=
-github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo=
-github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU=
-github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
-github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0=
-github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
-golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220923203811-8be639271d50 h1:vKyz8L3zkd+xrMeIaBsQ/MNVPVFSffdaU3ZyYlBGFnI=
-golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
+github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU=
+github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0=
+github.com/swaggo/http-swagger/v2 v2.0.2 h1:FKCdLsl+sFCx60KFsyM0rDarwiUSZ8DqbfSyIKC9OBg=
+github.com/swaggo/http-swagger/v2 v2.0.2/go.mod h1:r7/GBkAWIfK6E/OLnE8fXnviHiDeAHmgIyooa4xm3AQ=
+github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A=
+github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg=
+github.com/yeqown/go-qrcode/v2 v2.2.5 h1:HCOe2bSjkhZyYoyyNaXNzh4DJZll6inVJQQw+8228Zk=
+github.com/yeqown/go-qrcode/v2 v2.2.5/go.mod h1:uHpt9CM0V1HeXLz+Wg5MN50/sI/fQhfkZlOM+cOTHxw=
+github.com/yeqown/go-qrcode/writer/standard v1.2.5 h1:m+5BUIcbsaG2md76FIqI/oZULrAju8tsk47eOohovQ0=
+github.com/yeqown/go-qrcode/writer/standard v1.2.5/go.mod h1:O4MbzsotGCvy8upYPCR91j81dr5XLT7heuljcNXW+oQ=
+github.com/yeqown/reedsolomon v1.0.0 h1:x1h/Ej/uJnNu8jaX7GLHBWmZKCAWjEJTetkqaabr4B0=
+github.com/yeqown/reedsolomon v1.0.0/go.mod h1:P76zpcn2TCuL0ul1Fso373qHRc69LKwAw/Iy6g1WiiM=
+github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
+github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
+github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
+github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
+golang.org/x/image v0.26.0 h1:4XjIFEZWQmCZi6Wv8BoxsDhRU3RVnLX04dToTDAEPlY=
+golang.org/x/image v0.26.0/go.mod h1:lcxbMFAovzpnJxzXS3nyL83K27tmqtKzIJpctK8YO5c=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
-golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa h1:uKcci2q7Qtp6nMTC/AAvfNUAldFtJuHWV9/5QWiypts=
-golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
+golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
+modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
+modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
+modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
+modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
+modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
+modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
+modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
+modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
+modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
+modernc.org/memory v1.9.1 h1:V/Z1solwAVmMW1yttq3nDdZPJqV1rM05Ccq6KMSZ34g=
+modernc.org/memory v1.9.1/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
+modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
+modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
+modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
+modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
+modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
+modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
+modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/backend/internal/core/currencies/currencies.go b/backend/internal/core/currencies/currencies.go
new file mode 100644
index 0000000..4cc8766
--- /dev/null
+++ b/backend/internal/core/currencies/currencies.go
@@ -0,0 +1,104 @@
+// Package currencies provides a shared definition of currencies. This uses a global
+// variable to hold the currencies.
+package currencies
+
+import (
+ "bytes"
+ _ "embed"
+ "encoding/json"
+ "io"
+ "slices"
+ "strings"
+ "sync"
+)
+
+//go:embed currencies.json
+var defaults []byte
+
+type CollectorFunc func() ([]Currency, error)
+
+func CollectJSON(reader io.Reader) CollectorFunc {
+ return func() ([]Currency, error) {
+ var currencies []Currency
+ err := json.NewDecoder(reader).Decode(¤cies)
+ if err != nil {
+ return nil, err
+ }
+
+ return currencies, nil
+ }
+}
+
+func CollectDefaults() CollectorFunc {
+ return CollectJSON(bytes.NewReader(defaults))
+}
+
+func CollectionCurrencies(collectors ...CollectorFunc) ([]Currency, error) {
+ out := make([]Currency, 0, len(collectors))
+ for i := range collectors {
+ c, err := collectors[i]()
+ if err != nil {
+ return nil, err
+ }
+
+ out = append(out, c...)
+ }
+
+ return out, nil
+}
+
+type Currency struct {
+ Name string `json:"name"`
+ Code string `json:"code"`
+ Local string `json:"local"`
+ Symbol string `json:"symbol"`
+}
+
+type CurrencyRegistry struct {
+ mu sync.RWMutex
+ registry map[string]Currency
+}
+
+func NewCurrencyService(currencies []Currency) *CurrencyRegistry {
+ registry := make(map[string]Currency, len(currencies))
+ for i := range currencies {
+ registry[currencies[i].Code] = currencies[i]
+ }
+
+ return &CurrencyRegistry{
+ registry: registry,
+ }
+}
+
+func (cs *CurrencyRegistry) Slice() []Currency {
+ cs.mu.RLock()
+ defer cs.mu.RUnlock()
+
+ out := make([]Currency, 0, len(cs.registry))
+ for key := range cs.registry {
+ out = append(out, cs.registry[key])
+ }
+
+ slices.SortFunc(out, func(a, b Currency) int {
+ if a.Name < b.Name {
+ return -1
+ }
+
+ if a.Name > b.Name {
+ return 1
+ }
+
+ return 0
+ })
+
+ return out
+}
+
+func (cs *CurrencyRegistry) IsSupported(code string) bool {
+ upper := strings.ToUpper(code)
+
+ cs.mu.RLock()
+ defer cs.mu.RUnlock()
+ _, ok := cs.registry[upper]
+ return ok
+}
diff --git a/backend/internal/core/currencies/currencies.json b/backend/internal/core/currencies/currencies.json
new file mode 100644
index 0000000..c7b2630
--- /dev/null
+++ b/backend/internal/core/currencies/currencies.json
@@ -0,0 +1,638 @@
+[
+ {
+ "code": "USD",
+ "local": "United States",
+ "symbol": "$",
+ "name": "United States Dollar"
+ },
+ {
+ "code": "AED",
+ "local": "United Arab Emirates",
+ "symbol": "د.إ",
+ "name": "United Arab Emirates Dirham"
+ },
+ {
+ "code": "AFN",
+ "local": "Afghanistan",
+ "symbol": "؋",
+ "name": "Afghan Afghani"
+ },
+ {
+ "code": "ALL",
+ "local": "Albania",
+ "symbol": "L",
+ "name": "Albanian Lek"
+ },
+ {
+ "code": "AMD",
+ "local": "Armenia",
+ "symbol": "֏",
+ "name": "Armenian Dram"
+ },
+ {
+ "code": "ANG",
+ "local": "Netherlands Antilles",
+ "symbol": "ƒ",
+ "name": "Netherlands Antillean Guilder"
+ },
+ {
+ "code": "AOA",
+ "local": "Angola",
+ "symbol": "Kz",
+ "name": "Angolan Kwanza"
+ },
+ {
+ "code": "ARS",
+ "local": "Argentina",
+ "symbol": "$",
+ "name": "Argentine Peso"
+ },
+ {
+ "code": "AUD",
+ "local": "Australia",
+ "symbol": "A$",
+ "name": "Australian Dollar"
+ },
+ {
+ "code": "AWG",
+ "local": "Aruba",
+ "symbol": "ƒ",
+ "name": "Aruban Florin"
+ },
+ {
+ "code": "AZN",
+ "local": "Azerbaijan",
+ "symbol": "₼",
+ "name": "Azerbaijani Manat"
+ },
+ {
+ "code": "BAM",
+ "local": "Bosnia and Herzegovina",
+ "symbol": "KM",
+ "name": "Bosnia and Herzegovina Convertible Mark"
+ },
+ {
+ "code": "BBD",
+ "local": "Barbados",
+ "symbol": "Bds$",
+ "name": "Barbadian Dollar"
+ },
+ {
+ "code": "BDT",
+ "local": "Bangladesh",
+ "symbol": "৳",
+ "name": "Bangladeshi Taka"
+ },
+ {
+ "code": "BGN",
+ "local": "Bulgaria",
+ "symbol": "лв",
+ "name": "Bulgarian lev"
+ },
+ {
+ "code": "BHD",
+ "local": "Bahrain",
+ "symbol": "ب.د",
+ "name": "Bahraini Dinar"
+ },
+ {
+ "code": "BIF",
+ "local": "Burundi",
+ "symbol": "FBu",
+ "name": "Burundian Franc"
+ },
+ {
+ "code": "BMD",
+ "local": "Bermuda",
+ "symbol": "BD$",
+ "name": "Bermudian Dollar"
+ },
+ {
+ "code": "BND",
+ "local": "Brunei",
+ "symbol": "B$",
+ "name": "Brunei Dollar"
+ },
+ {
+ "code": "BOB",
+ "local": "Bolivia",
+ "symbol": "Bs.",
+ "name": "Bolivian Boliviano"
+ },
+ {
+ "code": "BRL",
+ "local": "Brazil",
+ "symbol": "R$",
+ "name": "Brazilian Real"
+ },
+ {
+ "code": "BSD",
+ "local": "Bahamas",
+ "symbol": "B$",
+ "name": "Bahamian Dollar"
+ },
+ {
+ "code": "BTN",
+ "local": "Bhutan",
+ "symbol": "Nu.",
+ "name": "Bhutanese Ngultrum"
+ },
+ {
+ "code": "BWP",
+ "local": "Botswana",
+ "symbol": "P",
+ "name": "Botswana Pula"
+ },
+ {
+ "code": "BYN",
+ "local": "Belarus",
+ "symbol": "Br",
+ "name": "Belarusian Ruble"
+ },
+ {
+ "code": "BZD",
+ "local": "Belize",
+ "symbol": "BZ$",
+ "name": "Belize Dollar"
+ },
+ {
+ "code": "CAD",
+ "local": "Canada",
+ "symbol": "C$",
+ "name": "Canadian Dollar"
+ },
+ {
+ "code": "CDF",
+ "local": "Democratic Republic of the Congo",
+ "symbol": "FC",
+ "name": "Congolese Franc"
+ },
+ {
+ "code": "CHF",
+ "local": "Switzerland",
+ "symbol": "CHF",
+ "name": "Swiss Franc"
+ },
+ {
+ "code": "CLP",
+ "local": "Chile",
+ "symbol": "CL$",
+ "name": "Chilean Peso"
+ },
+ {
+ "code": "CNY",
+ "local": "China",
+ "symbol": "¥",
+ "name": "Chinese Yuan"
+ },
+ {
+ "code": "COP",
+ "local": "Colombia",
+ "symbol": "COL$",
+ "name": "Colombian Peso"
+ },
+ {
+ "code": "CRC",
+ "local": "Costa Rica",
+ "symbol": "₡",
+ "name": "Costa Rican Colón"
+ },
+ {
+ "code": "CUP",
+ "local": "Cuba",
+ "symbol": "₱",
+ "name": "Cuban Peso"
+ },
+ {
+ "code": "CVE",
+ "local": "Cape Verde",
+ "symbol": "$",
+ "name": "Cape Verdean Escudo"
+ },
+ {
+ "code": "CZK",
+ "local": "Czech Republic",
+ "symbol": "Kč",
+ "name": "Czech Koruna"
+ },
+ {
+ "code": "DJF",
+ "local": "Djibouti",
+ "symbol": "Fdj",
+ "name": "Djiboutian Franc"
+ },
+ {
+ "code": "DKK",
+ "local": "Denmark",
+ "symbol": "kr",
+ "name": "Danish Krone"
+ },
+ {
+ "code": "DOP",
+ "local": "Dominican Republic",
+ "symbol": "RD$",
+ "name": "Dominican Peso"
+ },
+ {
+ "code": "DZD",
+ "local": "Algeria",
+ "symbol": "د.ج",
+ "name": "Algerian Dinar"
+ },
+ {
+ "code": "EGP",
+ "local": "Egypt",
+ "symbol": "£",
+ "name": "Egyptian Pound"
+ },
+ {
+ "code": "ERN",
+ "local": "Eritrea",
+ "symbol": "Nfk",
+ "name": "Eritrean Nakfa"
+ },
+ {
+ "code": "ETB",
+ "local": "Ethiopia",
+ "symbol": "Br",
+ "name": "Ethiopian Birr"
+ },
+ {
+ "code": "EUR",
+ "local": "Eurozone",
+ "symbol": "€",
+ "name": "Euro"
+ },
+ {
+ "code": "FJD",
+ "local": "Fiji",
+ "symbol": "FJ$",
+ "name": "Fijian Dollar"
+ },
+ {
+ "code": "FKP",
+ "local": "Falkland Islands",
+ "symbol": "£",
+ "name": "Falkland Islands Pound"
+ },
+ {
+ "code": "FOK",
+ "local": "Faroe Islands",
+ "symbol": "kr",
+ "name": "Faroese Króna"
+ },
+ {
+ "code": "GBP",
+ "local": "United Kingdom",
+ "symbol": "£",
+ "name": "British Pound Sterling"
+ },
+ {
+ "code": "GEL",
+ "local": "Georgia",
+ "symbol": "₾",
+ "name": "Georgian Lari"
+ },
+ {
+ "code": "GGP",
+ "local": "Guernsey",
+ "symbol": "£",
+ "name": "Guernsey Pound"
+ },
+ {
+ "code": "GHS",
+ "local": "Ghana",
+ "symbol": "GH₵",
+ "name": "Ghanaian Cedi"
+ },
+ {
+ "code": "GIP",
+ "local": "Gibraltar",
+ "symbol": "£",
+ "name": "Gibraltar Pound"
+ },
+ {
+ "code": "GMD",
+ "local": "Gambia",
+ "symbol": "D",
+ "name": "Gambian Dalasi"
+ },
+ {
+ "code": "GNF",
+ "local": "Guinea",
+ "symbol": "FG",
+ "name": "Guinean Franc"
+ },
+ {
+ "code": "GTQ",
+ "local": "Guatemala",
+ "symbol": "Q",
+ "name": "Guatemalan Quetzal"
+ },
+ {
+ "code": "GYD",
+ "local": "Guyana",
+ "symbol": "GY$",
+ "name": "Guyanese Dollar"
+ },
+ {
+ "code": "HKD",
+ "local": "Hong Kong",
+ "symbol": "HK$",
+ "name": "Hong Kong Dollar"
+ },
+ {
+ "code": "HNL",
+ "local": "Honduras",
+ "symbol": "L",
+ "name": "Honduran Lempira"
+ },
+ {
+ "code": "HRK",
+ "local": "Croatia",
+ "symbol": "kn",
+ "name": "Croatian Kuna"
+ },
+ {
+ "code": "HTG",
+ "local": "Haiti",
+ "symbol": "G",
+ "name": "Haitian Gourde"
+ },
+ {
+ "code": "HUF",
+ "local": "Hungary",
+ "symbol": "Ft",
+ "name": "Hungarian Forint"
+ },
+ {
+ "code": "IDR",
+ "local": "Indonesia",
+ "symbol": "Rp",
+ "name": "Indonesian Rupiah"
+ },
+ {
+ "code": "ILS",
+ "local": "Israel",
+ "symbol": "₪",
+ "name": "Israeli New Shekel"
+ },
+ {
+ "code": "IMP",
+ "local": "Isle of Man",
+ "symbol": "£",
+ "name": "Manx Pound"
+ },
+ {
+ "code": "INR",
+ "local": "India",
+ "symbol": "₹",
+ "name": "Indian Rupee"
+ },
+ {
+ "code": "IQD",
+ "local": "Iraq",
+ "symbol": "ع.د",
+ "name": "Iraqi Dinar"
+ },
+ {
+ "code": "IRR",
+ "local": "Iran",
+ "symbol": "﷼",
+ "name": "Iranian Rial"
+ },
+ {
+ "code": "ISK",
+ "local": "Iceland",
+ "symbol": "kr",
+ "name": "Icelandic Króna"
+ },
+ {
+ "code": "JEP",
+ "local": "Jersey",
+ "symbol": "£",
+ "name": "Jersey Pound"
+ },
+ {
+ "code": "JMD",
+ "local": "Jamaica",
+ "symbol": "J$",
+ "name": "Jamaican Dollar"
+ },
+ {
+ "code": "JOD",
+ "local": "Jordan",
+ "symbol": "د.ا",
+ "name": "Jordanian Dinar"
+ },
+ {
+ "code": "JPY",
+ "local": "Japan",
+ "symbol": "¥",
+ "name": "Japanese Yen"
+ },
+ {
+ "code": "KES",
+ "local": "Kenya",
+ "symbol": "KSh",
+ "name": "Kenyan Shilling"
+ },
+ {
+ "code": "KGS",
+ "local": "Kyrgyzstan",
+ "symbol": "с",
+ "name": "Kyrgyzstani Som"
+ },
+ {
+ "code": "KHR",
+ "local": "Cambodia",
+ "symbol": "៛",
+ "name": "Cambodian Riel"
+ },
+ {
+ "code": "KID",
+ "local": "Kiribati",
+ "symbol": "$",
+ "name": "Kiribati Dollar"
+ },
+ {
+ "code": "KMF",
+ "local": "Comoros",
+ "symbol": "CF",
+ "name": "Comorian Franc"
+ },
+ {
+ "code": "KRW",
+ "local": "South Korea",
+ "symbol": "₩",
+ "name": "South Korean Won"
+ },
+ {
+ "code": "KWD",
+ "local": "Kuwait",
+ "symbol": "د.ك",
+ "name": "Kuwaiti Dinar"
+ },
+ {
+ "code": "KYD",
+ "local": "Cayman Islands",
+ "symbol": "CI$",
+ "name": "Cayman Islands Dollar"
+ },
+ {
+ "code": "KZT",
+ "local": "Kazakhstan",
+ "symbol": "₸",
+ "name": "Kazakhstani Tenge"
+ },
+ {
+ "code": "LAK",
+ "local": "Laos",
+ "symbol": "₭",
+ "name": "Lao Kip"
+ },
+ {
+ "code": "LBP",
+ "local": "Lebanon",
+ "symbol": "ل.ل",
+ "name": "Lebanese Pound"
+ },
+ {
+ "code": "LKR",
+ "local": "Sri Lanka",
+ "symbol": "₨",
+ "name": "Sri Lankan Rupee"
+ },
+ {
+ "code": "LRD",
+ "local": "Liberia",
+ "symbol": "L$",
+ "name": "Liberian Dollar"
+ },
+ {
+ "code": "LSL",
+ "local": "Lesotho",
+ "symbol": "M",
+ "name": "Lesotho Loti"
+ },
+ {
+ "code": "LYD",
+ "local": "Libya",
+ "symbol": "ل.د",
+ "name": "Libyan Dinar"
+ },
+ {
+ "code": "MAD",
+ "local": "Morocco",
+ "symbol": "د.م.",
+ "name": "Moroccan Dirham"
+ },
+ {
+ "code": "MDL",
+ "local": "Moldova",
+ "symbol": "lei",
+ "name": "Moldovan Leu"
+ },
+ {
+ "code": "MGA",
+ "local": "Madagascar",
+ "symbol": "Ar",
+ "name": "Malagasy Ariary"
+ },
+ {
+ "code": "MKD",
+ "local": "North Macedonia",
+ "symbol": "ден",
+ "name": "Macedonian Denar"
+ },
+ {
+ "code": "MMK",
+ "local": "Myanmar",
+ "symbol": "K",
+ "name": "Myanmar Kyat"
+ },
+ {
+ "code": "MNT",
+ "local": "Mongolia",
+ "symbol": "₮",
+ "name": "Mongolian Tugrik"
+ },
+ {
+ "code": "MOP",
+ "local": "Macau",
+ "symbol": "MOP$",
+ "name": "Macanese Pataca"
+ },
+ {
+ "code": "MRU",
+ "local": "Mauritania",
+ "symbol": "UM",
+ "name": "Mauritanian Ouguiya"
+ },
+ {
+ "code": "MUR",
+ "local": "Mauritius",
+ "symbol": "₨",
+ "name": "Mauritian Rupee"
+ },
+ {
+ "code": "MVR",
+ "local": "Maldives",
+ "symbol": "Rf",
+ "name": "Maldivian Rufiyaa"
+ },
+ {
+ "code": "MWK",
+ "local": "Malawi",
+ "symbol": "MK",
+ "name": "Malawian Kwacha"
+ },
+ {
+ "code": "MXN",
+ "local": "Mexico",
+ "symbol": "Mex$",
+ "name": "Mexican Peso"
+ },
+ {
+ "code": "MYR",
+ "local": "Malaysia",
+ "symbol": "RM",
+ "name": "Malaysian Ringgit"
+ },
+ {
+ "code": "MZN",
+ "local": "Mozambique",
+ "symbol": "MT",
+ "name": "Mozambican Metical"
+ },
+ {
+ "code": "NAD",
+ "local": "Namibia",
+ "symbol": "N$",
+ "name": "Namibian Dollar"
+ },
+ {
+ "code": "NGN",
+ "local": "Nigeria",
+ "symbol": "₦",
+ "name": "Nigerian Naira"
+ },
+ {
+ "code": "NIO",
+ "local": "Nicaragua",
+ "symbol": "C$",
+ "name": "Nicaraguan Córdoba"
+ },
+ {
+ "code": "NOK",
+ "local": "Norway",
+ "symbol": "kr",
+ "name": "Norwegian Krone"
+ },
+ {
+ "code": "UAH",
+ "local": "Ukraine",
+ "symbol": "₴",
+ "name": "Ukrainian Hryvnia"
+ }
+]
diff --git a/backend/internal/core/services/all.go b/backend/internal/core/services/all.go
index 5147b8a..3c03a4e 100644
--- a/backend/internal/core/services/all.go
+++ b/backend/internal/core/services/all.go
@@ -1,24 +1,67 @@
+// Package services provides the core business logic for the application.
package services
-import "github.com/hay-kot/homebox/backend/internal/data/repo"
+import (
+ "github.com/hay-kot/homebox/backend/internal/core/currencies"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+)
type AllServices struct {
- User *UserService
- Group *GroupService
- Items *ItemService
+ User *UserService
+ Group *GroupService
+ Items *ItemService
+ BackgroundService *BackgroundService
+ Currencies *currencies.CurrencyRegistry
}
-func New(repos *repo.AllRepos) *AllServices {
+type OptionsFunc func(*options)
+
+type options struct {
+ autoIncrementAssetID bool
+ currencies []currencies.Currency
+}
+
+func WithAutoIncrementAssetID(v bool) func(*options) {
+ return func(o *options) {
+ o.autoIncrementAssetID = v
+ }
+}
+
+func WithCurrencies(v []currencies.Currency) func(*options) {
+ return func(o *options) {
+ o.currencies = v
+ }
+}
+
+func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices {
if repos == nil {
panic("repos cannot be nil")
}
+ defaultCurrencies, err := currencies.CollectionCurrencies(
+ currencies.CollectDefaults(),
+ )
+ if err != nil {
+ panic("failed to collect default currencies")
+ }
+
+ options := &options{
+ autoIncrementAssetID: true,
+ currencies: defaultCurrencies,
+ }
+
+ for _, opt := range opts {
+ opt(options)
+ }
+
return &AllServices{
User: &UserService{repos},
Group: &GroupService{repos},
Items: &ItemService{
- repo: repos,
- at: attachmentTokens{},
+ repo: repos,
+ autoIncrementAssetID: options.autoIncrementAssetID,
},
+ BackgroundService: &BackgroundService{repos},
+ Currencies: currencies.NewCurrencyService(options.currencies),
}
}
diff --git a/backend/internal/core/services/main_test.go b/backend/internal/core/services/main_test.go
index e1f7282..ecb07b0 100644
--- a/backend/internal/core/services/main_test.go
+++ b/backend/internal/core/services/main_test.go
@@ -3,11 +3,11 @@ package services
import (
"context"
"log"
- "math/rand"
"os"
"testing"
- "time"
+ "github.com/hay-kot/homebox/backend/internal/core/currencies"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/pkgs/faker"
@@ -15,7 +15,8 @@ import (
)
var (
- fk = faker.NewFaker()
+ fk = faker.NewFaker()
+ tbus = eventbus.New()
tCtx = Context{}
tClient *ent.Client
@@ -49,8 +50,6 @@ func bootstrap() {
}
func TestMain(m *testing.M) {
- rand.Seed(int64(time.Now().Unix()))
-
client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
if err != nil {
log.Fatalf("failed opening connection to sqlite: %v", err)
@@ -62,9 +61,14 @@ func TestMain(m *testing.M) {
}
tClient = client
- tRepos = repo.New(tClient, os.TempDir()+"/homebox")
- tSvc = New(tRepos)
- defer client.Close()
+ tRepos = repo.New(tClient, tbus, os.TempDir()+"/homebox")
+
+ defaults, _ := currencies.CollectionCurrencies(
+ currencies.CollectDefaults(),
+ )
+
+ tSvc = New(tRepos, WithCurrencies(defaults))
+ defer func() { _ = client.Close() }()
bootstrap()
tCtx = Context{
diff --git a/backend/internal/core/services/reporting/.testdata/import/fields.csv b/backend/internal/core/services/reporting/.testdata/import/fields.csv
new file mode 100644
index 0000000..28c3c17
--- /dev/null
+++ b/backend/internal/core/services/reporting/.testdata/import/fields.csv
@@ -0,0 +1,5 @@
+HB.location,HB.name,HB.quantity,HB.description,HB.field.Custom Field 1,HB.field.Custom Field 2,HB.field.Custom Field 3
+loc,Item 1,1,Description 1,Value 1[1],Value 1[2],Value 1[3]
+loc,Item 2,2,Description 2,Value 2[1],Value 2[2],Value 2[3]
+loc,Item 3,3,Description 3,Value 3[1],Value 3[2],Value 3[3]
+
diff --git a/backend/internal/core/services/reporting/.testdata/import/minimal.csv b/backend/internal/core/services/reporting/.testdata/import/minimal.csv
new file mode 100644
index 0000000..be39ad2
--- /dev/null
+++ b/backend/internal/core/services/reporting/.testdata/import/minimal.csv
@@ -0,0 +1,4 @@
+HB.location,HB.name,HB.quantity,HB.description
+loc,Item 1,1,Description 1
+loc,Item 2,2,Description 2
+loc,Item 3,3,Description 3
\ No newline at end of file
diff --git a/backend/internal/core/services/reporting/.testdata/import/types.csv b/backend/internal/core/services/reporting/.testdata/import/types.csv
new file mode 100644
index 0000000..96ff236
--- /dev/null
+++ b/backend/internal/core/services/reporting/.testdata/import/types.csv
@@ -0,0 +1,4 @@
+HB.name,HB.asset_id,HB.location,HB.labels
+Item 1,1,Path / To / Location 1,L1 ; L2 ; L3
+Item 2,000-002,Path /To/ Location 2,L1;L2;L3
+Item 3,1000-003,Path / To /Location 3 , L1;L2; L3
\ No newline at end of file
diff --git a/backend/internal/core/services/reporting/bill_of_materials.go b/backend/internal/core/services/reporting/bill_of_materials.go
new file mode 100644
index 0000000..4147d4b
--- /dev/null
+++ b/backend/internal/core/services/reporting/bill_of_materials.go
@@ -0,0 +1,42 @@
+package reporting
+
+import (
+ "github.com/gocarina/gocsv"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+)
+
+// =================================================================================================
+
+type BillOfMaterialsEntry struct {
+ PurchaseDate types.Date `csv:"Purchase Date"`
+ Name string `csv:"Name"`
+ Description string `csv:"Description"`
+ Manufacturer string `csv:"Manufacturer"`
+ SerialNumber string `csv:"Serial Number"`
+ ModelNumber string `csv:"Model Number"`
+ Quantity int `csv:"Quantity"`
+ Price float64 `csv:"Price"`
+ TotalPrice float64 `csv:"Total Price"`
+}
+
+// BillOfMaterialsTSV returns a byte slice of the Bill of Materials for the provided items in TSV format
+// See BillOfMaterialsEntry for the format of the output
+func BillOfMaterialsTSV(entities []repo.ItemOut) ([]byte, error) {
+ bomEntries := make([]BillOfMaterialsEntry, len(entities))
+ for i, entity := range entities {
+ bomEntries[i] = BillOfMaterialsEntry{
+ PurchaseDate: entity.PurchaseTime,
+ Name: entity.Name,
+ Description: entity.Description,
+ Manufacturer: entity.Manufacturer,
+ SerialNumber: entity.SerialNumber,
+ ModelNumber: entity.ModelNumber,
+ Quantity: entity.Quantity,
+ Price: entity.PurchasePrice,
+ TotalPrice: entity.PurchasePrice * float64(entity.Quantity),
+ }
+ }
+
+ return gocsv.MarshalBytes(&bomEntries)
+}
diff --git a/backend/internal/core/services/reporting/eventbus/eventbus.go b/backend/internal/core/services/reporting/eventbus/eventbus.go
new file mode 100644
index 0000000..581bc38
--- /dev/null
+++ b/backend/internal/core/services/reporting/eventbus/eventbus.go
@@ -0,0 +1,91 @@
+// Package eventbus provides an interface for event bus.
+package eventbus
+
+import (
+ "context"
+ "sync"
+
+ "github.com/google/uuid"
+)
+
+type Event string
+
+const (
+ EventLabelMutation Event = "label.mutation"
+ EventLocationMutation Event = "location.mutation"
+ EventItemMutation Event = "item.mutation"
+)
+
+type GroupMutationEvent struct {
+ GID uuid.UUID
+}
+
+type eventData struct {
+ event Event
+ data any
+}
+
+type EventBus struct {
+ started bool
+ ch chan eventData
+
+ mu sync.RWMutex
+ subscribers map[Event][]func(any)
+}
+
+func New() *EventBus {
+ return &EventBus{
+ ch: make(chan eventData, 100),
+ subscribers: map[Event][]func(any){
+ EventLabelMutation: {},
+ EventLocationMutation: {},
+ EventItemMutation: {},
+ },
+ }
+}
+
+func (e *EventBus) Run(ctx context.Context) error {
+ if e.started {
+ panic("event bus already started")
+ }
+
+ e.started = true
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case event := <-e.ch:
+ e.mu.RLock()
+ arr, ok := e.subscribers[event.event]
+ e.mu.RUnlock()
+
+ if !ok {
+ continue
+ }
+
+ for _, fn := range arr {
+ fn(event.data)
+ }
+ }
+ }
+}
+
+func (e *EventBus) Publish(event Event, data any) {
+ e.ch <- eventData{
+ event: event,
+ data: data,
+ }
+}
+
+func (e *EventBus) Subscribe(event Event, fn func(any)) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ arr, ok := e.subscribers[event]
+ if !ok {
+ panic("event not found")
+ }
+
+ e.subscribers[event] = append(arr, fn)
+}
diff --git a/backend/internal/core/services/reporting/import.go b/backend/internal/core/services/reporting/import.go
new file mode 100644
index 0000000..6f01b1b
--- /dev/null
+++ b/backend/internal/core/services/reporting/import.go
@@ -0,0 +1,94 @@
+// Package reporting provides a way to import CSV files into the database.
+package reporting
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "io"
+ "strings"
+)
+
+var (
+ ErrNoHomeboxHeaders = errors.New("no headers found")
+ ErrMissingRequiredHeaders = errors.New("missing required headers `HB.location` or `HB.name`")
+)
+
+// determineSeparator determines the separator used in the CSV file
+// It returns the separator as a rune and an error if it could not be determined
+//
+// It is assumed that the first row is the header row and that the separator is the same
+// for all rows.
+//
+// Supported separators are `,` and `\t`
+func determineSeparator(data []byte) (rune, error) {
+ // First row
+ firstRow := bytes.Split(data, []byte("\n"))[0]
+
+ // find first comma or /t
+ comma := bytes.IndexByte(firstRow, ',')
+ tab := bytes.IndexByte(firstRow, '\t')
+
+ switch {
+ case comma == -1 && tab == -1:
+ return 0, errors.New("could not determine separator")
+ case tab > comma:
+ return '\t', nil
+ default:
+ return ',', nil
+ }
+}
+
+// readRawCsv reads a CSV file and returns the raw data as a 2D string array
+// It determines the separator used in the CSV file and returns an error if
+// it could not be determined
+func readRawCsv(r io.Reader) ([][]string, error) {
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ reader := csv.NewReader(bytes.NewReader(data))
+
+ // Determine separator
+ sep, err := determineSeparator(data)
+ if err != nil {
+ return nil, err
+ }
+
+ reader.Comma = sep
+
+ return reader.ReadAll()
+}
+
+// parseHeaders parses the homebox headers from the CSV file and returns a map of the headers
+// and their column index as well as a list of the field headers (HB.field.*) in the order
+// they appear in the CSV file
+//
+// It returns an error if no homebox headers are found
+func parseHeaders(headers []string) (hbHeaders map[string]int, fieldHeaders []string, err error) {
+ hbHeaders = map[string]int{} // initialize map
+
+ for col, h := range headers {
+ if strings.HasPrefix(h, "HB.field.") {
+ fieldHeaders = append(fieldHeaders, h)
+ }
+
+ if strings.HasPrefix(h, "HB.") {
+ hbHeaders[h] = col
+ }
+ }
+
+ required := []string{"HB.location", "HB.name"}
+ for _, h := range required {
+ if _, ok := hbHeaders[h]; !ok {
+ return nil, nil, ErrMissingRequiredHeaders
+ }
+ }
+
+ if len(hbHeaders) == 0 {
+ return nil, nil, ErrNoHomeboxHeaders
+ }
+
+ return hbHeaders, fieldHeaders, nil
+}
diff --git a/backend/internal/core/services/reporting/io_row.go b/backend/internal/core/services/reporting/io_row.go
new file mode 100644
index 0000000..c80e00d
--- /dev/null
+++ b/backend/internal/core/services/reporting/io_row.go
@@ -0,0 +1,95 @@
+package reporting
+
+import (
+ "strings"
+
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+)
+
+type ExportItemFields struct {
+ Name string
+ Value string
+}
+
+type ExportTSVRow struct {
+ ImportRef string `csv:"HB.import_ref"`
+ Location LocationString `csv:"HB.location"`
+ LabelStr LabelString `csv:"HB.labels"`
+ AssetID repo.AssetID `csv:"HB.asset_id"`
+ Archived bool `csv:"HB.archived"`
+
+ Name string `csv:"HB.name"`
+ Quantity int `csv:"HB.quantity"`
+ Description string `csv:"HB.description"`
+ Insured bool `csv:"HB.insured"`
+ Notes string `csv:"HB.notes"`
+
+ PurchasePrice float64 `csv:"HB.purchase_price"`
+ PurchaseFrom string `csv:"HB.purchase_from"`
+ PurchaseTime types.Date `csv:"HB.purchase_time"`
+
+ Manufacturer string `csv:"HB.manufacturer"`
+ ModelNumber string `csv:"HB.model_number"`
+ SerialNumber string `csv:"HB.serial_number"`
+
+ LifetimeWarranty bool `csv:"HB.lifetime_warranty"`
+ WarrantyExpires types.Date `csv:"HB.warranty_expires"`
+ WarrantyDetails string `csv:"HB.warranty_details"`
+
+ SoldTo string `csv:"HB.sold_to"`
+ SoldPrice float64 `csv:"HB.sold_price"`
+ SoldTime types.Date `csv:"HB.sold_time"`
+ SoldNotes string `csv:"HB.sold_notes"`
+
+ Fields []ExportItemFields `csv:"-"`
+}
+
+// ============================================================================
+
+// LabelString is a string slice that is used to represent a list of labels.
+//
+// For example, a list of labels "Important; Work" would be represented as a
+// LabelString with the following values:
+//
+// LabelString{"Important", "Work"}
+type LabelString []string
+
+func parseLabelString(s string) LabelString {
+ v, _ := parseSeparatedString(s, ";")
+ return v
+}
+
+func (ls LabelString) String() string {
+ return strings.Join(ls, "; ")
+}
+
+// ============================================================================
+
+// LocationString is a string slice that is used to represent a location
+// hierarchy.
+//
+// For example, a location hierarchy of "Home / Bedroom / Desk" would be
+// represented as a LocationString with the following values:
+//
+// LocationString{"Home", "Bedroom", "Desk"}
+type LocationString []string
+
+func parseLocationString(s string) LocationString {
+ v, _ := parseSeparatedString(s, "/")
+ return v
+}
+
+func (csf LocationString) String() string {
+ return strings.Join(csf, " / ")
+}
+
+func fromPathSlice(s []repo.ItemPath) LocationString {
+ v := make(LocationString, len(s))
+
+ for i := range s {
+ v[i] = s[i].Name
+ }
+
+ return v
+}
diff --git a/backend/internal/core/services/reporting/io_sheet.go b/backend/internal/core/services/reporting/io_sheet.go
new file mode 100644
index 0000000..5877f3e
--- /dev/null
+++ b/backend/internal/core/services/reporting/io_sheet.go
@@ -0,0 +1,322 @@
+package reporting
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+ "github.com/rs/zerolog/log"
+)
+
+// IOSheet is the representation of a CSV/TSV sheet that is used for importing/exporting
+// items from homebox. It is used to read/write the data from/to a CSV/TSV file given
+// the standard format of the file.
+//
+// See ExportTSVRow for the format of the data in the sheet.
+type IOSheet struct {
+ headers []string
+ custom []int
+ index map[string]int
+ Rows []ExportTSVRow
+}
+
+func (s *IOSheet) indexHeaders() {
+ s.index = make(map[string]int)
+
+ for i, h := range s.headers {
+ if strings.HasPrefix(h, "HB.field") {
+ s.custom = append(s.custom, i)
+ }
+
+ if strings.HasPrefix(h, "HB.") {
+ s.index[h] = i
+ }
+ }
+}
+
+func (s *IOSheet) GetColumn(str string) (col int, ok bool) {
+ if s.index == nil {
+ s.indexHeaders()
+ }
+
+ col, ok = s.index[str]
+ return
+}
+
+// Read reads a CSV/TSV and populates the "Rows" field with the data from the sheet
+// Custom Fields are supported via the `HB.field.*` headers. The `HB.field.*` the "Name"
+// of the field is the part after the `HB.field.` prefix. Additionally, Custom Fields with
+// no value are excluded from the row.Fields slice, this includes empty strings.
+//
+// Note That
+// - the first row is assumed to be the header
+// - at least 1 row of data is required
+// - rows and columns must be rectangular (i.e. all rows must have the same number of columns)
+func (s *IOSheet) Read(data io.Reader) error {
+ sheet, err := readRawCsv(data)
+ if err != nil {
+ return err
+ }
+
+ if len(sheet) < 2 {
+ return fmt.Errorf("sheet must have at least 1 row of data (header + 1)")
+ }
+
+ s.headers = sheet[0]
+ s.Rows = make([]ExportTSVRow, len(sheet)-1)
+
+ for i, row := range sheet[1:] {
+ if len(row) != len(s.headers) {
+ return fmt.Errorf("row has %d columns, expected %d", len(row), len(s.headers))
+ }
+
+ rowData := ExportTSVRow{}
+
+ st := reflect.TypeOf(ExportTSVRow{})
+
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ tag := field.Tag.Get("csv")
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ col, ok := s.GetColumn(tag)
+ if !ok {
+ continue
+ }
+
+ val := row[col]
+
+ var v interface{}
+
+ switch field.Type {
+ case reflect.TypeOf(""):
+ v = val
+ case reflect.TypeOf(int(0)):
+ v = parseInt(val)
+ case reflect.TypeOf(bool(false)):
+ v = parseBool(val)
+ case reflect.TypeOf(float64(0)):
+ v = parseFloat(val)
+
+ // Custom Types
+ case reflect.TypeOf(types.Date{}):
+ v = types.DateFromString(val)
+ case reflect.TypeOf(repo.AssetID(0)):
+ v, _ = repo.ParseAssetID(val)
+ case reflect.TypeOf(LocationString{}):
+ v = parseLocationString(val)
+ case reflect.TypeOf(LabelString{}):
+ v = parseLabelString(val)
+ }
+
+ log.Debug().
+ Str("tag", tag).
+ Interface("val", v).
+ Str("type", fmt.Sprintf("%T", v)).
+ Msg("parsed value")
+
+ // Nil values are not allowed at the moment. This may change.
+ if v == nil {
+ return fmt.Errorf("could not convert %q to %s", val, field.Type)
+ }
+
+ ptrField := reflect.ValueOf(&rowData).Elem().Field(i)
+ ptrField.Set(reflect.ValueOf(v))
+ }
+
+ for _, col := range s.custom {
+ colName := strings.TrimPrefix(s.headers[col], "HB.field.")
+ customVal := row[col]
+ if customVal == "" {
+ continue
+ }
+
+ rowData.Fields = append(rowData.Fields, ExportItemFields{
+ Name: colName,
+ Value: customVal,
+ })
+ }
+
+ s.Rows[i] = rowData
+ }
+
+ return nil
+}
+
+// ReadItems populates the sheet's rows and headers from the given items.
+func (s *IOSheet) ReadItems(ctx context.Context, items []repo.ItemOut, GID uuid.UUID, repos *repo.AllRepos) error {
+ s.Rows = make([]ExportTSVRow, len(items))
+
+ extraHeaders := map[string]struct{}{}
+
+ for i := range items {
+ item := items[i]
+
+ // TODO: Support fetching nested locations
+ locID := item.Location.ID
+
+ locPaths, err := repos.Locations.PathForLoc(context.Background(), GID, locID)
+ if err != nil {
+ log.Error().Err(err).Msg("could not get location path")
+ return err
+ }
+
+ locString := fromPathSlice(locPaths)
+
+ labelString := make([]string, len(item.Labels))
+
+ for i, l := range item.Labels {
+ labelString[i] = l.Name
+ }
+
+ customFields := make([]ExportItemFields, len(item.Fields))
+
+ for i, f := range item.Fields {
+ extraHeaders[f.Name] = struct{}{}
+
+ customFields[i] = ExportItemFields{
+ Name: f.Name,
+ Value: f.TextValue,
+ }
+ }
+
+ s.Rows[i] = ExportTSVRow{
+ // fill struct
+ Location: locString,
+ LabelStr: labelString,
+
+ ImportRef: item.ImportRef,
+ AssetID: item.AssetID,
+ Name: item.Name,
+ Quantity: item.Quantity,
+ Description: item.Description,
+ Insured: item.Insured,
+ Archived: item.Archived,
+
+ PurchasePrice: item.PurchasePrice,
+ PurchaseFrom: item.PurchaseFrom,
+ PurchaseTime: item.PurchaseTime,
+
+ Manufacturer: item.Manufacturer,
+ ModelNumber: item.ModelNumber,
+ SerialNumber: item.SerialNumber,
+
+ LifetimeWarranty: item.LifetimeWarranty,
+ WarrantyExpires: item.WarrantyExpires,
+ WarrantyDetails: item.WarrantyDetails,
+
+ SoldTo: item.SoldTo,
+ SoldTime: item.SoldTime,
+ SoldPrice: item.SoldPrice,
+ SoldNotes: item.SoldNotes,
+
+ Fields: customFields,
+ }
+ }
+
+ // Extract and sort additional headers for deterministic output
+ customHeaders := make([]string, 0, len(extraHeaders))
+
+ for k := range extraHeaders {
+ customHeaders = append(customHeaders, k)
+ }
+
+ sort.Strings(customHeaders)
+
+ st := reflect.TypeOf(ExportTSVRow{})
+
+ // Write headers
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ tag := field.Tag.Get("csv")
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ s.headers = append(s.headers, tag)
+ }
+
+ for _, h := range customHeaders {
+ s.headers = append(s.headers, "HB.field."+h)
+ }
+
+ return nil
+}
+
+// TSV renders the current sheet as a 2D string slice (header row first), ready for TSV encoding.
+func (s *IOSheet) TSV() ([][]string, error) {
+ memcsv := make([][]string, len(s.Rows)+1)
+
+ memcsv[0] = s.headers
+
+	// use struct tags in rows to determine column order
+ for i, row := range s.Rows {
+ rowIdx := i + 1
+
+ memcsv[rowIdx] = make([]string, len(s.headers))
+
+ st := reflect.TypeOf(row)
+
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ tag := field.Tag.Get("csv")
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ col, ok := s.GetColumn(tag)
+ if !ok {
+ continue
+ }
+
+ val := reflect.ValueOf(row).Field(i)
+
+ var v string
+
+ switch field.Type {
+ case reflect.TypeOf(""):
+ v = val.String()
+ case reflect.TypeOf(int(0)):
+ v = strconv.Itoa(int(val.Int()))
+ case reflect.TypeOf(bool(false)):
+ v = strconv.FormatBool(val.Bool())
+ case reflect.TypeOf(float64(0)):
+ v = strconv.FormatFloat(val.Float(), 'f', -1, 64)
+
+ // Custom Types
+ case reflect.TypeOf(types.Date{}):
+ v = val.Interface().(types.Date).String()
+ case reflect.TypeOf(repo.AssetID(0)):
+ v = val.Interface().(repo.AssetID).String()
+ case reflect.TypeOf(LocationString{}):
+ v = val.Interface().(LocationString).String()
+ case reflect.TypeOf(LabelString{}):
+ v = val.Interface().(LabelString).String()
+ default:
+ log.Debug().Str("type", field.Type.String()).Msg("unknown type")
+ }
+
+ memcsv[rowIdx][col] = v
+ }
+
+ for _, f := range row.Fields {
+ col, ok := s.GetColumn("HB.field." + f.Name)
+ if !ok {
+ continue
+ }
+
+ memcsv[i+1][col] = f.Value
+ }
+ }
+
+ return memcsv, nil
+}
diff --git a/backend/internal/core/services/reporting/io_sheet_test.go b/backend/internal/core/services/reporting/io_sheet_test.go
new file mode 100644
index 0000000..f056e31
--- /dev/null
+++ b/backend/internal/core/services/reporting/io_sheet_test.go
@@ -0,0 +1,221 @@
+package reporting
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+
+ _ "embed"
+
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ //go:embed .testdata/import/minimal.csv
+ minimalImportCSV []byte
+
+ //go:embed .testdata/import/fields.csv
+ customFieldImportCSV []byte
+
+ //go:embed .testdata/import/types.csv
+ customTypesImportCSV []byte
+)
+
+func TestSheet_Read(t *testing.T) {
+ tests := []struct {
+ name string
+ data []byte
+ want []ExportTSVRow
+ wantErr bool
+ }{
+ {
+ name: "minimal import",
+ data: minimalImportCSV,
+ want: []ExportTSVRow{
+ {Location: LocationString{"loc"}, Name: "Item 1", Quantity: 1, Description: "Description 1"},
+ {Location: LocationString{"loc"}, Name: "Item 2", Quantity: 2, Description: "Description 2"},
+ {Location: LocationString{"loc"}, Name: "Item 3", Quantity: 3, Description: "Description 3"},
+ },
+ },
+ {
+ name: "custom field import",
+ data: customFieldImportCSV,
+ want: []ExportTSVRow{
+ {
+ Location: LocationString{"loc"}, Name: "Item 1", Quantity: 1, Description: "Description 1",
+ Fields: []ExportItemFields{
+ {Name: "Custom Field 1", Value: "Value 1[1]"},
+ {Name: "Custom Field 2", Value: "Value 1[2]"},
+ {Name: "Custom Field 3", Value: "Value 1[3]"},
+ },
+ },
+ {
+ Location: LocationString{"loc"}, Name: "Item 2", Quantity: 2, Description: "Description 2",
+ Fields: []ExportItemFields{
+ {Name: "Custom Field 1", Value: "Value 2[1]"},
+ {Name: "Custom Field 2", Value: "Value 2[2]"},
+ {Name: "Custom Field 3", Value: "Value 2[3]"},
+ },
+ },
+ {
+ Location: LocationString{"loc"}, Name: "Item 3", Quantity: 3, Description: "Description 3",
+ Fields: []ExportItemFields{
+ {Name: "Custom Field 1", Value: "Value 3[1]"},
+ {Name: "Custom Field 2", Value: "Value 3[2]"},
+ {Name: "Custom Field 3", Value: "Value 3[3]"},
+ },
+ },
+ },
+ },
+ {
+ name: "custom types import",
+ data: customTypesImportCSV,
+ want: []ExportTSVRow{
+ {
+ Name: "Item 1",
+ AssetID: repo.AssetID(1),
+ Location: LocationString{"Path", "To", "Location 1"},
+ LabelStr: LabelString{"L1", "L2", "L3"},
+ },
+ {
+ Name: "Item 2",
+ AssetID: repo.AssetID(2),
+ Location: LocationString{"Path", "To", "Location 2"},
+ LabelStr: LabelString{"L1", "L2", "L3"},
+ },
+ {
+ Name: "Item 3",
+ AssetID: repo.AssetID(1000003),
+ Location: LocationString{"Path", "To", "Location 3"},
+ LabelStr: LabelString{"L1", "L2", "L3"},
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ reader := bytes.NewReader(tt.data)
+
+ sheet := &IOSheet{}
+ err := sheet.Read(reader)
+
+ switch {
+ case tt.wantErr:
+ require.Error(t, err)
+ default:
+ require.NoError(t, err)
+ assert.ElementsMatch(t, tt.want, sheet.Rows)
+ }
+ })
+ }
+}
+
+func Test_parseHeaders(t *testing.T) {
+ tests := []struct {
+ name string
+ rawHeaders []string
+ wantHbHeaders map[string]int
+ wantFieldHeaders []string
+ wantErr bool
+ }{
+ {
+			name:             "no homebox headers",
+ rawHeaders: []string{"Header 1", "Header 2", "Header 3"},
+ wantHbHeaders: nil,
+ wantFieldHeaders: nil,
+ wantErr: true,
+ },
+ {
+ name: "field headers only",
+ rawHeaders: []string{"HB.location", "HB.name", "HB.field.1", "HB.field.2", "HB.field.3"},
+ wantHbHeaders: map[string]int{
+ "HB.location": 0,
+ "HB.name": 1,
+ "HB.field.1": 2,
+ "HB.field.2": 3,
+ "HB.field.3": 4,
+ },
+ wantFieldHeaders: []string{"HB.field.1", "HB.field.2", "HB.field.3"},
+ wantErr: false,
+ },
+ {
+ name: "mixed headers",
+ rawHeaders: []string{"Header 1", "HB.name", "Header 2", "HB.field.2", "Header 3", "HB.field.3", "HB.location"},
+ wantHbHeaders: map[string]int{
+ "HB.name": 1,
+ "HB.field.2": 3,
+ "HB.field.3": 5,
+ "HB.location": 6,
+ },
+ wantFieldHeaders: []string{"HB.field.2", "HB.field.3"},
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotHbHeaders, gotFieldHeaders, err := parseHeaders(tt.rawHeaders)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("parseHeaders() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(gotHbHeaders, tt.wantHbHeaders) {
+ t.Errorf("parseHeaders() gotHbHeaders = %v, want %v", gotHbHeaders, tt.wantHbHeaders)
+ }
+ if !reflect.DeepEqual(gotFieldHeaders, tt.wantFieldHeaders) {
+ t.Errorf("parseHeaders() gotFieldHeaders = %v, want %v", gotFieldHeaders, tt.wantFieldHeaders)
+ }
+ })
+ }
+}
+
+func Test_determineSeparator(t *testing.T) {
+ type args struct {
+ data []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ want rune
+ wantErr bool
+ }{
+ {
+ name: "comma",
+ args: args{
+ data: []byte("a,b,c"),
+ },
+ want: ',',
+ wantErr: false,
+ },
+ {
+ name: "tab",
+ args: args{
+ data: []byte("a\tb\tc"),
+ },
+ want: '\t',
+ wantErr: false,
+ },
+ {
+ name: "invalid",
+ args: args{
+ data: []byte("a;b;c"),
+ },
+ want: 0,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := determineSeparator(tt.args.data)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("determineSeparator() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("determineSeparator() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/backend/internal/core/services/reporting/value_parsers.go b/backend/internal/core/services/reporting/value_parsers.go
new file mode 100644
index 0000000..7410396
--- /dev/null
+++ b/backend/internal/core/services/reporting/value_parsers.go
@@ -0,0 +1,38 @@
+package reporting
+
+import (
+ "strconv"
+ "strings"
+)
+
+func parseSeparatedString(s string, sep string) ([]string, error) {
+ list := strings.Split(s, sep)
+
+ csf := make([]string, 0, len(list))
+ for _, s := range list {
+ trimmed := strings.TrimSpace(s)
+ if trimmed != "" {
+ csf = append(csf, trimmed)
+ }
+ }
+
+ return csf, nil
+}
+
+func parseFloat(s string) float64 {
+ if s == "" {
+ return 0
+ }
+ f, _ := strconv.ParseFloat(s, 64)
+ return f
+}
+
+func parseBool(s string) bool {
+ b, _ := strconv.ParseBool(s)
+ return b
+}
+
+func parseInt(s string) int {
+ i, _ := strconv.Atoi(s)
+ return i
+}
diff --git a/backend/internal/core/services/reporting/value_parsers_test.go b/backend/internal/core/services/reporting/value_parsers_test.go
new file mode 100644
index 0000000..bcd7431
--- /dev/null
+++ b/backend/internal/core/services/reporting/value_parsers_test.go
@@ -0,0 +1,65 @@
+package reporting
+
+import (
+ "reflect"
+ "testing"
+)
+
+func Test_parseSeparatedString(t *testing.T) {
+ type args struct {
+ s string
+ sep string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "comma",
+ args: args{
+ s: "a,b,c",
+ sep: ",",
+ },
+ want: []string{"a", "b", "c"},
+ wantErr: false,
+ },
+ {
+ name: "trimmed comma",
+ args: args{
+ s: "a, b, c",
+ sep: ",",
+ },
+ want: []string{"a", "b", "c"},
+ },
+ {
+ name: "excessive whitespace",
+ args: args{
+ s: " a, b, c ",
+ sep: ",",
+ },
+ want: []string{"a", "b", "c"},
+ },
+ {
+ name: "empty",
+ args: args{
+ s: "",
+ sep: ",",
+ },
+ want: []string{},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := parseSeparatedString(tt.args.s, tt.args.sep)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("parseSeparatedString() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("parseSeparatedString() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/backend/internal/core/services/service_background.go b/backend/internal/core/services/service_background.go
new file mode 100644
index 0000000..21ae4c3
--- /dev/null
+++ b/backend/internal/core/services/service_background.go
@@ -0,0 +1,81 @@
+package services
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "github.com/containrrr/shoutrrr"
+ "github.com/hay-kot/homebox/backend/internal/data/repo"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+ "github.com/rs/zerolog/log"
+)
+
+type BackgroundService struct {
+ repos *repo.AllRepos
+}
+
+func (svc *BackgroundService) SendNotifiersToday(ctx context.Context) error {
+ // Get All Groups
+ groups, err := svc.repos.Groups.GetAllGroups(ctx)
+ if err != nil {
+ return err
+ }
+
+ today := types.DateFromTime(time.Now())
+
+ for i := range groups {
+ group := groups[i]
+
+ entries, err := svc.repos.MaintEntry.GetScheduled(ctx, group.ID, today)
+ if err != nil {
+ return err
+ }
+
+ if len(entries) == 0 {
+ log.Debug().
+ Str("group_name", group.Name).
+ Str("group_id", group.ID.String()).
+ Msg("No scheduled maintenance for today")
+ continue
+ }
+
+ notifiers, err := svc.repos.Notifiers.GetByGroup(ctx, group.ID)
+ if err != nil {
+ return err
+ }
+
+ urls := make([]string, len(notifiers))
+ for i := range notifiers {
+ urls[i] = notifiers[i].URL
+ }
+
+ bldr := strings.Builder{}
+
+ bldr.WriteString("Homebox Maintenance for (")
+ bldr.WriteString(today.String())
+ bldr.WriteString("):\n")
+
+ for i := range entries {
+ entry := entries[i]
+ bldr.WriteString(" - ")
+ bldr.WriteString(entry.Name)
+ bldr.WriteString("\n")
+ }
+
+ var sendErrs []error
+ for i := range urls {
+ err := shoutrrr.Send(urls[i], bldr.String())
+
+ if err != nil {
+ sendErrs = append(sendErrs, err)
+ }
+ }
+
+ if len(sendErrs) > 0 {
+ return sendErrs[0]
+ }
+ }
+
+ return nil
+}
diff --git a/backend/internal/core/services/service_items.go b/backend/internal/core/services/service_items.go
index 5c02724..4d510e5 100644
--- a/backend/internal/core/services/service_items.go
+++ b/backend/internal/core/services/service_items.go
@@ -3,10 +3,13 @@ package services
import (
"context"
"errors"
+ "fmt"
+ "io"
+ "strings"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/rs/zerolog/log"
)
var (
@@ -18,179 +21,335 @@ type ItemService struct {
repo *repo.AllRepos
filepath string
- // at is a map of tokens to attachment IDs. This is used to store the attachment ID
- // for issued URLs
- at attachmentTokens
+
+ autoIncrementAssetID bool
}
-func (svc *ItemService) CsvImport(ctx context.Context, gid uuid.UUID, data [][]string) (int, error) {
- loaded := []csvRow{}
-
- // Skip first row
- for _, row := range data[1:] {
- // Skip empty rows
- if len(row) == 0 {
- continue
+func (svc *ItemService) Create(ctx Context, item repo.ItemCreate) (repo.ItemOut, error) {
+ if svc.autoIncrementAssetID {
+ highest, err := svc.repo.Items.GetHighestAssetID(ctx, ctx.GID)
+ if err != nil {
+ return repo.ItemOut{}, err
}
- if len(row) != NumOfCols {
- return 0, ErrInvalidCsv
- }
-
- r := newCsvRow(row)
- loaded = append(loaded, r)
+ item.AssetID = highest + 1
}
- // validate rows
- var errMap = map[int][]error{}
- var hasErr bool
- for i, r := range loaded {
+ return svc.repo.Items.Create(ctx, ctx.GID, item)
+}
- errs := r.validate()
-
- if len(errs) > 0 {
- hasErr = true
- lineNum := i + 2
-
- errMap[lineNum] = errs
- }
- }
-
- if hasErr {
- for lineNum, errs := range errMap {
- for _, err := range errs {
- log.Error().Err(err).Int("line", lineNum).Msg("csv import error")
- }
- }
- }
-
- // Bootstrap the locations and labels so we can reuse the created IDs for the items
- locations := map[string]uuid.UUID{}
- existingLocation, err := svc.repo.Locations.GetAll(ctx, gid)
+func (svc *ItemService) EnsureAssetID(ctx context.Context, GID uuid.UUID) (int, error) {
+ items, err := svc.repo.Items.GetAllZeroAssetID(ctx, GID)
if err != nil {
return 0, err
}
- for _, loc := range existingLocation {
- locations[loc.Name] = loc.ID
- }
- labels := map[string]uuid.UUID{}
- existingLabels, err := svc.repo.Labels.GetAll(ctx, gid)
+ highest, err := svc.repo.Items.GetHighestAssetID(ctx, GID)
if err != nil {
return 0, err
}
- for _, label := range existingLabels {
- labels[label.Name] = label.ID
- }
- for _, row := range loaded {
+ finished := 0
+ for _, item := range items {
+ highest++
- // Locations
- if _, exists := locations[row.Location]; !exists {
- result, err := svc.repo.Locations.Create(ctx, gid, repo.LocationCreate{
- Name: row.Location,
- Description: "",
- })
- if err != nil {
- return 0, err
- }
- locations[row.Location] = result.ID
+ err = svc.repo.Items.SetAssetID(ctx, GID, item.ID, highest)
+ if err != nil {
+ return 0, err
}
- // Labels
+ finished++
+ }
- for _, label := range row.getLabels() {
- if _, exists := labels[label]; exists {
- continue
- }
- result, err := svc.repo.Labels.Create(ctx, gid, repo.LabelCreate{
- Name: label,
- Description: "",
- })
- if err != nil {
- return 0, err
- }
- labels[label] = result.ID
+ return finished, nil
+}
+
+func (svc *ItemService) EnsureImportRef(ctx context.Context, GID uuid.UUID) (int, error) {
+ ids, err := svc.repo.Items.GetAllZeroImportRef(ctx, GID)
+ if err != nil {
+ return 0, err
+ }
+
+ finished := 0
+ for _, itemID := range ids {
+ ref := uuid.New().String()[0:8]
+
+ err = svc.repo.Items.Patch(ctx, GID, itemID, repo.ItemPatch{ImportRef: &ref})
+ if err != nil {
+ return 0, err
+ }
+
+ finished++
+ }
+
+ return finished, nil
+}
+
+func serializeLocation[T ~[]string](location T) string {
+ return strings.Join(location, "/")
+}
+
+// CsvImport imports items from a CSV file using the standard defined format.
+//
+// CsvImport applies the following rules/operations
+//
+// 1. If the item does not exist, it is created.
+// 2. If the item has a ImportRef and it exists it is skipped
+// 3. Locations and Labels are created if they do not exist.
+func (svc *ItemService) CsvImport(ctx context.Context, GID uuid.UUID, data io.Reader) (int, error) {
+ sheet := reporting.IOSheet{}
+
+ err := sheet.Read(data)
+ if err != nil {
+ return 0, err
+ }
+
+ // ========================================
+ // Labels
+
+ labelMap := make(map[string]uuid.UUID)
+ {
+ labels, err := svc.repo.Labels.GetAll(ctx, GID)
+ if err != nil {
+ return 0, err
+ }
+
+ for _, label := range labels {
+ labelMap[label.Name] = label.ID
}
}
- // Create the items
- var count int
- for _, row := range loaded {
- // Check Import Ref
- if row.Item.ImportRef != "" {
- exists, err := svc.repo.Items.CheckRef(ctx, gid, row.Item.ImportRef)
+ // ========================================
+ // Locations
+
+ locationMap := make(map[string]uuid.UUID)
+ {
+ locations, err := svc.repo.Locations.Tree(ctx, GID, repo.TreeQuery{WithItems: false})
+ if err != nil {
+ return 0, err
+ }
+
+ // Traverse the tree and build a map of location full paths to IDs
+ // where the full path is the location name joined by slashes.
+ var traverse func(location *repo.TreeItem, path []string)
+ traverse = func(location *repo.TreeItem, path []string) {
+ path = append(path, location.Name)
+
+ locationMap[serializeLocation(path)] = location.ID
+
+ for _, child := range location.Children {
+ traverse(child, path)
+ }
+ }
+
+ for _, location := range locations {
+ traverse(&location, []string{})
+ }
+ }
+
+ // ========================================
+ // Import items
+
+ // Asset ID Pre-Check
+ highestAID := repo.AssetID(-1)
+ if svc.autoIncrementAssetID {
+ highestAID, err = svc.repo.Items.GetHighestAssetID(ctx, GID)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ finished := 0
+
+ for i := range sheet.Rows {
+ row := sheet.Rows[i]
+
+ createRequired := true
+
+ // ========================================
+ // Preflight check for existing item
+ if row.ImportRef != "" {
+ exists, err := svc.repo.Items.CheckRef(ctx, GID, row.ImportRef)
+ if err != nil {
+ return 0, fmt.Errorf("error checking for existing item with ref %q: %w", row.ImportRef, err)
+ }
+
if exists {
- continue
+ createRequired = false
}
+ }
+
+ // ========================================
+ // Pre-Create Labels as necessary
+ labelIds := make([]uuid.UUID, len(row.LabelStr))
+
+ for j := range row.LabelStr {
+ label := row.LabelStr[j]
+
+ id, ok := labelMap[label]
+ if !ok {
+ newLabel, err := svc.repo.Labels.Create(ctx, GID, repo.LabelCreate{Name: label})
+ if err != nil {
+ return 0, err
+ }
+ id = newLabel.ID
+ }
+
+ labelIds[j] = id
+ labelMap[label] = id
+ }
+
+ // ========================================
+ // Pre-Create Locations as necessary
+ path := serializeLocation(row.Location)
+
+ locationID, ok := locationMap[path]
+ if !ok { // Traverse the path of LocationStr and check each path element to see if it exists already, if not create it.
+ paths := []string{}
+ for i, pathElement := range row.Location {
+ paths = append(paths, pathElement)
+ path := serializeLocation(paths)
+
+ locationID, ok = locationMap[path]
+ if !ok {
+ parentID := uuid.Nil
+
+ // Get the parent ID
+ if i > 0 {
+ parentPath := serializeLocation(row.Location[:i])
+ parentID = locationMap[parentPath]
+ }
+
+ newLocation, err := svc.repo.Locations.Create(ctx, GID, repo.LocationCreate{
+ ParentID: parentID,
+ Name: pathElement,
+ })
+ if err != nil {
+ return 0, err
+ }
+ locationID = newLocation.ID
+ }
+
+ locationMap[path] = locationID
+ }
+
+ locationID, ok = locationMap[path]
+ if !ok {
+ return 0, errors.New("failed to create location")
+ }
+ }
+
+ var effAID repo.AssetID
+ if svc.autoIncrementAssetID && row.AssetID.Nil() {
+ effAID = highestAID + 1
+ highestAID++
+ } else {
+ effAID = row.AssetID
+ }
+
+ // ========================================
+ // Create Item
+ var item repo.ItemOut
+ switch {
+ case createRequired:
+ newItem := repo.ItemCreate{
+ ImportRef: row.ImportRef,
+ Name: row.Name,
+ Description: row.Description,
+ AssetID: effAID,
+ LocationID: locationID,
+ LabelIDs: labelIds,
+ }
+
+ item, err = svc.repo.Items.Create(ctx, GID, newItem)
if err != nil {
- log.Err(err).Msg("error checking import ref")
+ return 0, err
+ }
+ default:
+ item, err = svc.repo.Items.GetByRef(ctx, GID, row.ImportRef)
+ if err != nil {
+ return 0, err
}
}
- locationID := locations[row.Location]
- labelIDs := []uuid.UUID{}
- for _, label := range row.getLabels() {
- labelIDs = append(labelIDs, labels[label])
+ if item.ID == uuid.Nil {
+ panic("item ID is nil on import - this should never happen")
}
- log.Info().
- Str("name", row.Item.Name).
- Str("location", row.Location).
- Msgf("Creating Item: %s", row.Item.Name)
-
- result, err := svc.repo.Items.Create(ctx, gid, repo.ItemCreate{
- ImportRef: row.Item.ImportRef,
- Name: row.Item.Name,
- Description: row.Item.Description,
- LabelIDs: labelIDs,
- LocationID: locationID,
- })
-
- if err != nil {
- return count, err
+ fields := make([]repo.ItemField, len(row.Fields))
+ for i := range row.Fields {
+ fields[i] = repo.ItemField{
+ Name: row.Fields[i].Name,
+ Type: "text",
+ TextValue: row.Fields[i].Value,
+ }
}
- // Update the item with the rest of the data
- _, err = svc.repo.Items.UpdateByGroup(ctx, gid, repo.ItemUpdate{
- // Edges
+ updateItem := repo.ItemUpdate{
+ ID: item.ID,
+ LabelIDs: labelIds,
LocationID: locationID,
- LabelIDs: labelIDs,
- // General Fields
- ID: result.ID,
- Name: result.Name,
- Description: result.Description,
- Insured: row.Item.Insured,
- Notes: row.Item.Notes,
- Quantity: row.Item.Quantity,
+ Name: row.Name,
+ Description: row.Description,
+ AssetID: effAID,
+ Insured: row.Insured,
+ Quantity: row.Quantity,
+ Archived: row.Archived,
- // Identifies the item as imported
- SerialNumber: row.Item.SerialNumber,
- ModelNumber: row.Item.ModelNumber,
- Manufacturer: row.Item.Manufacturer,
+ PurchasePrice: row.PurchasePrice,
+ PurchaseFrom: row.PurchaseFrom,
+ PurchaseTime: row.PurchaseTime,
- // Purchase
- PurchaseFrom: row.Item.PurchaseFrom,
- PurchasePrice: row.Item.PurchasePrice,
- PurchaseTime: row.Item.PurchaseTime,
+ Manufacturer: row.Manufacturer,
+ ModelNumber: row.ModelNumber,
+ SerialNumber: row.SerialNumber,
- // Warranty
- LifetimeWarranty: row.Item.LifetimeWarranty,
- WarrantyExpires: row.Item.WarrantyExpires,
- WarrantyDetails: row.Item.WarrantyDetails,
+ LifetimeWarranty: row.LifetimeWarranty,
+ WarrantyExpires: row.WarrantyExpires,
+ WarrantyDetails: row.WarrantyDetails,
- SoldTo: row.Item.SoldTo,
- SoldPrice: row.Item.SoldPrice,
- SoldTime: row.Item.SoldTime,
- SoldNotes: row.Item.SoldNotes,
- })
+ SoldTo: row.SoldTo,
+ SoldTime: row.SoldTime,
+ SoldPrice: row.SoldPrice,
+ SoldNotes: row.SoldNotes,
- if err != nil {
- return count, err
+ Notes: row.Notes,
+ Fields: fields,
}
- count++
+ item, err = svc.repo.Items.UpdateByGroup(ctx, GID, updateItem)
+ if err != nil {
+ return 0, err
+ }
+
+ finished++
}
- return count, nil
+
+ return finished, nil
+}
+
+func (svc *ItemService) ExportTSV(ctx context.Context, GID uuid.UUID) ([][]string, error) {
+ items, err := svc.repo.Items.GetAll(ctx, GID)
+ if err != nil {
+ return nil, err
+ }
+
+ sheet := reporting.IOSheet{}
+
+ err = sheet.ReadItems(ctx, items, GID, svc.repo)
+ if err != nil {
+ return nil, err
+ }
+
+ return sheet.TSV()
+}
+
+func (svc *ItemService) ExportBillOfMaterialsTSV(ctx context.Context, GID uuid.UUID) ([]byte, error) {
+ items, err := svc.repo.Items.GetAll(ctx, GID)
+ if err != nil {
+ return nil, err
+ }
+
+ return reporting.BillOfMaterialsTSV(items)
}
diff --git a/backend/internal/core/services/service_items_attachments.go b/backend/internal/core/services/service_items_attachments.go
index b5df5f8..43835c6 100644
--- a/backend/internal/core/services/service_items_attachments.go
+++ b/backend/internal/core/services/service_items_attachments.go
@@ -4,72 +4,16 @@ import (
"context"
"io"
"os"
- "time"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/repo"
- "github.com/hay-kot/homebox/backend/pkgs/hasher"
"github.com/rs/zerolog/log"
)
-// TODO: this isn't a scalable solution, tokens should be stored in the database
-type attachmentTokens map[string]uuid.UUID
-
-func (at attachmentTokens) Add(token string, id uuid.UUID) {
- at[token] = id
-
- log.Debug().Str("token", token).Str("uuid", id.String()).Msg("added token")
-
- go func() {
- ch := time.After(1 * time.Minute)
- <-ch
- at.Delete(token)
- log.Debug().Str("token", token).Msg("deleted token")
- }()
-}
-
-func (at attachmentTokens) Get(token string) (uuid.UUID, bool) {
- id, ok := at[token]
- return id, ok
-}
-
-func (at attachmentTokens) Delete(token string) {
- delete(at, token)
-}
-
-func (svc *ItemService) AttachmentToken(ctx Context, itemId, attachmentId uuid.UUID) (string, error) {
- _, err := svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId)
- if err != nil {
- return "", err
- }
-
- token := hasher.GenerateToken()
-
- // Ensure that the file exists
- attachment, err := svc.repo.Attachments.Get(ctx, attachmentId)
- if err != nil {
- return "", err
- }
-
- if _, err := os.Stat(attachment.Edges.Document.Path); os.IsNotExist(err) {
- _ = svc.AttachmentDelete(ctx, ctx.GID, itemId, attachmentId)
- return "", ErrNotFound
- }
-
- svc.at.Add(token.Raw, attachmentId)
-
- return token.Raw, nil
-}
-
-func (svc *ItemService) AttachmentPath(ctx context.Context, token string) (*ent.Document, error) {
- attachmentId, ok := svc.at.Get(token)
- if !ok {
- return nil, ErrNotFound
- }
-
- attachment, err := svc.repo.Attachments.Get(ctx, attachmentId)
+func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentID uuid.UUID) (*ent.Document, error) {
+ attachment, err := svc.repo.Attachments.Get(ctx, attachmentID)
if err != nil {
return nil, err
}
@@ -77,9 +21,9 @@ func (svc *ItemService) AttachmentPath(ctx context.Context, token string) (*ent.
return attachment.Edges.Document, nil
}
-func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) {
+func (svc *ItemService) AttachmentUpdate(ctx Context, itemID uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) {
// Update Attachment
- attachment, err := svc.repo.Attachments.Update(ctx, data.ID, attachment.Type(data.Type))
+ attachment, err := svc.repo.Attachments.Update(ctx, data.ID, data)
if err != nil {
return repo.ItemOut{}, err
}
@@ -91,15 +35,15 @@ func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *re
return repo.ItemOut{}, err
}
- return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId)
+ return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID)
}
// AttachmentAdd adds an attachment to an item by creating an entry in the Documents table and linking it to the Attachment
// Table and Items table. The file provided via the reader is stored on the file system based on the provided
// relative path during construction of the service.
-func (svc *ItemService) AttachmentAdd(ctx Context, itemId uuid.UUID, filename string, attachmentType attachment.Type, file io.Reader) (repo.ItemOut, error) {
+func (svc *ItemService) AttachmentAdd(ctx Context, itemID uuid.UUID, filename string, attachmentType attachment.Type, file io.Reader) (repo.ItemOut, error) {
// Get the Item
- _, err := svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId)
+ _, err := svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID)
if err != nil {
return repo.ItemOut{}, err
}
@@ -112,29 +56,29 @@ func (svc *ItemService) AttachmentAdd(ctx Context, itemId uuid.UUID, filename st
}
// Create the attachment
- _, err = svc.repo.Attachments.Create(ctx, itemId, doc.ID, attachmentType)
+ _, err = svc.repo.Attachments.Create(ctx, itemID, doc.ID, attachmentType)
if err != nil {
log.Err(err).Msg("failed to create attachment")
return repo.ItemOut{}, err
}
- return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId)
+ return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID)
}
-func (svc *ItemService) AttachmentDelete(ctx context.Context, gid, itemId, attachmentId uuid.UUID) error {
+func (svc *ItemService) AttachmentDelete(ctx context.Context, gid, itemID, attachmentID uuid.UUID) error {
// Get the Item
- _, err := svc.repo.Items.GetOneByGroup(ctx, gid, itemId)
+ _, err := svc.repo.Items.GetOneByGroup(ctx, gid, itemID)
if err != nil {
return err
}
- attachment, err := svc.repo.Attachments.Get(ctx, attachmentId)
+ attachment, err := svc.repo.Attachments.Get(ctx, attachmentID)
if err != nil {
return err
}
// Delete the attachment
- err = svc.repo.Attachments.Delete(ctx, attachmentId)
+ err = svc.repo.Attachments.Delete(ctx, attachmentID)
if err != nil {
return err
}
diff --git a/backend/internal/core/services/service_items_attachments_test.go b/backend/internal/core/services/service_items_attachments_test.go
index 14e822e..4e2315e 100644
--- a/backend/internal/core/services/service_items_attachments_test.go
+++ b/backend/internal/core/services/service_items_attachments_test.go
@@ -9,6 +9,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestItemService_AddAttachment(t *testing.T) {
@@ -23,7 +24,7 @@ func TestItemService_AddAttachment(t *testing.T) {
Description: "test",
Name: "test",
})
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotNil(t, loc)
itmC := repo.ItemCreate{
@@ -33,11 +34,11 @@ func TestItemService_AddAttachment(t *testing.T) {
}
itm, err := svc.repo.Items.Create(context.Background(), tGroup.ID, itmC)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotNil(t, itm)
t.Cleanup(func() {
err := svc.repo.Items.Delete(context.Background(), itm.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
})
contents := fk.Str(1000)
@@ -45,7 +46,7 @@ func TestItemService_AddAttachment(t *testing.T) {
// Setup
afterAttachment, err := svc.AttachmentAdd(tCtx, itm.ID, "testfile.txt", "attachment", reader)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotNil(t, afterAttachment)
// Check that the file exists
@@ -56,7 +57,6 @@ func TestItemService_AddAttachment(t *testing.T) {
// Check that the file contents are correct
bts, err := os.ReadFile(storedPath)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, contents, string(bts))
-
}
diff --git a/backend/internal/core/services/service_items_csv.go b/backend/internal/core/services/service_items_csv.go
deleted file mode 100644
index c9748f7..0000000
--- a/backend/internal/core/services/service_items_csv.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package services
-
-import (
- "errors"
- "strconv"
- "strings"
- "time"
-
- "github.com/hay-kot/homebox/backend/internal/data/repo"
-)
-
-var ErrInvalidCsv = errors.New("invalid csv")
-
-const NumOfCols = 21
-
-func parseFloat(s string) float64 {
- if s == "" {
- return 0
- }
- f, _ := strconv.ParseFloat(s, 64)
- return f
-}
-
-func parseDate(s string) time.Time {
- if s == "" {
- return time.Time{}
- }
-
- p, _ := time.Parse("01/02/2006", s)
- return p
-}
-
-func parseBool(s string) bool {
- switch strings.ToLower(s) {
- case "true", "yes", "1":
- return true
- default:
- return false
- }
-}
-
-func parseInt(s string) int {
- i, _ := strconv.Atoi(s)
- return i
-}
-
-type csvRow struct {
- Item repo.ItemOut
- Location string
- LabelStr string
-}
-
-func newCsvRow(row []string) csvRow {
- return csvRow{
- Location: row[1],
- LabelStr: row[2],
- Item: repo.ItemOut{
- ItemSummary: repo.ItemSummary{
- ImportRef: row[0],
- Quantity: parseInt(row[3]),
- Name: row[4],
- Description: row[5],
- Insured: parseBool(row[6]),
- },
- SerialNumber: row[7],
- ModelNumber: row[8],
- Manufacturer: row[9],
- Notes: row[10],
- PurchaseFrom: row[11],
- PurchasePrice: parseFloat(row[12]),
- PurchaseTime: parseDate(row[13]),
- LifetimeWarranty: parseBool(row[14]),
- WarrantyExpires: parseDate(row[15]),
- WarrantyDetails: row[16],
- SoldTo: row[17],
- SoldPrice: parseFloat(row[18]),
- SoldTime: parseDate(row[19]),
- SoldNotes: row[20],
- },
- }
-}
-
-func (c csvRow) getLabels() []string {
- split := strings.Split(c.LabelStr, ";")
-
- // Trim each
- for i, s := range split {
- split[i] = strings.TrimSpace(s)
- }
-
- // Remove empty
- for i, s := range split {
- if s == "" {
- split = append(split[:i], split[i+1:]...)
- }
- }
-
- return split
-}
-
-func (c csvRow) validate() []error {
- var errs []error
-
- add := func(err error) {
- errs = append(errs, err)
- }
-
- required := func(s string, name string) {
- if s == "" {
- add(errors.New(name + " is required"))
- }
- }
-
- required(c.Location, "Location")
- required(c.Item.Name, "Name")
-
- return errs
-}
diff --git a/backend/internal/core/services/service_items_csv_test.go b/backend/internal/core/services/service_items_csv_test.go
deleted file mode 100644
index b5b488c..0000000
--- a/backend/internal/core/services/service_items_csv_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package services
-
-import (
- "bytes"
- "encoding/csv"
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-const CSV_DATA = `
-Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Mode Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes
-A,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,Description 1,TRUE,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
-B,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,Description 2,FALSE,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,10/15/2021,,,,10/15/2021,
-C,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,Description 3,TRUE,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
-D,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,Description 4,FALSE,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,10/21/2020,,,,10/21/2020,
-E,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,Description 5,TRUE,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,10/14/2020,,,,10/14/2020,
-F,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,Description 6,FALSE,,39351,Honeywell,,Amazon,65.98,09/30/2020,,09/30/2020,,,,09/30/2020,`
-
-func loadcsv() [][]string {
- reader := csv.NewReader(bytes.NewBuffer([]byte(CSV_DATA)))
-
- records, err := reader.ReadAll()
- if err != nil {
- panic(err)
- }
-
- return records
-}
-
-func Test_CorrectDateParsing(t *testing.T) {
- t.Parallel()
-
- expected := []time.Time{
- time.Date(2021, 10, 13, 0, 0, 0, 0, time.UTC),
- time.Date(2021, 10, 15, 0, 0, 0, 0, time.UTC),
- time.Date(2021, 10, 13, 0, 0, 0, 0, time.UTC),
- time.Date(2020, 10, 21, 0, 0, 0, 0, time.UTC),
- time.Date(2020, 10, 14, 0, 0, 0, 0, time.UTC),
- time.Date(2020, 9, 30, 0, 0, 0, 0, time.UTC),
- }
-
- records := loadcsv()
-
- for i, record := range records {
- if i == 0 {
- continue
- }
- entity := newCsvRow(record)
- expected := expected[i-1]
-
- assert.Equal(t, expected, entity.Item.PurchaseTime, fmt.Sprintf("Failed on row %d", i))
- assert.Equal(t, expected, entity.Item.WarrantyExpires, fmt.Sprintf("Failed on row %d", i))
- assert.Equal(t, expected, entity.Item.SoldTime, fmt.Sprintf("Failed on row %d", i))
- }
-}
-
-func Test_csvRow_getLabels(t *testing.T) {
- type fields struct {
- LabelStr string
- }
- tests := []struct {
- name string
- fields fields
- want []string
- }{
- {
- name: "basic test",
- fields: fields{
- LabelStr: "IOT;Home Assistant;Z-Wave",
- },
- want: []string{"IOT", "Home Assistant", "Z-Wave"},
- },
- {
- name: "no labels",
- fields: fields{
- LabelStr: "",
- },
- want: []string{},
- },
- {
- name: "single label",
- fields: fields{
- LabelStr: "IOT",
- },
- want: []string{"IOT"},
- },
- {
- name: "trailing semicolon",
- fields: fields{
- LabelStr: "IOT;",
- },
- want: []string{"IOT"},
- },
-
- {
- name: "whitespace",
- fields: fields{
- LabelStr: " IOT; Home Assistant; Z-Wave ",
- },
- want: []string{"IOT", "Home Assistant", "Z-Wave"},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- c := csvRow{
- LabelStr: tt.fields.LabelStr,
- }
- if got := c.getLabels(); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("csvRow.getLabels() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/backend/internal/core/services/service_items_test.go b/backend/internal/core/services/service_items_test.go
deleted file mode 100644
index 1daa0b7..0000000
--- a/backend/internal/core/services/service_items_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package services
-
-import (
- "context"
- "testing"
-
- "github.com/google/uuid"
- "github.com/stretchr/testify/assert"
-)
-
-func TestItemService_CsvImport(t *testing.T) {
- data := loadcsv()
- svc := &ItemService{
- repo: tRepos,
- }
- count, err := svc.CsvImport(context.Background(), tGroup.ID, data)
- assert.Equal(t, 6, count)
- assert.NoError(t, err)
-
- // Check import refs are deduplicated
- count, err = svc.CsvImport(context.Background(), tGroup.ID, data)
- assert.Equal(t, 0, count)
- assert.NoError(t, err)
-
- items, err := svc.repo.Items.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
- t.Cleanup(func() {
- for _, item := range items {
- err := svc.repo.Items.Delete(context.Background(), item.ID)
- assert.NoError(t, err)
- }
- })
-
- assert.Equal(t, len(items), 6)
-
- dataCsv := []csvRow{}
- for _, item := range data {
- dataCsv = append(dataCsv, newCsvRow(item))
- }
-
- allLocation, err := tRepos.Locations.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
- locNames := []string{}
- for _, loc := range allLocation {
- locNames = append(locNames, loc.Name)
- }
-
- allLabels, err := tRepos.Labels.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
- labelNames := []string{}
- for _, label := range allLabels {
- labelNames = append(labelNames, label.Name)
- }
-
- ids := []uuid.UUID{}
- t.Cleanup((func() {
- for _, id := range ids {
- err := svc.repo.Items.Delete(context.Background(), id)
- assert.NoError(t, err)
- }
- }))
-
- for _, item := range items {
- assert.Contains(t, locNames, item.Location.Name)
- for _, label := range item.Labels {
- assert.Contains(t, labelNames, label.Name)
- }
-
- for _, csvRow := range dataCsv {
- if csvRow.Item.Name == item.Name {
- assert.Equal(t, csvRow.Item.Description, item.Description)
- assert.Equal(t, csvRow.Item.Quantity, item.Quantity)
- assert.Equal(t, csvRow.Item.Insured, item.Insured)
- }
- }
- }
-}
diff --git a/backend/internal/core/services/service_user.go b/backend/internal/core/services/service_user.go
index e3d8f8a..d86c39b 100644
--- a/backend/internal/core/services/service_user.go
+++ b/backend/internal/core/services/service_user.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/pkgs/hasher"
"github.com/rs/zerolog/log"
@@ -15,7 +16,7 @@ var (
oneWeek = time.Hour * 24 * 7
ErrorInvalidLogin = errors.New("invalid username or password")
ErrorInvalidToken = errors.New("invalid token")
- ErrorTokenIdMismatch = errors.New("token id mismatch")
+ ErrorTokenIDMismatch = errors.New("token id mismatch")
)
type UserService struct {
@@ -30,8 +31,9 @@ type (
Password string `json:"password"`
}
UserAuthTokenDetail struct {
- Raw string `json:"raw"`
- ExpiresAt time.Time `json:"expiresAt"`
+ Raw string `json:"raw"`
+ AttachmentToken string `json:"attachmentToken"`
+ ExpiresAt time.Time `json:"expiresAt"`
}
LoginForm struct {
Username string `json:"username"`
@@ -49,21 +51,25 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
Msg("Registering new user")
var (
- err error
- group repo.Group
- token repo.GroupInvitation
- isOwner = false
+ err error
+ group repo.Group
+ token repo.GroupInvitation
+
+ // creatingGroup is true if the user is creating a new group.
+ creatingGroup = false
)
switch data.GroupToken {
case "":
- isOwner = true
+ log.Debug().Msg("creating new group")
+ creatingGroup = true
group, err = svc.repos.Groups.GroupCreate(ctx, "Home")
if err != nil {
log.Err(err).Msg("Failed to create group")
return repo.UserOut{}, err
}
default:
+ log.Debug().Msg("joining existing group")
token, err = svc.repos.Groups.InvitationGet(ctx, hasher.HashToken(data.GroupToken))
if err != nil {
log.Err(err).Msg("Failed to get invitation token")
@@ -79,30 +85,37 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
Password: hashed,
IsSuperuser: false,
GroupID: group.ID,
- IsOwner: isOwner,
+ IsOwner: creatingGroup,
}
usr, err := svc.repos.Users.Create(ctx, usrCreate)
if err != nil {
return repo.UserOut{}, err
}
+ log.Debug().Msg("user created")
- for _, label := range defaultLabels() {
- _, err := svc.repos.Labels.Create(ctx, group.ID, label)
- if err != nil {
- return repo.UserOut{}, err
+ // Create the default labels and locations for the group.
+ if creatingGroup {
+ log.Debug().Msg("creating default labels")
+ for _, label := range defaultLabels() {
+ _, err := svc.repos.Labels.Create(ctx, usr.GroupID, label)
+ if err != nil {
+ return repo.UserOut{}, err
+ }
+ }
+
+ log.Debug().Msg("creating default locations")
+ for _, location := range defaultLocations() {
+ _, err := svc.repos.Locations.Create(ctx, usr.GroupID, location)
+ if err != nil {
+ return repo.UserOut{}, err
+ }
}
}
- for _, location := range defaultLocations() {
- _, err := svc.repos.Locations.Create(ctx, group.ID, location)
- if err != nil {
- return repo.UserOut{}, err
- }
- }
-
- // Decrement the invitation token if it was used
+ // Decrement the invitation token if it was used.
if token.ID != uuid.Nil {
+ log.Debug().Msg("decrementing invitation token")
err = svc.repos.Groups.InvitationUpdate(ctx, token.ID, token.Uses-1)
if err != nil {
log.Err(err).Msg("Failed to update invitation token")
@@ -125,27 +138,52 @@ func (svc *UserService) UpdateSelf(ctx context.Context, ID uuid.UUID, data repo.
return repo.UserOut{}, err
}
- return svc.repos.Users.GetOneId(ctx, ID)
+ return svc.repos.Users.GetOneID(ctx, ID)
}
// ============================================================================
// User Authentication
-func (svc *UserService) createToken(ctx context.Context, userId uuid.UUID) (UserAuthTokenDetail, error) {
- newToken := hasher.GenerateToken()
+func (svc *UserService) createSessionToken(ctx context.Context, userID uuid.UUID, extendedSession bool) (UserAuthTokenDetail, error) {
+ attachmentToken := hasher.GenerateToken()
- created, err := svc.repos.AuthTokens.CreateToken(ctx, repo.UserAuthTokenCreate{
- UserID: userId,
- TokenHash: newToken.Hash,
- ExpiresAt: time.Now().Add(oneWeek),
- })
+ expiresAt := time.Now().Add(oneWeek)
+ if extendedSession {
+ expiresAt = time.Now().Add(oneWeek * 4)
+ }
- return UserAuthTokenDetail{Raw: newToken.Raw, ExpiresAt: created.ExpiresAt}, err
+ attachmentData := repo.UserAuthTokenCreate{
+ UserID: userID,
+ TokenHash: attachmentToken.Hash,
+ ExpiresAt: expiresAt,
+ }
+
+ _, err := svc.repos.AuthTokens.CreateToken(ctx, attachmentData, authroles.RoleAttachments)
+ if err != nil {
+ return UserAuthTokenDetail{}, err
+ }
+
+ userToken := hasher.GenerateToken()
+ data := repo.UserAuthTokenCreate{
+ UserID: userID,
+ TokenHash: userToken.Hash,
+ ExpiresAt: expiresAt,
+ }
+
+ created, err := svc.repos.AuthTokens.CreateToken(ctx, data, authroles.RoleUser)
+ if err != nil {
+ return UserAuthTokenDetail{}, err
+ }
+
+ return UserAuthTokenDetail{
+ Raw: userToken.Raw,
+ ExpiresAt: created.ExpiresAt,
+ AttachmentToken: attachmentToken.Raw,
+ }, nil
}
-func (svc *UserService) Login(ctx context.Context, username, password string) (UserAuthTokenDetail, error) {
+func (svc *UserService) Login(ctx context.Context, username, password string, extendedSession bool) (UserAuthTokenDetail, error) {
usr, err := svc.repos.Users.GetOneEmail(ctx, username)
-
if err != nil {
// SECURITY: Perform hash to ensure response times are the same
hasher.CheckPasswordHash("not-a-real-password", "not-a-real-password")
@@ -156,7 +194,7 @@ func (svc *UserService) Login(ctx context.Context, username, password string) (U
return UserAuthTokenDetail{}, ErrorInvalidLogin
}
- return svc.createToken(ctx, usr.ID)
+ return svc.createSessionToken(ctx, usr.ID, extendedSession)
}
func (svc *UserService) Logout(ctx context.Context, token string) error {
@@ -169,14 +207,11 @@ func (svc *UserService) RenewToken(ctx context.Context, token string) (UserAuthT
hash := hasher.HashToken(token)
dbToken, err := svc.repos.AuthTokens.GetUserFromToken(ctx, hash)
-
if err != nil {
return UserAuthTokenDetail{}, ErrorInvalidToken
}
- newToken, _ := svc.createToken(ctx, dbToken.ID)
-
- return newToken, nil
+ return svc.createSessionToken(ctx, dbToken.ID, false)
}
// DeleteSelf deletes the user that is currently logged based of the provided UUID
@@ -187,7 +222,7 @@ func (svc *UserService) DeleteSelf(ctx context.Context, ID uuid.UUID) error {
}
func (svc *UserService) ChangePassword(ctx Context, current string, new string) (ok bool) {
- usr, err := svc.repos.Users.GetOneId(ctx, ctx.UID)
+ usr, err := svc.repos.Users.GetOneID(ctx, ctx.UID)
if err != nil {
return false
}
diff --git a/backend/internal/data/ent/attachment.go b/backend/internal/data/ent/attachment.go
index e5f738a..bfb7de2 100644
--- a/backend/internal/data/ent/attachment.go
+++ b/backend/internal/data/ent/attachment.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
@@ -25,11 +26,14 @@ type Attachment struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
// Type holds the value of the "type" field.
Type attachment.Type `json:"type,omitempty"`
+ // Primary holds the value of the "primary" field.
+ Primary bool `json:"primary,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the AttachmentQuery when eager-loading is set.
Edges AttachmentEdges `json:"edges"`
document_attachments *uuid.UUID
item_attachments *uuid.UUID
+ selectValues sql.SelectValues
}
// AttachmentEdges holds the relations/edges for other nodes in the graph.
@@ -74,6 +78,8 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
for i := range columns {
switch columns[i] {
+ case attachment.FieldPrimary:
+ values[i] = new(sql.NullBool)
case attachment.FieldType:
values[i] = new(sql.NullString)
case attachment.FieldCreatedAt, attachment.FieldUpdatedAt:
@@ -85,7 +91,7 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
case attachment.ForeignKeys[1]: // item_attachments
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type Attachment", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -123,6 +129,12 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
} else if value.Valid {
a.Type = attachment.Type(value.String)
}
+ case attachment.FieldPrimary:
+ if value, ok := values[i].(*sql.NullBool); !ok {
+ return fmt.Errorf("unexpected type %T for field primary", values[i])
+ } else if value.Valid {
+ a.Primary = value.Bool
+ }
case attachment.ForeignKeys[0]:
if value, ok := values[i].(*sql.NullScanner); !ok {
return fmt.Errorf("unexpected type %T for field document_attachments", values[i])
@@ -137,26 +149,34 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
a.item_attachments = new(uuid.UUID)
*a.item_attachments = *value.S.(*uuid.UUID)
}
+ default:
+ a.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the Attachment.
+// This includes values selected through modifiers, order, etc.
+func (a *Attachment) Value(name string) (ent.Value, error) {
+ return a.selectValues.Get(name)
+}
+
// QueryItem queries the "item" edge of the Attachment entity.
func (a *Attachment) QueryItem() *ItemQuery {
- return (&AttachmentClient{config: a.config}).QueryItem(a)
+ return NewAttachmentClient(a.config).QueryItem(a)
}
// QueryDocument queries the "document" edge of the Attachment entity.
func (a *Attachment) QueryDocument() *DocumentQuery {
- return (&AttachmentClient{config: a.config}).QueryDocument(a)
+ return NewAttachmentClient(a.config).QueryDocument(a)
}
// Update returns a builder for updating this Attachment.
// Note that you need to call Attachment.Unwrap() before calling this method if this Attachment
// was returned from a transaction, and the transaction was committed or rolled back.
func (a *Attachment) Update() *AttachmentUpdateOne {
- return (&AttachmentClient{config: a.config}).UpdateOne(a)
+ return NewAttachmentClient(a.config).UpdateOne(a)
}
// Unwrap unwraps the Attachment entity that was returned from a transaction after it was closed,
@@ -183,15 +203,12 @@ func (a *Attachment) String() string {
builder.WriteString(", ")
builder.WriteString("type=")
builder.WriteString(fmt.Sprintf("%v", a.Type))
+ builder.WriteString(", ")
+ builder.WriteString("primary=")
+ builder.WriteString(fmt.Sprintf("%v", a.Primary))
builder.WriteByte(')')
return builder.String()
}
// Attachments is a parsable slice of Attachment.
type Attachments []*Attachment
-
-func (a Attachments) config(cfg config) {
- for _i := range a {
- a[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/attachment/attachment.go b/backend/internal/data/ent/attachment/attachment.go
index f7aef63..4bbac72 100644
--- a/backend/internal/data/ent/attachment/attachment.go
+++ b/backend/internal/data/ent/attachment/attachment.go
@@ -6,6 +6,8 @@ import (
"fmt"
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -20,6 +22,8 @@ const (
FieldUpdatedAt = "updated_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
+ // FieldPrimary holds the string denoting the primary field in the database.
+ FieldPrimary = "primary"
// EdgeItem holds the string denoting the item edge name in mutations.
EdgeItem = "item"
// EdgeDocument holds the string denoting the document edge name in mutations.
@@ -48,6 +52,7 @@ var Columns = []string{
FieldCreatedAt,
FieldUpdatedAt,
FieldType,
+ FieldPrimary,
}
// ForeignKeys holds the SQL foreign-keys that are owned by the "attachments"
@@ -79,6 +84,8 @@ var (
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
+ // DefaultPrimary holds the default value on creation for the "primary" field.
+ DefaultPrimary bool
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
@@ -111,3 +118,59 @@ func TypeValidator(_type Type) error {
return fmt.Errorf("attachment: invalid enum value for type field: %q", _type)
}
}
+
+// OrderOption defines the ordering options for the Attachment queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByType orders the results by the type field.
+func ByType(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldType, opts...).ToFunc()
+}
+
+// ByPrimary orders the results by the primary field.
+func ByPrimary(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPrimary, opts...).ToFunc()
+}
+
+// ByItemField orders the results by item field.
+func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByDocumentField orders the results by document field.
+func ByDocumentField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newDocumentStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newItemStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
+ )
+}
+func newDocumentStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(DocumentInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
+ )
+}
diff --git a/backend/internal/data/ent/attachment/where.go b/backend/internal/data/ent/attachment/where.go
index e2adb4f..f6950f3 100644
--- a/backend/internal/data/ent/attachment/where.go
+++ b/backend/internal/data/ent/attachment/where.go
@@ -13,251 +13,172 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Attachment(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Attachment(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Attachment(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// Primary applies equality check predicate on the "primary" field. It's identical to PrimaryEQ.
+func Primary(v bool) predicate.Attachment {
+ return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Attachment(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Attachment(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Attachment(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Attachment(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Attachment(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Attachment(sql.FieldLTE(FieldUpdatedAt, v))
}
// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v Type) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldType), v))
- })
+ return predicate.Attachment(sql.FieldEQ(FieldType, v))
}
// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v Type) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldType), v))
- })
+ return predicate.Attachment(sql.FieldNEQ(FieldType, v))
}
// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...Type) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldType), v...))
- })
+ return predicate.Attachment(sql.FieldIn(FieldType, vs...))
}
// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...Type) predicate.Attachment {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldType), v...))
- })
+ return predicate.Attachment(sql.FieldNotIn(FieldType, vs...))
+}
+
+// PrimaryEQ applies the EQ predicate on the "primary" field.
+func PrimaryEQ(v bool) predicate.Attachment {
+ return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
+}
+
+// PrimaryNEQ applies the NEQ predicate on the "primary" field.
+func PrimaryNEQ(v bool) predicate.Attachment {
+ return predicate.Attachment(sql.FieldNEQ(FieldPrimary, v))
}
// HasItem applies the HasEdge predicate on the "item" edge.
@@ -265,7 +186,6 @@ func HasItem() predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -275,11 +195,7 @@ func HasItem() predicate.Attachment {
// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates).
func HasItemWith(preds ...predicate.Item) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
- )
+ step := newItemStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -293,7 +209,6 @@ func HasDocument() predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -303,11 +218,7 @@ func HasDocument() predicate.Attachment {
// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates).
func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
- )
+ step := newDocumentStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -318,32 +229,15 @@ func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Attachment) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Attachment(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Attachment) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Attachment(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Attachment) predicate.Attachment {
- return predicate.Attachment(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Attachment(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/attachment_create.go b/backend/internal/data/ent/attachment_create.go
index 8725984..d1a0b5b 100644
--- a/backend/internal/data/ent/attachment_create.go
+++ b/backend/internal/data/ent/attachment_create.go
@@ -65,6 +65,20 @@ func (ac *AttachmentCreate) SetNillableType(a *attachment.Type) *AttachmentCreat
return ac
}
+// SetPrimary sets the "primary" field.
+func (ac *AttachmentCreate) SetPrimary(b bool) *AttachmentCreate {
+ ac.mutation.SetPrimary(b)
+ return ac
+}
+
+// SetNillablePrimary sets the "primary" field if the given value is not nil.
+func (ac *AttachmentCreate) SetNillablePrimary(b *bool) *AttachmentCreate {
+ if b != nil {
+ ac.SetPrimary(*b)
+ }
+ return ac
+}
+
// SetID sets the "id" field.
func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate {
ac.mutation.SetID(u)
@@ -108,50 +122,8 @@ func (ac *AttachmentCreate) Mutation() *AttachmentMutation {
// Save creates the Attachment in the database.
func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) {
- var (
- err error
- node *Attachment
- )
ac.defaults()
- if len(ac.hooks) == 0 {
- if err = ac.check(); err != nil {
- return nil, err
- }
- node, err = ac.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AttachmentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = ac.check(); err != nil {
- return nil, err
- }
- ac.mutation = mutation
- if node, err = ac.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(ac.hooks) - 1; i >= 0; i-- {
- if ac.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ac.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, ac.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Attachment)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from AttachmentMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -190,6 +162,10 @@ func (ac *AttachmentCreate) defaults() {
v := attachment.DefaultType
ac.mutation.SetType(v)
}
+ if _, ok := ac.mutation.Primary(); !ok {
+ v := attachment.DefaultPrimary
+ ac.mutation.SetPrimary(v)
+ }
if _, ok := ac.mutation.ID(); !ok {
v := attachment.DefaultID()
ac.mutation.SetID(v)
@@ -212,6 +188,9 @@ func (ac *AttachmentCreate) check() error {
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)}
}
}
+ if _, ok := ac.mutation.Primary(); !ok {
+ return &ValidationError{Name: "primary", err: errors.New(`ent: missing required field "Attachment.primary"`)}
+ }
if _, ok := ac.mutation.ItemID(); !ok {
return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "Attachment.item"`)}
}
@@ -222,6 +201,9 @@ func (ac *AttachmentCreate) check() error {
}
func (ac *AttachmentCreate) sqlSave(ctx context.Context) (*Attachment, error) {
+ if err := ac.check(); err != nil {
+ return nil, err
+ }
_node, _spec := ac.createSpec()
if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -236,48 +218,36 @@ func (ac *AttachmentCreate) sqlSave(ctx context.Context) (*Attachment, error) {
return nil, err
}
}
+ ac.mutation.id = &_node.ID
+ ac.mutation.done = true
return _node, nil
}
func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
var (
_node = &Attachment{config: ac.config}
- _spec = &sqlgraph.CreateSpec{
- Table: attachment.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(attachment.Table, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID))
)
if id, ok := ac.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := ac.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: attachment.FieldCreatedAt,
- })
+ _spec.SetField(attachment.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := ac.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: attachment.FieldUpdatedAt,
- })
+ _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := ac.mutation.GetType(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: attachment.FieldType,
- })
+ _spec.SetField(attachment.FieldType, field.TypeEnum, value)
_node.Type = value
}
+ if value, ok := ac.mutation.Primary(); ok {
+ _spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
+ _node.Primary = value
+ }
if nodes := ac.mutation.ItemIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -286,10 +256,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -306,10 +273,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -324,11 +288,15 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
// AttachmentCreateBulk is the builder for creating many Attachment entities in bulk.
type AttachmentCreateBulk struct {
config
+ err error
builders []*AttachmentCreate
}
// Save creates the Attachment entities in the database.
func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) {
+ if acb.err != nil {
+ return nil, acb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
nodes := make([]*Attachment, len(acb.builders))
mutators := make([]Mutator, len(acb.builders))
@@ -345,8 +313,8 @@ func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/attachment_delete.go b/backend/internal/data/ent/attachment_delete.go
index eeeca20..1be608a 100644
--- a/backend/internal/data/ent/attachment_delete.go
+++ b/backend/internal/data/ent/attachment_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete
// Exec executes the deletion query and returns how many vertices were deleted.
func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(ad.hooks) == 0 {
- affected, err = ad.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AttachmentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- ad.mutation = mutation
- affected, err = ad.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ad.hooks) - 1; i >= 0; i-- {
- if ad.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ad.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ad.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (ad *AttachmentDelete) ExecX(ctx context.Context) int {
}
func (ad *AttachmentDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: attachment.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(attachment.Table, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID))
if ps := ad.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (ad *AttachmentDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ ad.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type AttachmentDeleteOne struct {
ad *AttachmentDelete
}
+// Where appends a list predicates to the AttachmentDelete builder.
+func (ado *AttachmentDeleteOne) Where(ps ...predicate.Attachment) *AttachmentDeleteOne {
+ ado.ad.mutation.Where(ps...)
+ return ado
+}
+
// Exec executes the deletion query.
func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error {
n, err := ado.ad.Exec(ctx)
@@ -111,5 +82,7 @@ func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ado *AttachmentDeleteOne) ExecX(ctx context.Context) {
- ado.ad.ExecX(ctx)
+ if err := ado.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/attachment_query.go b/backend/internal/data/ent/attachment_query.go
index 2262c0b..976e436 100644
--- a/backend/internal/data/ent/attachment_query.go
+++ b/backend/internal/data/ent/attachment_query.go
@@ -20,11 +20,9 @@ import (
// AttachmentQuery is the builder for querying Attachment entities.
type AttachmentQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []attachment.OrderOption
+ inters []Interceptor
predicates []predicate.Attachment
withItem *ItemQuery
withDocument *DocumentQuery
@@ -40,34 +38,34 @@ func (aq *AttachmentQuery) Where(ps ...predicate.Attachment) *AttachmentQuery {
return aq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (aq *AttachmentQuery) Limit(limit int) *AttachmentQuery {
- aq.limit = &limit
+ aq.ctx.Limit = &limit
return aq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (aq *AttachmentQuery) Offset(offset int) *AttachmentQuery {
- aq.offset = &offset
+ aq.ctx.Offset = &offset
return aq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery {
- aq.unique = &unique
+ aq.ctx.Unique = &unique
return aq
}
-// Order adds an order step to the query.
-func (aq *AttachmentQuery) Order(o ...OrderFunc) *AttachmentQuery {
+// Order specifies how the records should be ordered.
+func (aq *AttachmentQuery) Order(o ...attachment.OrderOption) *AttachmentQuery {
aq.order = append(aq.order, o...)
return aq
}
// QueryItem chains the current query on the "item" edge.
func (aq *AttachmentQuery) QueryItem() *ItemQuery {
- query := &ItemQuery{config: aq.config}
+ query := (&ItemClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@@ -89,7 +87,7 @@ func (aq *AttachmentQuery) QueryItem() *ItemQuery {
// QueryDocument chains the current query on the "document" edge.
func (aq *AttachmentQuery) QueryDocument() *DocumentQuery {
- query := &DocumentQuery{config: aq.config}
+ query := (&DocumentClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@@ -112,7 +110,7 @@ func (aq *AttachmentQuery) QueryDocument() *DocumentQuery {
// First returns the first Attachment entity from the query.
// Returns a *NotFoundError when no Attachment was found.
func (aq *AttachmentQuery) First(ctx context.Context) (*Attachment, error) {
- nodes, err := aq.Limit(1).All(ctx)
+ nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -135,7 +133,7 @@ func (aq *AttachmentQuery) FirstX(ctx context.Context) *Attachment {
// Returns a *NotFoundError when no Attachment ID was found.
func (aq *AttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = aq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -158,7 +156,7 @@ func (aq *AttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Attachment entity is found.
// Returns a *NotFoundError when no Attachment entities are found.
func (aq *AttachmentQuery) Only(ctx context.Context) (*Attachment, error) {
- nodes, err := aq.Limit(2).All(ctx)
+ nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -186,7 +184,7 @@ func (aq *AttachmentQuery) OnlyX(ctx context.Context) *Attachment {
// Returns a *NotFoundError when no entities are found.
func (aq *AttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = aq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -211,10 +209,12 @@ func (aq *AttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Attachments.
func (aq *AttachmentQuery) All(ctx context.Context) ([]*Attachment, error) {
+ ctx = setContextOp(ctx, aq.ctx, "All")
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
}
- return aq.sqlAll(ctx)
+ qr := querierAll[[]*Attachment, *AttachmentQuery]()
+ return withInterceptors[[]*Attachment](ctx, aq, qr, aq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -227,9 +227,12 @@ func (aq *AttachmentQuery) AllX(ctx context.Context) []*Attachment {
}
// IDs executes the query and returns a list of Attachment IDs.
-func (aq *AttachmentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := aq.Select(attachment.FieldID).Scan(ctx, &ids); err != nil {
+func (aq *AttachmentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if aq.ctx.Unique == nil && aq.path != nil {
+ aq.Unique(true)
+ }
+ ctx = setContextOp(ctx, aq.ctx, "IDs")
+ if err = aq.Select(attachment.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -246,10 +249,11 @@ func (aq *AttachmentQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (aq *AttachmentQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, aq.ctx, "Count")
if err := aq.prepareQuery(ctx); err != nil {
return 0, err
}
- return aq.sqlCount(ctx)
+ return withInterceptors[int](ctx, aq, querierCount[*AttachmentQuery](), aq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -263,10 +267,15 @@ func (aq *AttachmentQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (aq *AttachmentQuery) Exist(ctx context.Context) (bool, error) {
- if err := aq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, aq.ctx, "Exist")
+ switch _, err := aq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return aq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -286,23 +295,22 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery {
}
return &AttachmentQuery{
config: aq.config,
- limit: aq.limit,
- offset: aq.offset,
- order: append([]OrderFunc{}, aq.order...),
+ ctx: aq.ctx.Clone(),
+ order: append([]attachment.OrderOption{}, aq.order...),
+ inters: append([]Interceptor{}, aq.inters...),
predicates: append([]predicate.Attachment{}, aq.predicates...),
withItem: aq.withItem.Clone(),
withDocument: aq.withDocument.Clone(),
// clone intermediate query.
- sql: aq.sql.Clone(),
- path: aq.path,
- unique: aq.unique,
+ sql: aq.sql.Clone(),
+ path: aq.path,
}
}
// WithItem tells the query-builder to eager-load the nodes that are connected to
// the "item" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AttachmentQuery) WithItem(opts ...func(*ItemQuery)) *AttachmentQuery {
- query := &ItemQuery{config: aq.config}
+ query := (&ItemClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -313,7 +321,7 @@ func (aq *AttachmentQuery) WithItem(opts ...func(*ItemQuery)) *AttachmentQuery {
// WithDocument tells the query-builder to eager-load the nodes that are connected to
// the "document" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AttachmentQuery) WithDocument(opts ...func(*DocumentQuery)) *AttachmentQuery {
- query := &DocumentQuery{config: aq.config}
+ query := (&DocumentClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -336,16 +344,11 @@ func (aq *AttachmentQuery) WithDocument(opts ...func(*DocumentQuery)) *Attachmen
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGroupBy {
- grbuild := &AttachmentGroupBy{config: aq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := aq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return aq.sqlQuery(ctx), nil
- }
+ aq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &AttachmentGroupBy{build: aq}
+ grbuild.flds = &aq.ctx.Fields
grbuild.label = attachment.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -362,15 +365,30 @@ func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGr
// Select(attachment.FieldCreatedAt).
// Scan(ctx, &v)
func (aq *AttachmentQuery) Select(fields ...string) *AttachmentSelect {
- aq.fields = append(aq.fields, fields...)
- selbuild := &AttachmentSelect{AttachmentQuery: aq}
- selbuild.label = attachment.Label
- selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan
- return selbuild
+ aq.ctx.Fields = append(aq.ctx.Fields, fields...)
+ sbuild := &AttachmentSelect{AttachmentQuery: aq}
+ sbuild.label = attachment.Label
+ sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a AttachmentSelect configured with the given aggregations.
+func (aq *AttachmentQuery) Aggregate(fns ...AggregateFunc) *AttachmentSelect {
+ return aq.Select().Aggregate(fns...)
}
func (aq *AttachmentQuery) prepareQuery(ctx context.Context) error {
- for _, f := range aq.fields {
+ for _, inter := range aq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, aq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range aq.ctx.Fields {
if !attachment.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -447,6 +465,9 @@ func (aq *AttachmentQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -476,6 +497,9 @@ func (aq *AttachmentQuery) loadDocument(ctx context.Context, query *DocumentQuer
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(document.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -495,41 +519,22 @@ func (aq *AttachmentQuery) loadDocument(ctx context.Context, query *DocumentQuer
func (aq *AttachmentQuery) sqlCount(ctx context.Context) (int, error) {
_spec := aq.querySpec()
- _spec.Node.Columns = aq.fields
- if len(aq.fields) > 0 {
- _spec.Unique = aq.unique != nil && *aq.unique
+ _spec.Node.Columns = aq.ctx.Fields
+ if len(aq.ctx.Fields) > 0 {
+ _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, aq.driver, _spec)
}
-func (aq *AttachmentQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := aq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: attachment.Table,
- Columns: attachment.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
- },
- From: aq.sql,
- Unique: true,
- }
- if unique := aq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(attachment.Table, attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID))
+ _spec.From = aq.sql
+ if unique := aq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if aq.path != nil {
+ _spec.Unique = true
}
- if fields := aq.fields; len(fields) > 0 {
+ if fields := aq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID)
for i := range fields {
@@ -545,10 +550,10 @@ func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := aq.limit; limit != nil {
+ if limit := aq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := aq.offset; offset != nil {
+ if offset := aq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := aq.order; len(ps) > 0 {
@@ -564,7 +569,7 @@ func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(aq.driver.Dialect())
t1 := builder.Table(attachment.Table)
- columns := aq.fields
+ columns := aq.ctx.Fields
if len(columns) == 0 {
columns = attachment.Columns
}
@@ -573,7 +578,7 @@ func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = aq.sql
selector.Select(selector.Columns(columns...)...)
}
- if aq.unique != nil && *aq.unique {
+ if aq.ctx.Unique != nil && *aq.ctx.Unique {
selector.Distinct()
}
for _, p := range aq.predicates {
@@ -582,12 +587,12 @@ func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range aq.order {
p(selector)
}
- if offset := aq.offset; offset != nil {
+ if offset := aq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := aq.limit; limit != nil {
+ if limit := aq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -595,13 +600,8 @@ func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
// AttachmentGroupBy is the group-by builder for Attachment entities.
type AttachmentGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *AttachmentQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -610,74 +610,77 @@ func (agb *AttachmentGroupBy) Aggregate(fns ...AggregateFunc) *AttachmentGroupBy
return agb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (agb *AttachmentGroupBy) Scan(ctx context.Context, v any) error {
- query, err := agb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, agb.build.ctx, "GroupBy")
+ if err := agb.build.prepareQuery(ctx); err != nil {
return err
}
- agb.sql = query
- return agb.sqlScan(ctx, v)
+ return scanWithInterceptors[*AttachmentQuery, *AttachmentGroupBy](ctx, agb.build, agb, agb.build.inters, v)
}
-func (agb *AttachmentGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range agb.fields {
- if !attachment.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := agb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := agb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (agb *AttachmentGroupBy) sqlQuery() *sql.Selector {
- selector := agb.sql.Select()
+func (agb *AttachmentGroupBy) sqlScan(ctx context.Context, root *AttachmentQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(agb.fns))
for _, fn := range agb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(agb.fields)+len(agb.fns))
- for _, f := range agb.fields {
+ columns := make([]string, 0, len(*agb.flds)+len(agb.fns))
+ for _, f := range *agb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(agb.fields...)...)
+ selector.GroupBy(selector.Columns(*agb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := agb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// AttachmentSelect is the builder for selecting fields of Attachment entities.
type AttachmentSelect struct {
*AttachmentQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (as *AttachmentSelect) Aggregate(fns ...AggregateFunc) *AttachmentSelect {
+ as.fns = append(as.fns, fns...)
+ return as
}
// Scan applies the selector query and scans the result into the given value.
func (as *AttachmentSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, as.ctx, "Select")
if err := as.prepareQuery(ctx); err != nil {
return err
}
- as.sql = as.AttachmentQuery.sqlQuery(ctx)
- return as.sqlScan(ctx, v)
+ return scanWithInterceptors[*AttachmentQuery, *AttachmentSelect](ctx, as.AttachmentQuery, as, as.inters, v)
}
-func (as *AttachmentSelect) sqlScan(ctx context.Context, v any) error {
+func (as *AttachmentSelect) sqlScan(ctx context.Context, root *AttachmentQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(as.fns))
+ for _, fn := range as.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*as.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := as.sql.Query()
+ query, args := selector.Query()
if err := as.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/attachment_update.go b/backend/internal/data/ent/attachment_update.go
index fbaf485..bdf10a5 100644
--- a/backend/internal/data/ent/attachment_update.go
+++ b/backend/internal/data/ent/attachment_update.go
@@ -51,6 +51,20 @@ func (au *AttachmentUpdate) SetNillableType(a *attachment.Type) *AttachmentUpdat
return au
}
+// SetPrimary sets the "primary" field.
+func (au *AttachmentUpdate) SetPrimary(b bool) *AttachmentUpdate {
+ au.mutation.SetPrimary(b)
+ return au
+}
+
+// SetNillablePrimary sets the "primary" field if the given value is not nil.
+func (au *AttachmentUpdate) SetNillablePrimary(b *bool) *AttachmentUpdate {
+ if b != nil {
+ au.SetPrimary(*b)
+ }
+ return au
+}
+
// SetItemID sets the "item" edge to the Item entity by ID.
func (au *AttachmentUpdate) SetItemID(id uuid.UUID) *AttachmentUpdate {
au.mutation.SetItemID(id)
@@ -92,41 +106,8 @@ func (au *AttachmentUpdate) ClearDocument() *AttachmentUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
au.defaults()
- if len(au.hooks) == 0 {
- if err = au.check(); err != nil {
- return 0, err
- }
- affected, err = au.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AttachmentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = au.check(); err != nil {
- return 0, err
- }
- au.mutation = mutation
- affected, err = au.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(au.hooks) - 1; i >= 0; i-- {
- if au.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = au.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, au.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, au.sqlSave, au.mutation, au.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -176,16 +157,10 @@ func (au *AttachmentUpdate) check() error {
}
func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: attachment.Table,
- Columns: attachment.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
- },
+ if err := au.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(attachment.Table, attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID))
if ps := au.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -194,18 +169,13 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := au.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: attachment.FieldUpdatedAt,
- })
+ _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := au.mutation.GetType(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: attachment.FieldType,
- })
+ _spec.SetField(attachment.FieldType, field.TypeEnum, value)
+ }
+ if value, ok := au.mutation.Primary(); ok {
+ _spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
}
if au.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -215,10 +185,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -231,10 +198,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -250,10 +214,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -266,10 +227,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -285,6 +243,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ au.mutation.done = true
return n, nil
}
@@ -316,6 +275,20 @@ func (auo *AttachmentUpdateOne) SetNillableType(a *attachment.Type) *AttachmentU
return auo
}
+// SetPrimary sets the "primary" field.
+func (auo *AttachmentUpdateOne) SetPrimary(b bool) *AttachmentUpdateOne {
+ auo.mutation.SetPrimary(b)
+ return auo
+}
+
+// SetNillablePrimary sets the "primary" field if the given value is not nil.
+func (auo *AttachmentUpdateOne) SetNillablePrimary(b *bool) *AttachmentUpdateOne {
+ if b != nil {
+ auo.SetPrimary(*b)
+ }
+ return auo
+}
+
// SetItemID sets the "item" edge to the Item entity by ID.
func (auo *AttachmentUpdateOne) SetItemID(id uuid.UUID) *AttachmentUpdateOne {
auo.mutation.SetItemID(id)
@@ -355,6 +328,12 @@ func (auo *AttachmentUpdateOne) ClearDocument() *AttachmentUpdateOne {
return auo
}
+// Where appends a list predicates to the AttachmentUpdate builder.
+func (auo *AttachmentUpdateOne) Where(ps ...predicate.Attachment) *AttachmentUpdateOne {
+ auo.mutation.Where(ps...)
+ return auo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *AttachmentUpdateOne {
@@ -364,47 +343,8 @@ func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *Attachme
// Save executes the query and returns the updated Attachment entity.
func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) {
- var (
- err error
- node *Attachment
- )
auo.defaults()
- if len(auo.hooks) == 0 {
- if err = auo.check(); err != nil {
- return nil, err
- }
- node, err = auo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AttachmentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = auo.check(); err != nil {
- return nil, err
- }
- auo.mutation = mutation
- node, err = auo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(auo.hooks) - 1; i >= 0; i-- {
- if auo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = auo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, auo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Attachment)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from AttachmentMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -454,16 +394,10 @@ func (auo *AttachmentUpdateOne) check() error {
}
func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: attachment.Table,
- Columns: attachment.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
- },
+ if err := auo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(attachment.Table, attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID))
id, ok := auo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Attachment.id" for update`)}
@@ -489,18 +423,13 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
}
}
if value, ok := auo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: attachment.FieldUpdatedAt,
- })
+ _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := auo.mutation.GetType(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: attachment.FieldType,
- })
+ _spec.SetField(attachment.FieldType, field.TypeEnum, value)
+ }
+ if value, ok := auo.mutation.Primary(); ok {
+ _spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
}
if auo.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -510,10 +439,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -526,10 +452,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -545,10 +468,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -561,10 +481,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -583,5 +500,6 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
}
return nil, err
}
+ auo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/authroles.go b/backend/internal/data/ent/authroles.go
new file mode 100644
index 0000000..4daa0f6
--- /dev/null
+++ b/backend/internal/data/ent/authroles.go
@@ -0,0 +1,145 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "fmt"
+ "strings"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
+)
+
+// AuthRoles is the model entity for the AuthRoles schema.
+type AuthRoles struct {
+ config `json:"-"`
+ // ID of the ent.
+ ID int `json:"id,omitempty"`
+ // Role holds the value of the "role" field.
+ Role authroles.Role `json:"role,omitempty"`
+ // Edges holds the relations/edges for other nodes in the graph.
+ // The values are being populated by the AuthRolesQuery when eager-loading is set.
+ Edges AuthRolesEdges `json:"edges"`
+ auth_tokens_roles *uuid.UUID
+ selectValues sql.SelectValues
+}
+
+// AuthRolesEdges holds the relations/edges for other nodes in the graph.
+type AuthRolesEdges struct {
+ // Token holds the value of the token edge.
+ Token *AuthTokens `json:"token,omitempty"`
+ // loadedTypes holds the information for reporting if a
+ // type was loaded (or requested) in eager-loading or not.
+ loadedTypes [1]bool
+}
+
+// TokenOrErr returns the Token value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e AuthRolesEdges) TokenOrErr() (*AuthTokens, error) {
+ if e.loadedTypes[0] {
+ if e.Token == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: authtokens.Label}
+ }
+ return e.Token, nil
+ }
+ return nil, &NotLoadedError{edge: "token"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*AuthRoles) scanValues(columns []string) ([]any, error) {
+ values := make([]any, len(columns))
+ for i := range columns {
+ switch columns[i] {
+ case authroles.FieldID:
+ values[i] = new(sql.NullInt64)
+ case authroles.FieldRole:
+ values[i] = new(sql.NullString)
+ case authroles.ForeignKeys[0]: // auth_tokens_roles
+ values[i] = &sql.NullScanner{S: new(uuid.UUID)}
+ default:
+ values[i] = new(sql.UnknownType)
+ }
+ }
+ return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the AuthRoles fields.
+func (ar *AuthRoles) assignValues(columns []string, values []any) error {
+ if m, n := len(values), len(columns); m < n {
+ return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+ }
+ for i := range columns {
+ switch columns[i] {
+ case authroles.FieldID:
+ value, ok := values[i].(*sql.NullInt64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field id", value)
+ }
+ ar.ID = int(value.Int64)
+ case authroles.FieldRole:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field role", values[i])
+ } else if value.Valid {
+ ar.Role = authroles.Role(value.String)
+ }
+ case authroles.ForeignKeys[0]:
+ if value, ok := values[i].(*sql.NullScanner); !ok {
+ return fmt.Errorf("unexpected type %T for field auth_tokens_roles", values[i])
+ } else if value.Valid {
+ ar.auth_tokens_roles = new(uuid.UUID)
+ *ar.auth_tokens_roles = *value.S.(*uuid.UUID)
+ }
+ default:
+ ar.selectValues.Set(columns[i], values[i])
+ }
+ }
+ return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the AuthRoles.
+// This includes values selected through modifiers, order, etc.
+func (ar *AuthRoles) Value(name string) (ent.Value, error) {
+ return ar.selectValues.Get(name)
+}
+
+// QueryToken queries the "token" edge of the AuthRoles entity.
+func (ar *AuthRoles) QueryToken() *AuthTokensQuery {
+ return NewAuthRolesClient(ar.config).QueryToken(ar)
+}
+
+// Update returns a builder for updating this AuthRoles.
+// Note that you need to call AuthRoles.Unwrap() before calling this method if this AuthRoles
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (ar *AuthRoles) Update() *AuthRolesUpdateOne {
+ return NewAuthRolesClient(ar.config).UpdateOne(ar)
+}
+
+// Unwrap unwraps the AuthRoles entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (ar *AuthRoles) Unwrap() *AuthRoles {
+ _tx, ok := ar.config.driver.(*txDriver)
+ if !ok {
+ panic("ent: AuthRoles is not a transactional entity")
+ }
+ ar.config.driver = _tx.drv
+ return ar
+}
+
+// String implements the fmt.Stringer.
+func (ar *AuthRoles) String() string {
+ var builder strings.Builder
+ builder.WriteString("AuthRoles(")
+ builder.WriteString(fmt.Sprintf("id=%v, ", ar.ID))
+ builder.WriteString("role=")
+ builder.WriteString(fmt.Sprintf("%v", ar.Role))
+ builder.WriteByte(')')
+ return builder.String()
+}
+
+// AuthRolesSlice is a parsable slice of AuthRoles.
+type AuthRolesSlice []*AuthRoles
diff --git a/backend/internal/data/ent/authroles/authroles.go b/backend/internal/data/ent/authroles/authroles.go
new file mode 100644
index 0000000..bb5e87a
--- /dev/null
+++ b/backend/internal/data/ent/authroles/authroles.go
@@ -0,0 +1,111 @@
+// Code generated by ent, DO NOT EDIT.
+
+package authroles
+
+import (
+ "fmt"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+ // Label holds the string label denoting the authroles type in the database.
+ Label = "auth_roles"
+ // FieldID holds the string denoting the id field in the database.
+ FieldID = "id"
+ // FieldRole holds the string denoting the role field in the database.
+ FieldRole = "role"
+ // EdgeToken holds the string denoting the token edge name in mutations.
+ EdgeToken = "token"
+ // Table holds the table name of the authroles in the database.
+ Table = "auth_roles"
+ // TokenTable is the table that holds the token relation/edge.
+ TokenTable = "auth_roles"
+ // TokenInverseTable is the table name for the AuthTokens entity.
+ // It exists in this package in order to avoid circular dependency with the "authtokens" package.
+ TokenInverseTable = "auth_tokens"
+ // TokenColumn is the table column denoting the token relation/edge.
+ TokenColumn = "auth_tokens_roles"
+)
+
+// Columns holds all SQL columns for authroles fields.
+var Columns = []string{
+ FieldID,
+ FieldRole,
+}
+
+// ForeignKeys holds the SQL foreign-keys that are owned by the "auth_roles"
+// table and are not defined as standalone fields in the schema.
+var ForeignKeys = []string{
+ "auth_tokens_roles",
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+ for i := range Columns {
+ if column == Columns[i] {
+ return true
+ }
+ }
+ for i := range ForeignKeys {
+ if column == ForeignKeys[i] {
+ return true
+ }
+ }
+ return false
+}
+
+// Role defines the type for the "role" enum field.
+type Role string
+
+// RoleUser is the default value of the Role enum.
+const DefaultRole = RoleUser
+
+// Role values.
+const (
+ RoleAdmin Role = "admin"
+ RoleUser Role = "user"
+ RoleAttachments Role = "attachments"
+)
+
+func (r Role) String() string {
+ return string(r)
+}
+
+// RoleValidator is a validator for the "role" field enum values. It is called by the builders before save.
+func RoleValidator(r Role) error {
+ switch r {
+ case RoleAdmin, RoleUser, RoleAttachments:
+ return nil
+ default:
+ return fmt.Errorf("authroles: invalid enum value for role field: %q", r)
+ }
+}
+
+// OrderOption defines the ordering options for the AuthRoles queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByRole orders the results by the role field.
+func ByRole(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldRole, opts...).ToFunc()
+}
+
+// ByTokenField orders the results by token field.
+func ByTokenField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newTokenStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newTokenStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(TokenInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
+ )
+}
diff --git a/backend/internal/data/ent/authroles/where.go b/backend/internal/data/ent/authroles/where.go
new file mode 100644
index 0000000..bb5b54a
--- /dev/null
+++ b/backend/internal/data/ent/authroles/where.go
@@ -0,0 +1,112 @@
+// Code generated by ent, DO NOT EDIT.
+
+package authroles
+
+import (
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldLTE(FieldID, id))
+}
+
+// RoleEQ applies the EQ predicate on the "role" field.
+func RoleEQ(v Role) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldEQ(FieldRole, v))
+}
+
+// RoleNEQ applies the NEQ predicate on the "role" field.
+func RoleNEQ(v Role) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldNEQ(FieldRole, v))
+}
+
+// RoleIn applies the In predicate on the "role" field.
+func RoleIn(vs ...Role) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldIn(FieldRole, vs...))
+}
+
+// RoleNotIn applies the NotIn predicate on the "role" field.
+func RoleNotIn(vs ...Role) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.FieldNotIn(FieldRole, vs...))
+}
+
+// HasToken applies the HasEdge predicate on the "token" edge.
+func HasToken() predicate.AuthRoles {
+ return predicate.AuthRoles(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasTokenWith applies the HasEdge predicate on the "token" edge with a given conditions (other predicates).
+func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles {
+ return predicate.AuthRoles(func(s *sql.Selector) {
+ step := newTokenStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.AuthRoles) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.AuthRoles) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.AuthRoles) predicate.AuthRoles {
+ return predicate.AuthRoles(sql.NotPredicates(p))
+}
diff --git a/backend/internal/data/ent/authroles_create.go b/backend/internal/data/ent/authroles_create.go
new file mode 100644
index 0000000..19e594f
--- /dev/null
+++ b/backend/internal/data/ent/authroles_create.go
@@ -0,0 +1,244 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
+)
+
+// AuthRolesCreate is the builder for creating a AuthRoles entity.
+type AuthRolesCreate struct {
+ config
+ mutation *AuthRolesMutation
+ hooks []Hook
+}
+
+// SetRole sets the "role" field.
+func (arc *AuthRolesCreate) SetRole(a authroles.Role) *AuthRolesCreate {
+ arc.mutation.SetRole(a)
+ return arc
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (arc *AuthRolesCreate) SetNillableRole(a *authroles.Role) *AuthRolesCreate {
+ if a != nil {
+ arc.SetRole(*a)
+ }
+ return arc
+}
+
+// SetTokenID sets the "token" edge to the AuthTokens entity by ID.
+func (arc *AuthRolesCreate) SetTokenID(id uuid.UUID) *AuthRolesCreate {
+ arc.mutation.SetTokenID(id)
+ return arc
+}
+
+// SetNillableTokenID sets the "token" edge to the AuthTokens entity by ID if the given value is not nil.
+func (arc *AuthRolesCreate) SetNillableTokenID(id *uuid.UUID) *AuthRolesCreate {
+ if id != nil {
+ arc = arc.SetTokenID(*id)
+ }
+ return arc
+}
+
+// SetToken sets the "token" edge to the AuthTokens entity.
+func (arc *AuthRolesCreate) SetToken(a *AuthTokens) *AuthRolesCreate {
+ return arc.SetTokenID(a.ID)
+}
+
+// Mutation returns the AuthRolesMutation object of the builder.
+func (arc *AuthRolesCreate) Mutation() *AuthRolesMutation {
+ return arc.mutation
+}
+
+// Save creates the AuthRoles in the database.
+func (arc *AuthRolesCreate) Save(ctx context.Context) (*AuthRoles, error) {
+ arc.defaults()
+ return withHooks(ctx, arc.sqlSave, arc.mutation, arc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (arc *AuthRolesCreate) SaveX(ctx context.Context) *AuthRoles {
+ v, err := arc.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (arc *AuthRolesCreate) Exec(ctx context.Context) error {
+ _, err := arc.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (arc *AuthRolesCreate) ExecX(ctx context.Context) {
+ if err := arc.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (arc *AuthRolesCreate) defaults() {
+ if _, ok := arc.mutation.Role(); !ok {
+ v := authroles.DefaultRole
+ arc.mutation.SetRole(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (arc *AuthRolesCreate) check() error {
+ if _, ok := arc.mutation.Role(); !ok {
+ return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "AuthRoles.role"`)}
+ }
+ if v, ok := arc.mutation.Role(); ok {
+ if err := authroles.RoleValidator(v); err != nil {
+ return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "AuthRoles.role": %w`, err)}
+ }
+ }
+ return nil
+}
+
+func (arc *AuthRolesCreate) sqlSave(ctx context.Context) (*AuthRoles, error) {
+ if err := arc.check(); err != nil {
+ return nil, err
+ }
+ _node, _spec := arc.createSpec()
+ if err := sqlgraph.CreateNode(ctx, arc.driver, _spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ id := _spec.ID.Value.(int64)
+ _node.ID = int(id)
+ arc.mutation.id = &_node.ID
+ arc.mutation.done = true
+ return _node, nil
+}
+
+func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) {
+ var (
+ _node = &AuthRoles{config: arc.config}
+ _spec = sqlgraph.NewCreateSpec(authroles.Table, sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt))
+ )
+ if value, ok := arc.mutation.Role(); ok {
+ _spec.SetField(authroles.FieldRole, field.TypeEnum, value)
+ _node.Role = value
+ }
+ if nodes := arc.mutation.TokenIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: true,
+ Table: authroles.TokenTable,
+ Columns: []string{authroles.TokenColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.auth_tokens_roles = &nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// AuthRolesCreateBulk is the builder for creating many AuthRoles entities in bulk.
+type AuthRolesCreateBulk struct {
+ config
+ err error
+ builders []*AuthRolesCreate
+}
+
+// Save creates the AuthRoles entities in the database.
+func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error) {
+ if arcb.err != nil {
+ return nil, arcb.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(arcb.builders))
+ nodes := make([]*AuthRoles, len(arcb.builders))
+ mutators := make([]Mutator, len(arcb.builders))
+ for i := range arcb.builders {
+ func(i int, root context.Context) {
+ builder := arcb.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*AuthRolesMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, arcb.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, arcb.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ if specs[i].ID.Value != nil {
+ id := specs[i].ID.Value.(int64)
+ nodes[i].ID = int(id)
+ }
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, arcb.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (arcb *AuthRolesCreateBulk) SaveX(ctx context.Context) []*AuthRoles {
+ v, err := arcb.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (arcb *AuthRolesCreateBulk) Exec(ctx context.Context) error {
+ _, err := arcb.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (arcb *AuthRolesCreateBulk) ExecX(ctx context.Context) {
+ if err := arcb.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/authroles_delete.go b/backend/internal/data/ent/authroles_delete.go
new file mode 100644
index 0000000..68a0dfc
--- /dev/null
+++ b/backend/internal/data/ent/authroles_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// AuthRolesDelete is the builder for deleting a AuthRoles entity.
+type AuthRolesDelete struct {
+ config
+ hooks []Hook
+ mutation *AuthRolesMutation
+}
+
+// Where appends a list predicates to the AuthRolesDelete builder.
+func (ard *AuthRolesDelete) Where(ps ...predicate.AuthRoles) *AuthRolesDelete {
+ ard.mutation.Where(ps...)
+ return ard
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (ard *AuthRolesDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, ard.sqlExec, ard.mutation, ard.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ard *AuthRolesDelete) ExecX(ctx context.Context) int {
+ n, err := ard.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (ard *AuthRolesDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(authroles.Table, sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt))
+ if ps := ard.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, ard.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ ard.mutation.done = true
+ return affected, err
+}
+
+// AuthRolesDeleteOne is the builder for deleting a single AuthRoles entity.
+type AuthRolesDeleteOne struct {
+ ard *AuthRolesDelete
+}
+
+// Where appends a list predicates to the AuthRolesDelete builder.
+func (ardo *AuthRolesDeleteOne) Where(ps ...predicate.AuthRoles) *AuthRolesDeleteOne {
+ ardo.ard.mutation.Where(ps...)
+ return ardo
+}
+
+// Exec executes the deletion query.
+func (ardo *AuthRolesDeleteOne) Exec(ctx context.Context) error {
+ n, err := ardo.ard.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{authroles.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ardo *AuthRolesDeleteOne) ExecX(ctx context.Context) {
+ if err := ardo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/authroles_query.go b/backend/internal/data/ent/authroles_query.go
new file mode 100644
index 0000000..bf47577
--- /dev/null
+++ b/backend/internal/data/ent/authroles_query.go
@@ -0,0 +1,614 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// AuthRolesQuery is the builder for querying AuthRoles entities.
+type AuthRolesQuery struct {
+ config
+ ctx *QueryContext
+ order []authroles.OrderOption
+ inters []Interceptor
+ predicates []predicate.AuthRoles
+ withToken *AuthTokensQuery
+ withFKs bool
+ // intermediate query (i.e. traversal path).
+ sql *sql.Selector
+ path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the AuthRolesQuery builder.
+func (arq *AuthRolesQuery) Where(ps ...predicate.AuthRoles) *AuthRolesQuery {
+ arq.predicates = append(arq.predicates, ps...)
+ return arq
+}
+
+// Limit the number of records to be returned by this query.
+func (arq *AuthRolesQuery) Limit(limit int) *AuthRolesQuery {
+ arq.ctx.Limit = &limit
+ return arq
+}
+
+// Offset to start from.
+func (arq *AuthRolesQuery) Offset(offset int) *AuthRolesQuery {
+ arq.ctx.Offset = &offset
+ return arq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (arq *AuthRolesQuery) Unique(unique bool) *AuthRolesQuery {
+ arq.ctx.Unique = &unique
+ return arq
+}
+
+// Order specifies how the records should be ordered.
+func (arq *AuthRolesQuery) Order(o ...authroles.OrderOption) *AuthRolesQuery {
+ arq.order = append(arq.order, o...)
+ return arq
+}
+
+// QueryToken chains the current query on the "token" edge.
+func (arq *AuthRolesQuery) QueryToken() *AuthTokensQuery {
+ query := (&AuthTokensClient{config: arq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := arq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := arq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(authroles.Table, authroles.FieldID, selector),
+ sqlgraph.To(authtokens.Table, authtokens.FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, true, authroles.TokenTable, authroles.TokenColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(arq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
+// First returns the first AuthRoles entity from the query.
+// Returns a *NotFoundError when no AuthRoles was found.
+func (arq *AuthRolesQuery) First(ctx context.Context) (*AuthRoles, error) {
+ nodes, err := arq.Limit(1).All(setContextOp(ctx, arq.ctx, "First"))
+ if err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nil, &NotFoundError{authroles.Label}
+ }
+ return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (arq *AuthRolesQuery) FirstX(ctx context.Context) *AuthRoles {
+ node, err := arq.First(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return node
+}
+
+// FirstID returns the first AuthRoles ID from the query.
+// Returns a *NotFoundError when no AuthRoles ID was found.
+func (arq *AuthRolesQuery) FirstID(ctx context.Context) (id int, err error) {
+ var ids []int
+ if ids, err = arq.Limit(1).IDs(setContextOp(ctx, arq.ctx, "FirstID")); err != nil {
+ return
+ }
+ if len(ids) == 0 {
+ err = &NotFoundError{authroles.Label}
+ return
+ }
+ return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (arq *AuthRolesQuery) FirstIDX(ctx context.Context) int {
+ id, err := arq.FirstID(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return id
+}
+
+// Only returns a single AuthRoles entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one AuthRoles entity is found.
+// Returns a *NotFoundError when no AuthRoles entities are found.
+func (arq *AuthRolesQuery) Only(ctx context.Context) (*AuthRoles, error) {
+ nodes, err := arq.Limit(2).All(setContextOp(ctx, arq.ctx, "Only"))
+ if err != nil {
+ return nil, err
+ }
+ switch len(nodes) {
+ case 1:
+ return nodes[0], nil
+ case 0:
+ return nil, &NotFoundError{authroles.Label}
+ default:
+ return nil, &NotSingularError{authroles.Label}
+ }
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (arq *AuthRolesQuery) OnlyX(ctx context.Context) *AuthRoles {
+ node, err := arq.Only(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// OnlyID is like Only, but returns the only AuthRoles ID in the query.
+// Returns a *NotSingularError when more than one AuthRoles ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (arq *AuthRolesQuery) OnlyID(ctx context.Context) (id int, err error) {
+ var ids []int
+ if ids, err = arq.Limit(2).IDs(setContextOp(ctx, arq.ctx, "OnlyID")); err != nil {
+ return
+ }
+ switch len(ids) {
+ case 1:
+ id = ids[0]
+ case 0:
+ err = &NotFoundError{authroles.Label}
+ default:
+ err = &NotSingularError{authroles.Label}
+ }
+ return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (arq *AuthRolesQuery) OnlyIDX(ctx context.Context) int {
+ id, err := arq.OnlyID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// All executes the query and returns a list of AuthRolesSlice.
+func (arq *AuthRolesQuery) All(ctx context.Context) ([]*AuthRoles, error) {
+ ctx = setContextOp(ctx, arq.ctx, "All")
+ if err := arq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ qr := querierAll[[]*AuthRoles, *AuthRolesQuery]()
+ return withInterceptors[[]*AuthRoles](ctx, arq, qr, arq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (arq *AuthRolesQuery) AllX(ctx context.Context) []*AuthRoles {
+ nodes, err := arq.All(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return nodes
+}
+
+// IDs executes the query and returns a list of AuthRoles IDs.
+func (arq *AuthRolesQuery) IDs(ctx context.Context) (ids []int, err error) {
+ if arq.ctx.Unique == nil && arq.path != nil {
+ arq.Unique(true)
+ }
+ ctx = setContextOp(ctx, arq.ctx, "IDs")
+ if err = arq.Select(authroles.FieldID).Scan(ctx, &ids); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (arq *AuthRolesQuery) IDsX(ctx context.Context) []int {
+ ids, err := arq.IDs(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return ids
+}
+
+// Count returns the count of the given query.
+func (arq *AuthRolesQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, arq.ctx, "Count")
+ if err := arq.prepareQuery(ctx); err != nil {
+ return 0, err
+ }
+ return withInterceptors[int](ctx, arq, querierCount[*AuthRolesQuery](), arq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (arq *AuthRolesQuery) CountX(ctx context.Context) int {
+ count, err := arq.Count(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (arq *AuthRolesQuery) Exist(ctx context.Context) (bool, error) {
+ ctx = setContextOp(ctx, arq.ctx, "Exist")
+ switch _, err := arq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
+ }
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (arq *AuthRolesQuery) ExistX(ctx context.Context) bool {
+ exist, err := arq.Exist(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return exist
+}
+
+// Clone returns a duplicate of the AuthRolesQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (arq *AuthRolesQuery) Clone() *AuthRolesQuery {
+ if arq == nil {
+ return nil
+ }
+ return &AuthRolesQuery{
+ config: arq.config,
+ ctx: arq.ctx.Clone(),
+ order: append([]authroles.OrderOption{}, arq.order...),
+ inters: append([]Interceptor{}, arq.inters...),
+ predicates: append([]predicate.AuthRoles{}, arq.predicates...),
+ withToken: arq.withToken.Clone(),
+ // clone intermediate query.
+ sql: arq.sql.Clone(),
+ path: arq.path,
+ }
+}
+
+// WithToken tells the query-builder to eager-load the nodes that are connected to
+// the "token" edge. The optional arguments are used to configure the query builder of the edge.
+func (arq *AuthRolesQuery) WithToken(opts ...func(*AuthTokensQuery)) *AuthRolesQuery {
+ query := (&AuthTokensClient{config: arq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ arq.withToken = query
+ return arq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// Role authroles.Role `json:"role,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.AuthRoles.Query().
+// GroupBy(authroles.FieldRole).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (arq *AuthRolesQuery) GroupBy(field string, fields ...string) *AuthRolesGroupBy {
+ arq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &AuthRolesGroupBy{build: arq}
+ grbuild.flds = &arq.ctx.Fields
+ grbuild.label = authroles.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// Role authroles.Role `json:"role,omitempty"`
+// }
+//
+// client.AuthRoles.Query().
+// Select(authroles.FieldRole).
+// Scan(ctx, &v)
+func (arq *AuthRolesQuery) Select(fields ...string) *AuthRolesSelect {
+ arq.ctx.Fields = append(arq.ctx.Fields, fields...)
+ sbuild := &AuthRolesSelect{AuthRolesQuery: arq}
+ sbuild.label = authroles.Label
+ sbuild.flds, sbuild.scan = &arq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a AuthRolesSelect configured with the given aggregations.
+func (arq *AuthRolesQuery) Aggregate(fns ...AggregateFunc) *AuthRolesSelect {
+ return arq.Select().Aggregate(fns...)
+}
+
+func (arq *AuthRolesQuery) prepareQuery(ctx context.Context) error {
+ for _, inter := range arq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, arq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range arq.ctx.Fields {
+ if !authroles.ValidColumn(f) {
+ return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ }
+ if arq.path != nil {
+ prev, err := arq.path(ctx)
+ if err != nil {
+ return err
+ }
+ arq.sql = prev
+ }
+ return nil
+}
+
+func (arq *AuthRolesQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthRoles, error) {
+ var (
+ nodes = []*AuthRoles{}
+ withFKs = arq.withFKs
+ _spec = arq.querySpec()
+ loadedTypes = [1]bool{
+ arq.withToken != nil,
+ }
+ )
+ if arq.withToken != nil {
+ withFKs = true
+ }
+ if withFKs {
+ _spec.Node.Columns = append(_spec.Node.Columns, authroles.ForeignKeys...)
+ }
+ _spec.ScanValues = func(columns []string) ([]any, error) {
+ return (*AuthRoles).scanValues(nil, columns)
+ }
+ _spec.Assign = func(columns []string, values []any) error {
+ node := &AuthRoles{config: arq.config}
+ nodes = append(nodes, node)
+ node.Edges.loadedTypes = loadedTypes
+ return node.assignValues(columns, values)
+ }
+ for i := range hooks {
+ hooks[i](ctx, _spec)
+ }
+ if err := sqlgraph.QueryNodes(ctx, arq.driver, _spec); err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nodes, nil
+ }
+ if query := arq.withToken; query != nil {
+ if err := arq.loadToken(ctx, query, nodes, nil,
+ func(n *AuthRoles, e *AuthTokens) { n.Edges.Token = e }); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+func (arq *AuthRolesQuery) loadToken(ctx context.Context, query *AuthTokensQuery, nodes []*AuthRoles, init func(*AuthRoles), assign func(*AuthRoles, *AuthTokens)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*AuthRoles)
+ for i := range nodes {
+ if nodes[i].auth_tokens_roles == nil {
+ continue
+ }
+ fk := *nodes[i].auth_tokens_roles
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(authtokens.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "auth_tokens_roles" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
+
+func (arq *AuthRolesQuery) sqlCount(ctx context.Context) (int, error) {
+ _spec := arq.querySpec()
+ _spec.Node.Columns = arq.ctx.Fields
+ if len(arq.ctx.Fields) > 0 {
+ _spec.Unique = arq.ctx.Unique != nil && *arq.ctx.Unique
+ }
+ return sqlgraph.CountNodes(ctx, arq.driver, _spec)
+}
+
+func (arq *AuthRolesQuery) querySpec() *sqlgraph.QuerySpec {
+ _spec := sqlgraph.NewQuerySpec(authroles.Table, authroles.Columns, sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt))
+ _spec.From = arq.sql
+ if unique := arq.ctx.Unique; unique != nil {
+ _spec.Unique = *unique
+ } else if arq.path != nil {
+ _spec.Unique = true
+ }
+ if fields := arq.ctx.Fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, authroles.FieldID)
+ for i := range fields {
+ if fields[i] != authroles.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+ }
+ }
+ }
+ if ps := arq.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if limit := arq.ctx.Limit; limit != nil {
+ _spec.Limit = *limit
+ }
+ if offset := arq.ctx.Offset; offset != nil {
+ _spec.Offset = *offset
+ }
+ if ps := arq.order; len(ps) > 0 {
+ _spec.Order = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ return _spec
+}
+
+func (arq *AuthRolesQuery) sqlQuery(ctx context.Context) *sql.Selector {
+ builder := sql.Dialect(arq.driver.Dialect())
+ t1 := builder.Table(authroles.Table)
+ columns := arq.ctx.Fields
+ if len(columns) == 0 {
+ columns = authroles.Columns
+ }
+ selector := builder.Select(t1.Columns(columns...)...).From(t1)
+ if arq.sql != nil {
+ selector = arq.sql
+ selector.Select(selector.Columns(columns...)...)
+ }
+ if arq.ctx.Unique != nil && *arq.ctx.Unique {
+ selector.Distinct()
+ }
+ for _, p := range arq.predicates {
+ p(selector)
+ }
+ for _, p := range arq.order {
+ p(selector)
+ }
+ if offset := arq.ctx.Offset; offset != nil {
+ // limit is mandatory for offset clause. We start
+ // with default value, and override it below if needed.
+ selector.Offset(*offset).Limit(math.MaxInt32)
+ }
+ if limit := arq.ctx.Limit; limit != nil {
+ selector.Limit(*limit)
+ }
+ return selector
+}
+
+// AuthRolesGroupBy is the group-by builder for AuthRoles entities.
+type AuthRolesGroupBy struct {
+ selector
+ build *AuthRolesQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (argb *AuthRolesGroupBy) Aggregate(fns ...AggregateFunc) *AuthRolesGroupBy {
+ argb.fns = append(argb.fns, fns...)
+ return argb
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (argb *AuthRolesGroupBy) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, argb.build.ctx, "GroupBy")
+ if err := argb.build.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*AuthRolesQuery, *AuthRolesGroupBy](ctx, argb.build, argb, argb.build.inters, v)
+}
+
+func (argb *AuthRolesGroupBy) sqlScan(ctx context.Context, root *AuthRolesQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
+ aggregation := make([]string, 0, len(argb.fns))
+ for _, fn := range argb.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ if len(selector.SelectedColumns()) == 0 {
+ columns := make([]string, 0, len(*argb.flds)+len(argb.fns))
+ for _, f := range *argb.flds {
+ columns = append(columns, selector.C(f))
+ }
+ columns = append(columns, aggregation...)
+ selector.Select(columns...)
+ }
+ selector.GroupBy(selector.Columns(*argb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := argb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
+
+// AuthRolesSelect is the builder for selecting fields of AuthRoles entities.
+type AuthRolesSelect struct {
+ *AuthRolesQuery
+ selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ars *AuthRolesSelect) Aggregate(fns ...AggregateFunc) *AuthRolesSelect {
+ ars.fns = append(ars.fns, fns...)
+ return ars
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (ars *AuthRolesSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ars.ctx, "Select")
+ if err := ars.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*AuthRolesQuery, *AuthRolesSelect](ctx, ars.AuthRolesQuery, ars, ars.inters, v)
+}
+
+func (ars *AuthRolesSelect) sqlScan(ctx context.Context, root *AuthRolesQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ars.fns))
+ for _, fn := range ars.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ars.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ars.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
diff --git a/backend/internal/data/ent/authroles_update.go b/backend/internal/data/ent/authroles_update.go
new file mode 100644
index 0000000..fbec4f9
--- /dev/null
+++ b/backend/internal/data/ent/authroles_update.go
@@ -0,0 +1,345 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// AuthRolesUpdate is the builder for updating AuthRoles entities.
+type AuthRolesUpdate struct {
+ config
+ hooks []Hook
+ mutation *AuthRolesMutation
+}
+
+// Where appends a list predicates to the AuthRolesUpdate builder.
+func (aru *AuthRolesUpdate) Where(ps ...predicate.AuthRoles) *AuthRolesUpdate {
+ aru.mutation.Where(ps...)
+ return aru
+}
+
+// SetRole sets the "role" field.
+func (aru *AuthRolesUpdate) SetRole(a authroles.Role) *AuthRolesUpdate {
+ aru.mutation.SetRole(a)
+ return aru
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (aru *AuthRolesUpdate) SetNillableRole(a *authroles.Role) *AuthRolesUpdate {
+ if a != nil {
+ aru.SetRole(*a)
+ }
+ return aru
+}
+
+// SetTokenID sets the "token" edge to the AuthTokens entity by ID.
+func (aru *AuthRolesUpdate) SetTokenID(id uuid.UUID) *AuthRolesUpdate {
+ aru.mutation.SetTokenID(id)
+ return aru
+}
+
+// SetNillableTokenID sets the "token" edge to the AuthTokens entity by ID if the given value is not nil.
+func (aru *AuthRolesUpdate) SetNillableTokenID(id *uuid.UUID) *AuthRolesUpdate {
+ if id != nil {
+ aru = aru.SetTokenID(*id)
+ }
+ return aru
+}
+
+// SetToken sets the "token" edge to the AuthTokens entity.
+func (aru *AuthRolesUpdate) SetToken(a *AuthTokens) *AuthRolesUpdate {
+ return aru.SetTokenID(a.ID)
+}
+
+// Mutation returns the AuthRolesMutation object of the builder.
+func (aru *AuthRolesUpdate) Mutation() *AuthRolesMutation {
+ return aru.mutation
+}
+
+// ClearToken clears the "token" edge to the AuthTokens entity.
+func (aru *AuthRolesUpdate) ClearToken() *AuthRolesUpdate {
+ aru.mutation.ClearToken()
+ return aru
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (aru *AuthRolesUpdate) Save(ctx context.Context) (int, error) {
+ return withHooks(ctx, aru.sqlSave, aru.mutation, aru.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (aru *AuthRolesUpdate) SaveX(ctx context.Context) int {
+ affected, err := aru.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return affected
+}
+
+// Exec executes the query.
+func (aru *AuthRolesUpdate) Exec(ctx context.Context) error {
+ _, err := aru.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (aru *AuthRolesUpdate) ExecX(ctx context.Context) {
+ if err := aru.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (aru *AuthRolesUpdate) check() error {
+ if v, ok := aru.mutation.Role(); ok {
+ if err := authroles.RoleValidator(v); err != nil {
+ return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "AuthRoles.role": %w`, err)}
+ }
+ }
+ return nil
+}
+
+func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) {
+ if err := aru.check(); err != nil {
+ return n, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(authroles.Table, authroles.Columns, sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt))
+ if ps := aru.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := aru.mutation.Role(); ok {
+ _spec.SetField(authroles.FieldRole, field.TypeEnum, value)
+ }
+ if aru.mutation.TokenCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: true,
+ Table: authroles.TokenTable,
+ Columns: []string{authroles.TokenColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := aru.mutation.TokenIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: true,
+ Table: authroles.TokenTable,
+ Columns: []string{authroles.TokenColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if n, err = sqlgraph.UpdateNodes(ctx, aru.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{authroles.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return 0, err
+ }
+ aru.mutation.done = true
+ return n, nil
+}
+
+// AuthRolesUpdateOne is the builder for updating a single AuthRoles entity.
+type AuthRolesUpdateOne struct {
+ config
+ fields []string
+ hooks []Hook
+ mutation *AuthRolesMutation
+}
+
+// SetRole sets the "role" field.
+func (aruo *AuthRolesUpdateOne) SetRole(a authroles.Role) *AuthRolesUpdateOne {
+ aruo.mutation.SetRole(a)
+ return aruo
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (aruo *AuthRolesUpdateOne) SetNillableRole(a *authroles.Role) *AuthRolesUpdateOne {
+ if a != nil {
+ aruo.SetRole(*a)
+ }
+ return aruo
+}
+
+// SetTokenID sets the "token" edge to the AuthTokens entity by ID.
+func (aruo *AuthRolesUpdateOne) SetTokenID(id uuid.UUID) *AuthRolesUpdateOne {
+ aruo.mutation.SetTokenID(id)
+ return aruo
+}
+
+// SetNillableTokenID sets the "token" edge to the AuthTokens entity by ID if the given value is not nil.
+func (aruo *AuthRolesUpdateOne) SetNillableTokenID(id *uuid.UUID) *AuthRolesUpdateOne {
+ if id != nil {
+ aruo = aruo.SetTokenID(*id)
+ }
+ return aruo
+}
+
+// SetToken sets the "token" edge to the AuthTokens entity.
+func (aruo *AuthRolesUpdateOne) SetToken(a *AuthTokens) *AuthRolesUpdateOne {
+ return aruo.SetTokenID(a.ID)
+}
+
+// Mutation returns the AuthRolesMutation object of the builder.
+func (aruo *AuthRolesUpdateOne) Mutation() *AuthRolesMutation {
+ return aruo.mutation
+}
+
+// ClearToken clears the "token" edge to the AuthTokens entity.
+func (aruo *AuthRolesUpdateOne) ClearToken() *AuthRolesUpdateOne {
+ aruo.mutation.ClearToken()
+ return aruo
+}
+
+// Where appends a list predicates to the AuthRolesUpdate builder.
+func (aruo *AuthRolesUpdateOne) Where(ps ...predicate.AuthRoles) *AuthRolesUpdateOne {
+ aruo.mutation.Where(ps...)
+ return aruo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (aruo *AuthRolesUpdateOne) Select(field string, fields ...string) *AuthRolesUpdateOne {
+ aruo.fields = append([]string{field}, fields...)
+ return aruo
+}
+
+// Save executes the query and returns the updated AuthRoles entity.
+func (aruo *AuthRolesUpdateOne) Save(ctx context.Context) (*AuthRoles, error) {
+ return withHooks(ctx, aruo.sqlSave, aruo.mutation, aruo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (aruo *AuthRolesUpdateOne) SaveX(ctx context.Context) *AuthRoles {
+ node, err := aruo.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// Exec executes the query on the entity.
+func (aruo *AuthRolesUpdateOne) Exec(ctx context.Context) error {
+ _, err := aruo.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (aruo *AuthRolesUpdateOne) ExecX(ctx context.Context) {
+ if err := aruo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (aruo *AuthRolesUpdateOne) check() error {
+ if v, ok := aruo.mutation.Role(); ok {
+ if err := authroles.RoleValidator(v); err != nil {
+ return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "AuthRoles.role": %w`, err)}
+ }
+ }
+ return nil
+}
+
+func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles, err error) {
+ if err := aruo.check(); err != nil {
+ return _node, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(authroles.Table, authroles.Columns, sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt))
+ id, ok := aruo.mutation.ID()
+ if !ok {
+ return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthRoles.id" for update`)}
+ }
+ _spec.Node.ID.Value = id
+ if fields := aruo.fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, authroles.FieldID)
+ for _, f := range fields {
+ if !authroles.ValidColumn(f) {
+ return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ if f != authroles.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, f)
+ }
+ }
+ }
+ if ps := aruo.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := aruo.mutation.Role(); ok {
+ _spec.SetField(authroles.FieldRole, field.TypeEnum, value)
+ }
+ if aruo.mutation.TokenCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: true,
+ Table: authroles.TokenTable,
+ Columns: []string{authroles.TokenColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := aruo.mutation.TokenIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: true,
+ Table: authroles.TokenTable,
+ Columns: []string{authroles.TokenColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ _node = &AuthRoles{config: aruo.config}
+ _spec.Assign = _node.assignValues
+ _spec.ScanValues = _node.scanValues
+ if err = sqlgraph.UpdateNode(ctx, aruo.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{authroles.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ aruo.mutation.done = true
+ return _node, nil
+}
diff --git a/backend/internal/data/ent/authtokens.go b/backend/internal/data/ent/authtokens.go
index 6e8e53d..14299ba 100644
--- a/backend/internal/data/ent/authtokens.go
+++ b/backend/internal/data/ent/authtokens.go
@@ -7,8 +7,10 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -30,15 +32,18 @@ type AuthTokens struct {
// The values are being populated by the AuthTokensQuery when eager-loading is set.
Edges AuthTokensEdges `json:"edges"`
user_auth_tokens *uuid.UUID
+ selectValues sql.SelectValues
}
// AuthTokensEdges holds the relations/edges for other nodes in the graph.
type AuthTokensEdges struct {
// User holds the value of the user edge.
User *User `json:"user,omitempty"`
+ // Roles holds the value of the roles edge.
+ Roles *AuthRoles `json:"roles,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [1]bool
+ loadedTypes [2]bool
}
// UserOrErr returns the User value or an error if the edge
@@ -54,6 +59,19 @@ func (e AuthTokensEdges) UserOrErr() (*User, error) {
return nil, &NotLoadedError{edge: "user"}
}
+// RolesOrErr returns the Roles value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e AuthTokensEdges) RolesOrErr() (*AuthRoles, error) {
+ if e.loadedTypes[1] {
+ if e.Roles == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: authroles.Label}
+ }
+ return e.Roles, nil
+ }
+ return nil, &NotLoadedError{edge: "roles"}
+}
+
// scanValues returns the types for scanning values from sql.Rows.
func (*AuthTokens) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
@@ -68,7 +86,7 @@ func (*AuthTokens) scanValues(columns []string) ([]any, error) {
case authtokens.ForeignKeys[0]: // user_auth_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type AuthTokens", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -119,21 +137,34 @@ func (at *AuthTokens) assignValues(columns []string, values []any) error {
at.user_auth_tokens = new(uuid.UUID)
*at.user_auth_tokens = *value.S.(*uuid.UUID)
}
+ default:
+ at.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the AuthTokens.
+// This includes values selected through modifiers, order, etc.
+func (at *AuthTokens) Value(name string) (ent.Value, error) {
+ return at.selectValues.Get(name)
+}
+
// QueryUser queries the "user" edge of the AuthTokens entity.
func (at *AuthTokens) QueryUser() *UserQuery {
- return (&AuthTokensClient{config: at.config}).QueryUser(at)
+ return NewAuthTokensClient(at.config).QueryUser(at)
+}
+
+// QueryRoles queries the "roles" edge of the AuthTokens entity.
+func (at *AuthTokens) QueryRoles() *AuthRolesQuery {
+ return NewAuthTokensClient(at.config).QueryRoles(at)
}
// Update returns a builder for updating this AuthTokens.
// Note that you need to call AuthTokens.Unwrap() before calling this method if this AuthTokens
// was returned from a transaction, and the transaction was committed or rolled back.
func (at *AuthTokens) Update() *AuthTokensUpdateOne {
- return (&AuthTokensClient{config: at.config}).UpdateOne(at)
+ return NewAuthTokensClient(at.config).UpdateOne(at)
}
// Unwrap unwraps the AuthTokens entity that was returned from a transaction after it was closed,
@@ -169,9 +200,3 @@ func (at *AuthTokens) String() string {
// AuthTokensSlice is a parsable slice of AuthTokens.
type AuthTokensSlice []*AuthTokens
-
-func (at AuthTokensSlice) config(cfg config) {
- for _i := range at {
- at[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/authtokens/authtokens.go b/backend/internal/data/ent/authtokens/authtokens.go
index af22805..ff555df 100644
--- a/backend/internal/data/ent/authtokens/authtokens.go
+++ b/backend/internal/data/ent/authtokens/authtokens.go
@@ -5,6 +5,8 @@ package authtokens
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -23,6 +25,8 @@ const (
FieldExpiresAt = "expires_at"
// EdgeUser holds the string denoting the user edge name in mutations.
EdgeUser = "user"
+ // EdgeRoles holds the string denoting the roles edge name in mutations.
+ EdgeRoles = "roles"
// Table holds the table name of the authtokens in the database.
Table = "auth_tokens"
// UserTable is the table that holds the user relation/edge.
@@ -32,6 +36,13 @@ const (
UserInverseTable = "users"
// UserColumn is the table column denoting the user relation/edge.
UserColumn = "user_auth_tokens"
+ // RolesTable is the table that holds the roles relation/edge.
+ RolesTable = "auth_roles"
+ // RolesInverseTable is the table name for the AuthRoles entity.
+ // It exists in this package in order to avoid circular dependency with the "authroles" package.
+ RolesInverseTable = "auth_roles"
+ // RolesColumn is the table column denoting the roles relation/edge.
+ RolesColumn = "auth_tokens_roles"
)
// Columns holds all SQL columns for authtokens fields.
@@ -76,3 +87,54 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the AuthTokens queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByExpiresAt orders the results by the expires_at field.
+func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
+}
+
+// ByUserField orders the results by user field.
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByRolesField orders the results by roles field.
+func ByRolesField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newRolesStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newUserStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(UserInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+ )
+}
+func newRolesStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(RolesInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
+ )
+}
diff --git a/backend/internal/data/ent/authtokens/where.go b/backend/internal/data/ent/authtokens/where.go
index 5ef7df9..d3642d8 100644
--- a/backend/internal/data/ent/authtokens/where.go
+++ b/backend/internal/data/ent/authtokens/where.go
@@ -13,357 +13,227 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.AuthTokens(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.AuthTokens(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.AuthTokens(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldUpdatedAt, v))
}
// Token applies equality check predicate on the "token" field. It's identical to TokenEQ.
func Token(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldToken, v))
}
// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
func ExpiresAt(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldExpiresAt, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLTE(FieldUpdatedAt, v))
}
// TokenEQ applies the EQ predicate on the "token" field.
func TokenEQ(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldToken, v))
}
// TokenNEQ applies the NEQ predicate on the "token" field.
func TokenNEQ(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldNEQ(FieldToken, v))
}
// TokenIn applies the In predicate on the "token" field.
func TokenIn(vs ...[]byte) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldToken), v...))
- })
+ return predicate.AuthTokens(sql.FieldIn(FieldToken, vs...))
}
// TokenNotIn applies the NotIn predicate on the "token" field.
func TokenNotIn(vs ...[]byte) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldToken), v...))
- })
+ return predicate.AuthTokens(sql.FieldNotIn(FieldToken, vs...))
}
// TokenGT applies the GT predicate on the "token" field.
func TokenGT(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldGT(FieldToken, v))
}
// TokenGTE applies the GTE predicate on the "token" field.
func TokenGTE(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldGTE(FieldToken, v))
}
// TokenLT applies the LT predicate on the "token" field.
func TokenLT(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldLT(FieldToken, v))
}
// TokenLTE applies the LTE predicate on the "token" field.
func TokenLTE(v []byte) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldToken), v))
- })
+ return predicate.AuthTokens(sql.FieldLTE(FieldToken, v))
}
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
func ExpiresAtEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldEQ(FieldExpiresAt, v))
}
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
func ExpiresAtNEQ(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldNEQ(FieldExpiresAt, v))
}
// ExpiresAtIn applies the In predicate on the "expires_at" field.
func ExpiresAtIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldExpiresAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldIn(FieldExpiresAt, vs...))
}
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
func ExpiresAtNotIn(vs ...time.Time) predicate.AuthTokens {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldExpiresAt), v...))
- })
+ return predicate.AuthTokens(sql.FieldNotIn(FieldExpiresAt, vs...))
}
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
func ExpiresAtGT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGT(FieldExpiresAt, v))
}
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
func ExpiresAtGTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldGTE(FieldExpiresAt, v))
}
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
func ExpiresAtLT(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLT(FieldExpiresAt, v))
}
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
func ExpiresAtLTE(v time.Time) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldExpiresAt), v))
- })
+ return predicate.AuthTokens(sql.FieldLTE(FieldExpiresAt, v))
}
// HasUser applies the HasEdge predicate on the "user" edge.
@@ -371,7 +241,6 @@ func HasUser() predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(UserTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -380,12 +249,31 @@ func HasUser() predicate.AuthTokens {
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.AuthTokens {
+ return predicate.AuthTokens(func(s *sql.Selector) {
+ step := newUserStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasRoles applies the HasEdge predicate on the "roles" edge.
+func HasRoles() predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(UserInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+ sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
)
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasRolesWith applies the HasEdge predicate on the "roles" edge with a given conditions (other predicates).
+func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens {
+ return predicate.AuthTokens(func(s *sql.Selector) {
+ step := newRolesStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -396,32 +284,15 @@ func HasUserWith(preds ...predicate.User) predicate.AuthTokens {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AuthTokens) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.AuthTokens(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AuthTokens) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.AuthTokens(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.AuthTokens) predicate.AuthTokens {
- return predicate.AuthTokens(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.AuthTokens(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/authtokens_create.go b/backend/internal/data/ent/authtokens_create.go
index e05e849..afddb3b 100644
--- a/backend/internal/data/ent/authtokens_create.go
+++ b/backend/internal/data/ent/authtokens_create.go
@@ -11,6 +11,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -103,6 +104,25 @@ func (atc *AuthTokensCreate) SetUser(u *User) *AuthTokensCreate {
return atc.SetUserID(u.ID)
}
+// SetRolesID sets the "roles" edge to the AuthRoles entity by ID.
+func (atc *AuthTokensCreate) SetRolesID(id int) *AuthTokensCreate {
+ atc.mutation.SetRolesID(id)
+ return atc
+}
+
+// SetNillableRolesID sets the "roles" edge to the AuthRoles entity by ID if the given value is not nil.
+func (atc *AuthTokensCreate) SetNillableRolesID(id *int) *AuthTokensCreate {
+ if id != nil {
+ atc = atc.SetRolesID(*id)
+ }
+ return atc
+}
+
+// SetRoles sets the "roles" edge to the AuthRoles entity.
+func (atc *AuthTokensCreate) SetRoles(a *AuthRoles) *AuthTokensCreate {
+ return atc.SetRolesID(a.ID)
+}
+
// Mutation returns the AuthTokensMutation object of the builder.
func (atc *AuthTokensCreate) Mutation() *AuthTokensMutation {
return atc.mutation
@@ -110,50 +130,8 @@ func (atc *AuthTokensCreate) Mutation() *AuthTokensMutation {
// Save creates the AuthTokens in the database.
func (atc *AuthTokensCreate) Save(ctx context.Context) (*AuthTokens, error) {
- var (
- err error
- node *AuthTokens
- )
atc.defaults()
- if len(atc.hooks) == 0 {
- if err = atc.check(); err != nil {
- return nil, err
- }
- node, err = atc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AuthTokensMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = atc.check(); err != nil {
- return nil, err
- }
- atc.mutation = mutation
- if node, err = atc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(atc.hooks) - 1; i >= 0; i-- {
- if atc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = atc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, atc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*AuthTokens)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from AuthTokensMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -216,6 +194,9 @@ func (atc *AuthTokensCreate) check() error {
}
func (atc *AuthTokensCreate) sqlSave(ctx context.Context) (*AuthTokens, error) {
+ if err := atc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := atc.createSpec()
if err := sqlgraph.CreateNode(ctx, atc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -230,54 +211,34 @@ func (atc *AuthTokensCreate) sqlSave(ctx context.Context) (*AuthTokens, error) {
return nil, err
}
}
+ atc.mutation.id = &_node.ID
+ atc.mutation.done = true
return _node, nil
}
func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
var (
_node = &AuthTokens{config: atc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: authtokens.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(authtokens.Table, sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID))
)
if id, ok := atc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := atc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldCreatedAt,
- })
+ _spec.SetField(authtokens.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := atc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldUpdatedAt,
- })
+ _spec.SetField(authtokens.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := atc.mutation.Token(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: authtokens.FieldToken,
- })
+ _spec.SetField(authtokens.FieldToken, field.TypeBytes, value)
_node.Token = value
}
if value, ok := atc.mutation.ExpiresAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldExpiresAt,
- })
+ _spec.SetField(authtokens.FieldExpiresAt, field.TypeTime, value)
_node.ExpiresAt = value
}
if nodes := atc.mutation.UserIDs(); len(nodes) > 0 {
@@ -288,10 +249,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -300,17 +258,37 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
_node.user_auth_tokens = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
+ if nodes := atc.mutation.RolesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: false,
+ Table: authtokens.RolesTable,
+ Columns: []string{authtokens.RolesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
return _node, _spec
}
// AuthTokensCreateBulk is the builder for creating many AuthTokens entities in bulk.
type AuthTokensCreateBulk struct {
config
+ err error
builders []*AuthTokensCreate
}
// Save creates the AuthTokens entities in the database.
func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, error) {
+ if atcb.err != nil {
+ return nil, atcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(atcb.builders))
nodes := make([]*AuthTokens, len(atcb.builders))
mutators := make([]Mutator, len(atcb.builders))
@@ -327,8 +305,8 @@ func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, erro
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/authtokens_delete.go b/backend/internal/data/ent/authtokens_delete.go
index 5041362..4c29851 100644
--- a/backend/internal/data/ent/authtokens_delete.go
+++ b/backend/internal/data/ent/authtokens_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (atd *AuthTokensDelete) Where(ps ...predicate.AuthTokens) *AuthTokensDelete
// Exec executes the deletion query and returns how many vertices were deleted.
func (atd *AuthTokensDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(atd.hooks) == 0 {
- affected, err = atd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AuthTokensMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- atd.mutation = mutation
- affected, err = atd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(atd.hooks) - 1; i >= 0; i-- {
- if atd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = atd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, atd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (atd *AuthTokensDelete) ExecX(ctx context.Context) int {
}
func (atd *AuthTokensDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: authtokens.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(authtokens.Table, sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID))
if ps := atd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (atd *AuthTokensDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ atd.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type AuthTokensDeleteOne struct {
atd *AuthTokensDelete
}
+// Where appends a list predicates to the AuthTokensDelete builder.
+func (atdo *AuthTokensDeleteOne) Where(ps ...predicate.AuthTokens) *AuthTokensDeleteOne {
+ atdo.atd.mutation.Where(ps...)
+ return atdo
+}
+
// Exec executes the deletion query.
func (atdo *AuthTokensDeleteOne) Exec(ctx context.Context) error {
n, err := atdo.atd.Exec(ctx)
@@ -111,5 +82,7 @@ func (atdo *AuthTokensDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (atdo *AuthTokensDeleteOne) ExecX(ctx context.Context) {
- atdo.atd.ExecX(ctx)
+ if err := atdo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/authtokens_query.go b/backend/internal/data/ent/authtokens_query.go
index 77c13e2..238ab88 100644
--- a/backend/internal/data/ent/authtokens_query.go
+++ b/backend/internal/data/ent/authtokens_query.go
@@ -4,6 +4,7 @@ package ent
import (
"context"
+ "database/sql/driver"
"fmt"
"math"
@@ -11,6 +12,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
@@ -19,13 +21,12 @@ import (
// AuthTokensQuery is the builder for querying AuthTokens entities.
type AuthTokensQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []authtokens.OrderOption
+ inters []Interceptor
predicates []predicate.AuthTokens
withUser *UserQuery
+ withRoles *AuthRolesQuery
withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
@@ -38,34 +39,34 @@ func (atq *AuthTokensQuery) Where(ps ...predicate.AuthTokens) *AuthTokensQuery {
return atq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (atq *AuthTokensQuery) Limit(limit int) *AuthTokensQuery {
- atq.limit = &limit
+ atq.ctx.Limit = &limit
return atq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (atq *AuthTokensQuery) Offset(offset int) *AuthTokensQuery {
- atq.offset = &offset
+ atq.ctx.Offset = &offset
return atq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (atq *AuthTokensQuery) Unique(unique bool) *AuthTokensQuery {
- atq.unique = &unique
+ atq.ctx.Unique = &unique
return atq
}
-// Order adds an order step to the query.
-func (atq *AuthTokensQuery) Order(o ...OrderFunc) *AuthTokensQuery {
+// Order specifies how the records should be ordered.
+func (atq *AuthTokensQuery) Order(o ...authtokens.OrderOption) *AuthTokensQuery {
atq.order = append(atq.order, o...)
return atq
}
// QueryUser chains the current query on the "user" edge.
func (atq *AuthTokensQuery) QueryUser() *UserQuery {
- query := &UserQuery{config: atq.config}
+ query := (&UserClient{config: atq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := atq.prepareQuery(ctx); err != nil {
return nil, err
@@ -85,10 +86,32 @@ func (atq *AuthTokensQuery) QueryUser() *UserQuery {
return query
}
+// QueryRoles chains the current query on the "roles" edge.
+func (atq *AuthTokensQuery) QueryRoles() *AuthRolesQuery {
+ query := (&AuthRolesClient{config: atq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := atq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := atq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(authtokens.Table, authtokens.FieldID, selector),
+ sqlgraph.To(authroles.Table, authroles.FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, false, authtokens.RolesTable, authtokens.RolesColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(atq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// First returns the first AuthTokens entity from the query.
// Returns a *NotFoundError when no AuthTokens was found.
func (atq *AuthTokensQuery) First(ctx context.Context) (*AuthTokens, error) {
- nodes, err := atq.Limit(1).All(ctx)
+ nodes, err := atq.Limit(1).All(setContextOp(ctx, atq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -111,7 +134,7 @@ func (atq *AuthTokensQuery) FirstX(ctx context.Context) *AuthTokens {
// Returns a *NotFoundError when no AuthTokens ID was found.
func (atq *AuthTokensQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = atq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -134,7 +157,7 @@ func (atq *AuthTokensQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one AuthTokens entity is found.
// Returns a *NotFoundError when no AuthTokens entities are found.
func (atq *AuthTokensQuery) Only(ctx context.Context) (*AuthTokens, error) {
- nodes, err := atq.Limit(2).All(ctx)
+ nodes, err := atq.Limit(2).All(setContextOp(ctx, atq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -162,7 +185,7 @@ func (atq *AuthTokensQuery) OnlyX(ctx context.Context) *AuthTokens {
// Returns a *NotFoundError when no entities are found.
func (atq *AuthTokensQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = atq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -187,10 +210,12 @@ func (atq *AuthTokensQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of AuthTokensSlice.
func (atq *AuthTokensQuery) All(ctx context.Context) ([]*AuthTokens, error) {
+ ctx = setContextOp(ctx, atq.ctx, "All")
if err := atq.prepareQuery(ctx); err != nil {
return nil, err
}
- return atq.sqlAll(ctx)
+ qr := querierAll[[]*AuthTokens, *AuthTokensQuery]()
+ return withInterceptors[[]*AuthTokens](ctx, atq, qr, atq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -203,9 +228,12 @@ func (atq *AuthTokensQuery) AllX(ctx context.Context) []*AuthTokens {
}
// IDs executes the query and returns a list of AuthTokens IDs.
-func (atq *AuthTokensQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := atq.Select(authtokens.FieldID).Scan(ctx, &ids); err != nil {
+func (atq *AuthTokensQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if atq.ctx.Unique == nil && atq.path != nil {
+ atq.Unique(true)
+ }
+ ctx = setContextOp(ctx, atq.ctx, "IDs")
+ if err = atq.Select(authtokens.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -222,10 +250,11 @@ func (atq *AuthTokensQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (atq *AuthTokensQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, atq.ctx, "Count")
if err := atq.prepareQuery(ctx); err != nil {
return 0, err
}
- return atq.sqlCount(ctx)
+ return withInterceptors[int](ctx, atq, querierCount[*AuthTokensQuery](), atq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -239,10 +268,15 @@ func (atq *AuthTokensQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (atq *AuthTokensQuery) Exist(ctx context.Context) (bool, error) {
- if err := atq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, atq.ctx, "Exist")
+ switch _, err := atq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return atq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -262,22 +296,22 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery {
}
return &AuthTokensQuery{
config: atq.config,
- limit: atq.limit,
- offset: atq.offset,
- order: append([]OrderFunc{}, atq.order...),
+ ctx: atq.ctx.Clone(),
+ order: append([]authtokens.OrderOption{}, atq.order...),
+ inters: append([]Interceptor{}, atq.inters...),
predicates: append([]predicate.AuthTokens{}, atq.predicates...),
withUser: atq.withUser.Clone(),
+ withRoles: atq.withRoles.Clone(),
// clone intermediate query.
- sql: atq.sql.Clone(),
- path: atq.path,
- unique: atq.unique,
+ sql: atq.sql.Clone(),
+ path: atq.path,
}
}
// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (atq *AuthTokensQuery) WithUser(opts ...func(*UserQuery)) *AuthTokensQuery {
- query := &UserQuery{config: atq.config}
+ query := (&UserClient{config: atq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -285,6 +319,17 @@ func (atq *AuthTokensQuery) WithUser(opts ...func(*UserQuery)) *AuthTokensQuery
return atq
}
+// WithRoles tells the query-builder to eager-load the nodes that are connected to
+// the "roles" edge. The optional arguments are used to configure the query builder of the edge.
+func (atq *AuthTokensQuery) WithRoles(opts ...func(*AuthRolesQuery)) *AuthTokensQuery {
+ query := (&AuthRolesClient{config: atq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ atq.withRoles = query
+ return atq
+}
+
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
@@ -300,16 +345,11 @@ func (atq *AuthTokensQuery) WithUser(opts ...func(*UserQuery)) *AuthTokensQuery
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (atq *AuthTokensQuery) GroupBy(field string, fields ...string) *AuthTokensGroupBy {
- grbuild := &AuthTokensGroupBy{config: atq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := atq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return atq.sqlQuery(ctx), nil
- }
+ atq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &AuthTokensGroupBy{build: atq}
+ grbuild.flds = &atq.ctx.Fields
grbuild.label = authtokens.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -326,15 +366,30 @@ func (atq *AuthTokensQuery) GroupBy(field string, fields ...string) *AuthTokensG
// Select(authtokens.FieldCreatedAt).
// Scan(ctx, &v)
func (atq *AuthTokensQuery) Select(fields ...string) *AuthTokensSelect {
- atq.fields = append(atq.fields, fields...)
- selbuild := &AuthTokensSelect{AuthTokensQuery: atq}
- selbuild.label = authtokens.Label
- selbuild.flds, selbuild.scan = &atq.fields, selbuild.Scan
- return selbuild
+ atq.ctx.Fields = append(atq.ctx.Fields, fields...)
+ sbuild := &AuthTokensSelect{AuthTokensQuery: atq}
+ sbuild.label = authtokens.Label
+ sbuild.flds, sbuild.scan = &atq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a AuthTokensSelect configured with the given aggregations.
+func (atq *AuthTokensQuery) Aggregate(fns ...AggregateFunc) *AuthTokensSelect {
+ return atq.Select().Aggregate(fns...)
}
func (atq *AuthTokensQuery) prepareQuery(ctx context.Context) error {
- for _, f := range atq.fields {
+ for _, inter := range atq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, atq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range atq.ctx.Fields {
if !authtokens.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -354,8 +409,9 @@ func (atq *AuthTokensQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*
nodes = []*AuthTokens{}
withFKs = atq.withFKs
_spec = atq.querySpec()
- loadedTypes = [1]bool{
+ loadedTypes = [2]bool{
atq.withUser != nil,
+ atq.withRoles != nil,
}
)
if atq.withUser != nil {
@@ -388,6 +444,12 @@ func (atq *AuthTokensQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*
return nil, err
}
}
+ if query := atq.withRoles; query != nil {
+ if err := atq.loadRoles(ctx, query, nodes, nil,
+ func(n *AuthTokens, e *AuthRoles) { n.Edges.Roles = e }); err != nil {
+ return nil, err
+ }
+ }
return nodes, nil
}
@@ -404,6 +466,9 @@ func (atq *AuthTokensQuery) loadUser(ctx context.Context, query *UserQuery, node
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(user.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -420,44 +485,53 @@ func (atq *AuthTokensQuery) loadUser(ctx context.Context, query *UserQuery, node
}
return nil
}
+func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery, nodes []*AuthTokens, init func(*AuthTokens), assign func(*AuthTokens, *AuthRoles)) error {
+ fks := make([]driver.Value, 0, len(nodes))
+ nodeids := make(map[uuid.UUID]*AuthTokens)
+ for i := range nodes {
+ fks = append(fks, nodes[i].ID)
+ nodeids[nodes[i].ID] = nodes[i]
+ }
+ query.withFKs = true
+ query.Where(predicate.AuthRoles(func(s *sql.Selector) {
+ s.Where(sql.InValues(s.C(authtokens.RolesColumn), fks...))
+ }))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ fk := n.auth_tokens_roles
+ if fk == nil {
+ return fmt.Errorf(`foreign-key "auth_tokens_roles" is nil for node %v`, n.ID)
+ }
+ node, ok := nodeids[*fk]
+ if !ok {
+ return fmt.Errorf(`unexpected referenced foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID)
+ }
+ assign(node, n)
+ }
+ return nil
+}
func (atq *AuthTokensQuery) sqlCount(ctx context.Context) (int, error) {
_spec := atq.querySpec()
- _spec.Node.Columns = atq.fields
- if len(atq.fields) > 0 {
- _spec.Unique = atq.unique != nil && *atq.unique
+ _spec.Node.Columns = atq.ctx.Fields
+ if len(atq.ctx.Fields) > 0 {
+ _spec.Unique = atq.ctx.Unique != nil && *atq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, atq.driver, _spec)
}
-func (atq *AuthTokensQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := atq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: authtokens.Table,
- Columns: authtokens.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
- },
- From: atq.sql,
- Unique: true,
- }
- if unique := atq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(authtokens.Table, authtokens.Columns, sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID))
+ _spec.From = atq.sql
+ if unique := atq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if atq.path != nil {
+ _spec.Unique = true
}
- if fields := atq.fields; len(fields) > 0 {
+ if fields := atq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, authtokens.FieldID)
for i := range fields {
@@ -473,10 +547,10 @@ func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := atq.limit; limit != nil {
+ if limit := atq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := atq.offset; offset != nil {
+ if offset := atq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := atq.order; len(ps) > 0 {
@@ -492,7 +566,7 @@ func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(atq.driver.Dialect())
t1 := builder.Table(authtokens.Table)
- columns := atq.fields
+ columns := atq.ctx.Fields
if len(columns) == 0 {
columns = authtokens.Columns
}
@@ -501,7 +575,7 @@ func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = atq.sql
selector.Select(selector.Columns(columns...)...)
}
- if atq.unique != nil && *atq.unique {
+ if atq.ctx.Unique != nil && *atq.ctx.Unique {
selector.Distinct()
}
for _, p := range atq.predicates {
@@ -510,12 +584,12 @@ func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range atq.order {
p(selector)
}
- if offset := atq.offset; offset != nil {
+ if offset := atq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := atq.limit; limit != nil {
+ if limit := atq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -523,13 +597,8 @@ func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
// AuthTokensGroupBy is the group-by builder for AuthTokens entities.
type AuthTokensGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *AuthTokensQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -538,74 +607,77 @@ func (atgb *AuthTokensGroupBy) Aggregate(fns ...AggregateFunc) *AuthTokensGroupB
return atgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (atgb *AuthTokensGroupBy) Scan(ctx context.Context, v any) error {
- query, err := atgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, atgb.build.ctx, "GroupBy")
+ if err := atgb.build.prepareQuery(ctx); err != nil {
return err
}
- atgb.sql = query
- return atgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*AuthTokensQuery, *AuthTokensGroupBy](ctx, atgb.build, atgb, atgb.build.inters, v)
}
-func (atgb *AuthTokensGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range atgb.fields {
- if !authtokens.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := atgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := atgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (atgb *AuthTokensGroupBy) sqlQuery() *sql.Selector {
- selector := atgb.sql.Select()
+func (atgb *AuthTokensGroupBy) sqlScan(ctx context.Context, root *AuthTokensQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(atgb.fns))
for _, fn := range atgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(atgb.fields)+len(atgb.fns))
- for _, f := range atgb.fields {
+ columns := make([]string, 0, len(*atgb.flds)+len(atgb.fns))
+ for _, f := range *atgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(atgb.fields...)...)
+ selector.GroupBy(selector.Columns(*atgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := atgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// AuthTokensSelect is the builder for selecting fields of AuthTokens entities.
type AuthTokensSelect struct {
*AuthTokensQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ats *AuthTokensSelect) Aggregate(fns ...AggregateFunc) *AuthTokensSelect {
+ ats.fns = append(ats.fns, fns...)
+ return ats
}
// Scan applies the selector query and scans the result into the given value.
func (ats *AuthTokensSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ats.ctx, "Select")
if err := ats.prepareQuery(ctx); err != nil {
return err
}
- ats.sql = ats.AuthTokensQuery.sqlQuery(ctx)
- return ats.sqlScan(ctx, v)
+ return scanWithInterceptors[*AuthTokensQuery, *AuthTokensSelect](ctx, ats.AuthTokensQuery, ats, ats.inters, v)
}
-func (ats *AuthTokensSelect) sqlScan(ctx context.Context, v any) error {
+func (ats *AuthTokensSelect) sqlScan(ctx context.Context, root *AuthTokensQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ats.fns))
+ for _, fn := range ats.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ats.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := ats.sql.Query()
+ query, args := selector.Query()
if err := ats.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/authtokens_update.go b/backend/internal/data/ent/authtokens_update.go
index 7d7c541..776888e 100644
--- a/backend/internal/data/ent/authtokens_update.go
+++ b/backend/internal/data/ent/authtokens_update.go
@@ -12,6 +12,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
@@ -75,6 +76,25 @@ func (atu *AuthTokensUpdate) SetUser(u *User) *AuthTokensUpdate {
return atu.SetUserID(u.ID)
}
+// SetRolesID sets the "roles" edge to the AuthRoles entity by ID.
+func (atu *AuthTokensUpdate) SetRolesID(id int) *AuthTokensUpdate {
+ atu.mutation.SetRolesID(id)
+ return atu
+}
+
+// SetNillableRolesID sets the "roles" edge to the AuthRoles entity by ID if the given value is not nil.
+func (atu *AuthTokensUpdate) SetNillableRolesID(id *int) *AuthTokensUpdate {
+ if id != nil {
+ atu = atu.SetRolesID(*id)
+ }
+ return atu
+}
+
+// SetRoles sets the "roles" edge to the AuthRoles entity.
+func (atu *AuthTokensUpdate) SetRoles(a *AuthRoles) *AuthTokensUpdate {
+ return atu.SetRolesID(a.ID)
+}
+
// Mutation returns the AuthTokensMutation object of the builder.
func (atu *AuthTokensUpdate) Mutation() *AuthTokensMutation {
return atu.mutation
@@ -86,37 +106,16 @@ func (atu *AuthTokensUpdate) ClearUser() *AuthTokensUpdate {
return atu
}
+// ClearRoles clears the "roles" edge to the AuthRoles entity.
+func (atu *AuthTokensUpdate) ClearRoles() *AuthTokensUpdate {
+ atu.mutation.ClearRoles()
+ return atu
+}
+
// Save executes the query and returns the number of nodes affected by the update operation.
func (atu *AuthTokensUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
atu.defaults()
- if len(atu.hooks) == 0 {
- affected, err = atu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AuthTokensMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- atu.mutation = mutation
- affected, err = atu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(atu.hooks) - 1; i >= 0; i-- {
- if atu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = atu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, atu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -150,16 +149,7 @@ func (atu *AuthTokensUpdate) defaults() {
}
func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: authtokens.Table,
- Columns: authtokens.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewUpdateSpec(authtokens.Table, authtokens.Columns, sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID))
if ps := atu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -168,25 +158,13 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := atu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldUpdatedAt,
- })
+ _spec.SetField(authtokens.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := atu.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: authtokens.FieldToken,
- })
+ _spec.SetField(authtokens.FieldToken, field.TypeBytes, value)
}
if value, ok := atu.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldExpiresAt,
- })
+ _spec.SetField(authtokens.FieldExpiresAt, field.TypeTime, value)
}
if atu.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -196,10 +174,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -212,10 +187,36 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if atu.mutation.RolesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: false,
+ Table: authtokens.RolesTable,
+ Columns: []string{authtokens.RolesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := atu.mutation.RolesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: false,
+ Table: authtokens.RolesTable,
+ Columns: []string{authtokens.RolesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@@ -231,6 +232,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ atu.mutation.done = true
return n, nil
}
@@ -287,6 +289,25 @@ func (atuo *AuthTokensUpdateOne) SetUser(u *User) *AuthTokensUpdateOne {
return atuo.SetUserID(u.ID)
}
+// SetRolesID sets the "roles" edge to the AuthRoles entity by ID.
+func (atuo *AuthTokensUpdateOne) SetRolesID(id int) *AuthTokensUpdateOne {
+ atuo.mutation.SetRolesID(id)
+ return atuo
+}
+
+// SetNillableRolesID sets the "roles" edge to the AuthRoles entity by ID if the given value is not nil.
+func (atuo *AuthTokensUpdateOne) SetNillableRolesID(id *int) *AuthTokensUpdateOne {
+ if id != nil {
+ atuo = atuo.SetRolesID(*id)
+ }
+ return atuo
+}
+
+// SetRoles sets the "roles" edge to the AuthRoles entity.
+func (atuo *AuthTokensUpdateOne) SetRoles(a *AuthRoles) *AuthTokensUpdateOne {
+ return atuo.SetRolesID(a.ID)
+}
+
// Mutation returns the AuthTokensMutation object of the builder.
func (atuo *AuthTokensUpdateOne) Mutation() *AuthTokensMutation {
return atuo.mutation
@@ -298,6 +319,18 @@ func (atuo *AuthTokensUpdateOne) ClearUser() *AuthTokensUpdateOne {
return atuo
}
+// ClearRoles clears the "roles" edge to the AuthRoles entity.
+func (atuo *AuthTokensUpdateOne) ClearRoles() *AuthTokensUpdateOne {
+ atuo.mutation.ClearRoles()
+ return atuo
+}
+
+// Where appends a list predicates to the AuthTokensUpdate builder.
+func (atuo *AuthTokensUpdateOne) Where(ps ...predicate.AuthTokens) *AuthTokensUpdateOne {
+ atuo.mutation.Where(ps...)
+ return atuo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (atuo *AuthTokensUpdateOne) Select(field string, fields ...string) *AuthTokensUpdateOne {
@@ -307,41 +340,8 @@ func (atuo *AuthTokensUpdateOne) Select(field string, fields ...string) *AuthTok
// Save executes the query and returns the updated AuthTokens entity.
func (atuo *AuthTokensUpdateOne) Save(ctx context.Context) (*AuthTokens, error) {
- var (
- err error
- node *AuthTokens
- )
atuo.defaults()
- if len(atuo.hooks) == 0 {
- node, err = atuo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*AuthTokensMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- atuo.mutation = mutation
- node, err = atuo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(atuo.hooks) - 1; i >= 0; i-- {
- if atuo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = atuo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, atuo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*AuthTokens)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from AuthTokensMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -375,16 +375,7 @@ func (atuo *AuthTokensUpdateOne) defaults() {
}
func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: authtokens.Table,
- Columns: authtokens.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewUpdateSpec(authtokens.Table, authtokens.Columns, sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID))
id, ok := atuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthTokens.id" for update`)}
@@ -410,25 +401,13 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
}
}
if value, ok := atuo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldUpdatedAt,
- })
+ _spec.SetField(authtokens.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := atuo.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: authtokens.FieldToken,
- })
+ _spec.SetField(authtokens.FieldToken, field.TypeBytes, value)
}
if value, ok := atuo.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: authtokens.FieldExpiresAt,
- })
+ _spec.SetField(authtokens.FieldExpiresAt, field.TypeTime, value)
}
if atuo.mutation.UserCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -438,10 +417,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -454,10 +430,36 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if atuo.mutation.RolesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: false,
+ Table: authtokens.RolesTable,
+ Columns: []string{authtokens.RolesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := atuo.mutation.RolesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2O,
+ Inverse: false,
+ Table: authtokens.RolesTable,
+ Columns: []string{authtokens.RolesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@@ -476,5 +478,6 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
}
return nil, err
}
+ atuo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/client.go b/backend/internal/data/ent/client.go
index 8273e32..2fb9b53 100644
--- a/backend/internal/data/ent/client.go
+++ b/backend/internal/data/ent/client.go
@@ -7,25 +7,28 @@ import (
"errors"
"fmt"
"log"
+ "reflect"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/migrate"
+ "entgo.io/ent"
+ "entgo.io/ent/dialect"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
-
- "entgo.io/ent/dialect"
- "entgo.io/ent/dialect/sql"
- "entgo.io/ent/dialect/sql/sqlgraph"
)
// Client is the client that holds all ent builders.
@@ -35,12 +38,12 @@ type Client struct {
Schema *migrate.Schema
// Attachment is the client for interacting with the Attachment builders.
Attachment *AttachmentClient
+ // AuthRoles is the client for interacting with the AuthRoles builders.
+ AuthRoles *AuthRolesClient
// AuthTokens is the client for interacting with the AuthTokens builders.
AuthTokens *AuthTokensClient
// Document is the client for interacting with the Document builders.
Document *DocumentClient
- // DocumentToken is the client for interacting with the DocumentToken builders.
- DocumentToken *DocumentTokenClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// GroupInvitationToken is the client for interacting with the GroupInvitationToken builders.
@@ -53,15 +56,17 @@ type Client struct {
Label *LabelClient
// Location is the client for interacting with the Location builders.
Location *LocationClient
+ // MaintenanceEntry is the client for interacting with the MaintenanceEntry builders.
+ MaintenanceEntry *MaintenanceEntryClient
+ // Notifier is the client for interacting with the Notifier builders.
+ Notifier *NotifierClient
// User is the client for interacting with the User builders.
User *UserClient
}
// NewClient creates a new client configured with the given options.
func NewClient(opts ...Option) *Client {
- cfg := config{log: log.Println, hooks: &hooks{}}
- cfg.options(opts...)
- client := &Client{config: cfg}
+ client := &Client{config: newConfig(opts...)}
client.init()
return client
}
@@ -69,18 +74,76 @@ func NewClient(opts ...Option) *Client {
func (c *Client) init() {
c.Schema = migrate.NewSchema(c.driver)
c.Attachment = NewAttachmentClient(c.config)
+ c.AuthRoles = NewAuthRolesClient(c.config)
c.AuthTokens = NewAuthTokensClient(c.config)
c.Document = NewDocumentClient(c.config)
- c.DocumentToken = NewDocumentTokenClient(c.config)
c.Group = NewGroupClient(c.config)
c.GroupInvitationToken = NewGroupInvitationTokenClient(c.config)
c.Item = NewItemClient(c.config)
c.ItemField = NewItemFieldClient(c.config)
c.Label = NewLabelClient(c.config)
c.Location = NewLocationClient(c.config)
+ c.MaintenanceEntry = NewMaintenanceEntryClient(c.config)
+ c.Notifier = NewNotifierClient(c.config)
c.User = NewUserClient(c.config)
}
+type (
+ // config is the configuration for the client and its builder.
+ config struct {
+ // driver used for executing database requests.
+ driver dialect.Driver
+ // debug enable a debug logging.
+ debug bool
+ // log used for logging on debug mode.
+ log func(...any)
+ // hooks to execute on mutations.
+ hooks *hooks
+ // interceptors to execute on queries.
+ inters *inters
+ }
+ // Option function to configure the client.
+ Option func(*config)
+)
+
+// newConfig creates a new config for the client.
+func newConfig(opts ...Option) config {
+ cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
+ cfg.options(opts...)
+ return cfg
+}
+
+// options applies the options on the config object.
+func (c *config) options(opts ...Option) {
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.debug {
+ c.driver = dialect.Debug(c.driver, c.log)
+ }
+}
+
+// Debug enables debug logging on the ent.Driver.
+func Debug() Option {
+ return func(c *config) {
+ c.debug = true
+ }
+}
+
+// Log sets the logging function for debug mode.
+func Log(fn func(...any)) Option {
+ return func(c *config) {
+ c.log = fn
+ }
+}
+
+// Driver configures the client driver.
+func Driver(driver dialect.Driver) Option {
+ return func(c *config) {
+ c.driver = driver
+ }
+}
+
// Open opens a database/sql.DB specified by the driver name and
// the data source name, and returns a new client attached to it.
// Optional parameters can be added for configuring the client.
@@ -97,11 +160,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
}
}
+// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
+var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
+
// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
if _, ok := c.driver.(*txDriver); ok {
- return nil, errors.New("ent: cannot start a transaction within a transaction")
+ return nil, ErrTxStarted
}
tx, err := newTx(ctx, c.driver)
if err != nil {
@@ -113,15 +179,17 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
ctx: ctx,
config: cfg,
Attachment: NewAttachmentClient(cfg),
+ AuthRoles: NewAuthRolesClient(cfg),
AuthTokens: NewAuthTokensClient(cfg),
Document: NewDocumentClient(cfg),
- DocumentToken: NewDocumentTokenClient(cfg),
Group: NewGroupClient(cfg),
GroupInvitationToken: NewGroupInvitationTokenClient(cfg),
Item: NewItemClient(cfg),
ItemField: NewItemFieldClient(cfg),
Label: NewLabelClient(cfg),
Location: NewLocationClient(cfg),
+ MaintenanceEntry: NewMaintenanceEntryClient(cfg),
+ Notifier: NewNotifierClient(cfg),
User: NewUserClient(cfg),
}, nil
}
@@ -143,15 +211,17 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
ctx: ctx,
config: cfg,
Attachment: NewAttachmentClient(cfg),
+ AuthRoles: NewAuthRolesClient(cfg),
AuthTokens: NewAuthTokensClient(cfg),
Document: NewDocumentClient(cfg),
- DocumentToken: NewDocumentTokenClient(cfg),
Group: NewGroupClient(cfg),
GroupInvitationToken: NewGroupInvitationTokenClient(cfg),
Item: NewItemClient(cfg),
ItemField: NewItemFieldClient(cfg),
Label: NewLabelClient(cfg),
Location: NewLocationClient(cfg),
+ MaintenanceEntry: NewMaintenanceEntryClient(cfg),
+ Notifier: NewNotifierClient(cfg),
User: NewUserClient(cfg),
}, nil
}
@@ -181,17 +251,59 @@ func (c *Client) Close() error {
// Use adds the mutation hooks to all the entity clients.
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
- c.Attachment.Use(hooks...)
- c.AuthTokens.Use(hooks...)
- c.Document.Use(hooks...)
- c.DocumentToken.Use(hooks...)
- c.Group.Use(hooks...)
- c.GroupInvitationToken.Use(hooks...)
- c.Item.Use(hooks...)
- c.ItemField.Use(hooks...)
- c.Label.Use(hooks...)
- c.Location.Use(hooks...)
- c.User.Use(hooks...)
+ for _, n := range []interface{ Use(...Hook) }{
+ c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
+ c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
+ c.MaintenanceEntry, c.Notifier, c.User,
+ } {
+ n.Use(hooks...)
+ }
+}
+
+// Intercept adds the query interceptors to all the entity clients.
+// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
+func (c *Client) Intercept(interceptors ...Interceptor) {
+ for _, n := range []interface{ Intercept(...Interceptor) }{
+ c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
+ c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
+ c.MaintenanceEntry, c.Notifier, c.User,
+ } {
+ n.Intercept(interceptors...)
+ }
+}
+
+// Mutate implements the ent.Mutator interface.
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
+ switch m := m.(type) {
+ case *AttachmentMutation:
+ return c.Attachment.mutate(ctx, m)
+ case *AuthRolesMutation:
+ return c.AuthRoles.mutate(ctx, m)
+ case *AuthTokensMutation:
+ return c.AuthTokens.mutate(ctx, m)
+ case *DocumentMutation:
+ return c.Document.mutate(ctx, m)
+ case *GroupMutation:
+ return c.Group.mutate(ctx, m)
+ case *GroupInvitationTokenMutation:
+ return c.GroupInvitationToken.mutate(ctx, m)
+ case *ItemMutation:
+ return c.Item.mutate(ctx, m)
+ case *ItemFieldMutation:
+ return c.ItemField.mutate(ctx, m)
+ case *LabelMutation:
+ return c.Label.mutate(ctx, m)
+ case *LocationMutation:
+ return c.Location.mutate(ctx, m)
+ case *MaintenanceEntryMutation:
+ return c.MaintenanceEntry.mutate(ctx, m)
+ case *NotifierMutation:
+ return c.Notifier.mutate(ctx, m)
+ case *UserMutation:
+ return c.User.mutate(ctx, m)
+ default:
+ return nil, fmt.Errorf("ent: unknown mutation type %T", m)
+ }
}
// AttachmentClient is a client for the Attachment schema.
@@ -210,6 +322,12 @@ func (c *AttachmentClient) Use(hooks ...Hook) {
c.hooks.Attachment = append(c.hooks.Attachment, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `attachment.Intercept(f(g(h())))`.
+func (c *AttachmentClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Attachment = append(c.inters.Attachment, interceptors...)
+}
+
// Create returns a builder for creating a Attachment entity.
func (c *AttachmentClient) Create() *AttachmentCreate {
mutation := newAttachmentMutation(c.config, OpCreate)
@@ -221,6 +339,21 @@ func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *Attachment
return &AttachmentCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *AttachmentClient) MapCreateBulk(slice any, setFunc func(*AttachmentCreate, int)) *AttachmentCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &AttachmentCreateBulk{err: fmt.Errorf("calling to AttachmentClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*AttachmentCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &AttachmentCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Attachment.
func (c *AttachmentClient) Update() *AttachmentUpdate {
mutation := newAttachmentMutation(c.config, OpUpdate)
@@ -250,7 +383,7 @@ func (c *AttachmentClient) DeleteOne(a *Attachment) *AttachmentDeleteOne {
return c.DeleteOneID(a.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *AttachmentClient) DeleteOneID(id uuid.UUID) *AttachmentDeleteOne {
builder := c.Delete().Where(attachment.ID(id))
builder.mutation.id = &id
@@ -262,6 +395,8 @@ func (c *AttachmentClient) DeleteOneID(id uuid.UUID) *AttachmentDeleteOne {
func (c *AttachmentClient) Query() *AttachmentQuery {
return &AttachmentQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeAttachment},
+ inters: c.Interceptors(),
}
}
@@ -281,8 +416,8 @@ func (c *AttachmentClient) GetX(ctx context.Context, id uuid.UUID) *Attachment {
// QueryItem queries the item edge of a Attachment.
func (c *AttachmentClient) QueryItem(a *Attachment) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(attachment.Table, attachment.FieldID, id),
@@ -297,8 +432,8 @@ func (c *AttachmentClient) QueryItem(a *Attachment) *ItemQuery {
// QueryDocument queries the document edge of a Attachment.
func (c *AttachmentClient) QueryDocument(a *Attachment) *DocumentQuery {
- query := &DocumentQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&DocumentClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(attachment.Table, attachment.FieldID, id),
@@ -316,6 +451,175 @@ func (c *AttachmentClient) Hooks() []Hook {
return c.hooks.Attachment
}
+// Interceptors returns the client interceptors.
+func (c *AttachmentClient) Interceptors() []Interceptor {
+ return c.inters.Attachment
+}
+
+func (c *AttachmentClient) mutate(ctx context.Context, m *AttachmentMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&AttachmentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&AttachmentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&AttachmentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Attachment mutation op: %q", m.Op())
+ }
+}
+
+// AuthRolesClient is a client for the AuthRoles schema.
+type AuthRolesClient struct {
+ config
+}
+
+// NewAuthRolesClient returns a client for the AuthRoles from the given config.
+func NewAuthRolesClient(c config) *AuthRolesClient {
+ return &AuthRolesClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `authroles.Hooks(f(g(h())))`.
+func (c *AuthRolesClient) Use(hooks ...Hook) {
+ c.hooks.AuthRoles = append(c.hooks.AuthRoles, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `authroles.Intercept(f(g(h())))`.
+func (c *AuthRolesClient) Intercept(interceptors ...Interceptor) {
+ c.inters.AuthRoles = append(c.inters.AuthRoles, interceptors...)
+}
+
+// Create returns a builder for creating a AuthRoles entity.
+func (c *AuthRolesClient) Create() *AuthRolesCreate {
+ mutation := newAuthRolesMutation(c.config, OpCreate)
+ return &AuthRolesCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of AuthRoles entities.
+func (c *AuthRolesClient) CreateBulk(builders ...*AuthRolesCreate) *AuthRolesCreateBulk {
+ return &AuthRolesCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *AuthRolesClient) MapCreateBulk(slice any, setFunc func(*AuthRolesCreate, int)) *AuthRolesCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &AuthRolesCreateBulk{err: fmt.Errorf("calling to AuthRolesClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*AuthRolesCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &AuthRolesCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for AuthRoles.
+func (c *AuthRolesClient) Update() *AuthRolesUpdate {
+ mutation := newAuthRolesMutation(c.config, OpUpdate)
+ return &AuthRolesUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *AuthRolesClient) UpdateOne(ar *AuthRoles) *AuthRolesUpdateOne {
+ mutation := newAuthRolesMutation(c.config, OpUpdateOne, withAuthRoles(ar))
+ return &AuthRolesUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *AuthRolesClient) UpdateOneID(id int) *AuthRolesUpdateOne {
+ mutation := newAuthRolesMutation(c.config, OpUpdateOne, withAuthRolesID(id))
+ return &AuthRolesUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for AuthRoles.
+func (c *AuthRolesClient) Delete() *AuthRolesDelete {
+ mutation := newAuthRolesMutation(c.config, OpDelete)
+ return &AuthRolesDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *AuthRolesClient) DeleteOne(ar *AuthRoles) *AuthRolesDeleteOne {
+ return c.DeleteOneID(ar.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *AuthRolesClient) DeleteOneID(id int) *AuthRolesDeleteOne {
+ builder := c.Delete().Where(authroles.ID(id))
+ builder.mutation.id = &id
+ builder.mutation.op = OpDeleteOne
+ return &AuthRolesDeleteOne{builder}
+}
+
+// Query returns a query builder for AuthRoles.
+func (c *AuthRolesClient) Query() *AuthRolesQuery {
+ return &AuthRolesQuery{
+ config: c.config,
+ ctx: &QueryContext{Type: TypeAuthRoles},
+ inters: c.Interceptors(),
+ }
+}
+
+// Get returns a AuthRoles entity by its id.
+func (c *AuthRolesClient) Get(ctx context.Context, id int) (*AuthRoles, error) {
+ return c.Query().Where(authroles.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *AuthRolesClient) GetX(ctx context.Context, id int) *AuthRoles {
+ obj, err := c.Get(ctx, id)
+ if err != nil {
+ panic(err)
+ }
+ return obj
+}
+
+// QueryToken queries the token edge of a AuthRoles.
+func (c *AuthRolesClient) QueryToken(ar *AuthRoles) *AuthTokensQuery {
+ query := (&AuthTokensClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := ar.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(authroles.Table, authroles.FieldID, id),
+ sqlgraph.To(authtokens.Table, authtokens.FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, true, authroles.TokenTable, authroles.TokenColumn),
+ )
+ fromV = sqlgraph.Neighbors(ar.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
+// Hooks returns the client hooks.
+func (c *AuthRolesClient) Hooks() []Hook {
+ return c.hooks.AuthRoles
+}
+
+// Interceptors returns the client interceptors.
+func (c *AuthRolesClient) Interceptors() []Interceptor {
+ return c.inters.AuthRoles
+}
+
+func (c *AuthRolesClient) mutate(ctx context.Context, m *AuthRolesMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&AuthRolesCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&AuthRolesUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&AuthRolesUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&AuthRolesDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown AuthRoles mutation op: %q", m.Op())
+ }
+}
+
// AuthTokensClient is a client for the AuthTokens schema.
type AuthTokensClient struct {
config
@@ -332,6 +636,12 @@ func (c *AuthTokensClient) Use(hooks ...Hook) {
c.hooks.AuthTokens = append(c.hooks.AuthTokens, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `authtokens.Intercept(f(g(h())))`.
+func (c *AuthTokensClient) Intercept(interceptors ...Interceptor) {
+ c.inters.AuthTokens = append(c.inters.AuthTokens, interceptors...)
+}
+
// Create returns a builder for creating a AuthTokens entity.
func (c *AuthTokensClient) Create() *AuthTokensCreate {
mutation := newAuthTokensMutation(c.config, OpCreate)
@@ -343,6 +653,21 @@ func (c *AuthTokensClient) CreateBulk(builders ...*AuthTokensCreate) *AuthTokens
return &AuthTokensCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *AuthTokensClient) MapCreateBulk(slice any, setFunc func(*AuthTokensCreate, int)) *AuthTokensCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &AuthTokensCreateBulk{err: fmt.Errorf("calling to AuthTokensClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*AuthTokensCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &AuthTokensCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for AuthTokens.
func (c *AuthTokensClient) Update() *AuthTokensUpdate {
mutation := newAuthTokensMutation(c.config, OpUpdate)
@@ -372,7 +697,7 @@ func (c *AuthTokensClient) DeleteOne(at *AuthTokens) *AuthTokensDeleteOne {
return c.DeleteOneID(at.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *AuthTokensClient) DeleteOneID(id uuid.UUID) *AuthTokensDeleteOne {
builder := c.Delete().Where(authtokens.ID(id))
builder.mutation.id = &id
@@ -384,6 +709,8 @@ func (c *AuthTokensClient) DeleteOneID(id uuid.UUID) *AuthTokensDeleteOne {
func (c *AuthTokensClient) Query() *AuthTokensQuery {
return &AuthTokensQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeAuthTokens},
+ inters: c.Interceptors(),
}
}
@@ -403,8 +730,8 @@ func (c *AuthTokensClient) GetX(ctx context.Context, id uuid.UUID) *AuthTokens {
// QueryUser queries the user edge of a AuthTokens.
func (c *AuthTokensClient) QueryUser(at *AuthTokens) *UserQuery {
- query := &UserQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&UserClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := at.ID
step := sqlgraph.NewStep(
sqlgraph.From(authtokens.Table, authtokens.FieldID, id),
@@ -417,11 +744,47 @@ func (c *AuthTokensClient) QueryUser(at *AuthTokens) *UserQuery {
return query
}
+// QueryRoles queries the roles edge of a AuthTokens.
+func (c *AuthTokensClient) QueryRoles(at *AuthTokens) *AuthRolesQuery {
+ query := (&AuthRolesClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := at.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(authtokens.Table, authtokens.FieldID, id),
+ sqlgraph.To(authroles.Table, authroles.FieldID),
+ sqlgraph.Edge(sqlgraph.O2O, false, authtokens.RolesTable, authtokens.RolesColumn),
+ )
+ fromV = sqlgraph.Neighbors(at.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// Hooks returns the client hooks.
func (c *AuthTokensClient) Hooks() []Hook {
return c.hooks.AuthTokens
}
+// Interceptors returns the client interceptors.
+func (c *AuthTokensClient) Interceptors() []Interceptor {
+ return c.inters.AuthTokens
+}
+
+func (c *AuthTokensClient) mutate(ctx context.Context, m *AuthTokensMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&AuthTokensCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&AuthTokensUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&AuthTokensUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&AuthTokensDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown AuthTokens mutation op: %q", m.Op())
+ }
+}
+
// DocumentClient is a client for the Document schema.
type DocumentClient struct {
config
@@ -438,6 +801,12 @@ func (c *DocumentClient) Use(hooks ...Hook) {
c.hooks.Document = append(c.hooks.Document, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `document.Intercept(f(g(h())))`.
+func (c *DocumentClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Document = append(c.inters.Document, interceptors...)
+}
+
// Create returns a builder for creating a Document entity.
func (c *DocumentClient) Create() *DocumentCreate {
mutation := newDocumentMutation(c.config, OpCreate)
@@ -449,6 +818,21 @@ func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreate
return &DocumentCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *DocumentClient) MapCreateBulk(slice any, setFunc func(*DocumentCreate, int)) *DocumentCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &DocumentCreateBulk{err: fmt.Errorf("calling to DocumentClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*DocumentCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &DocumentCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Document.
func (c *DocumentClient) Update() *DocumentUpdate {
mutation := newDocumentMutation(c.config, OpUpdate)
@@ -478,7 +862,7 @@ func (c *DocumentClient) DeleteOne(d *Document) *DocumentDeleteOne {
return c.DeleteOneID(d.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne {
builder := c.Delete().Where(document.ID(id))
builder.mutation.id = &id
@@ -490,6 +874,8 @@ func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne {
func (c *DocumentClient) Query() *DocumentQuery {
return &DocumentQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeDocument},
+ inters: c.Interceptors(),
}
}
@@ -509,8 +895,8 @@ func (c *DocumentClient) GetX(ctx context.Context, id uuid.UUID) *Document {
// QueryGroup queries the group edge of a Document.
func (c *DocumentClient) QueryGroup(d *Document) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := d.ID
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, id),
@@ -523,26 +909,10 @@ func (c *DocumentClient) QueryGroup(d *Document) *GroupQuery {
return query
}
-// QueryDocumentTokens queries the document_tokens edge of a Document.
-func (c *DocumentClient) QueryDocumentTokens(d *Document) *DocumentTokenQuery {
- query := &DocumentTokenQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
- id := d.ID
- step := sqlgraph.NewStep(
- sqlgraph.From(document.Table, document.FieldID, id),
- sqlgraph.To(documenttoken.Table, documenttoken.FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn),
- )
- fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
- return fromV, nil
- }
- return query
-}
-
// QueryAttachments queries the attachments edge of a Document.
func (c *DocumentClient) QueryAttachments(d *Document) *AttachmentQuery {
- query := &AttachmentQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&AttachmentClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := d.ID
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, id),
@@ -560,112 +930,26 @@ func (c *DocumentClient) Hooks() []Hook {
return c.hooks.Document
}
-// DocumentTokenClient is a client for the DocumentToken schema.
-type DocumentTokenClient struct {
- config
+// Interceptors returns the client interceptors.
+func (c *DocumentClient) Interceptors() []Interceptor {
+ return c.inters.Document
}
-// NewDocumentTokenClient returns a client for the DocumentToken from the given config.
-func NewDocumentTokenClient(c config) *DocumentTokenClient {
- return &DocumentTokenClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
-// A call to `Use(f, g, h)` equals to `documenttoken.Hooks(f(g(h())))`.
-func (c *DocumentTokenClient) Use(hooks ...Hook) {
- c.hooks.DocumentToken = append(c.hooks.DocumentToken, hooks...)
-}
-
-// Create returns a builder for creating a DocumentToken entity.
-func (c *DocumentTokenClient) Create() *DocumentTokenCreate {
- mutation := newDocumentTokenMutation(c.config, OpCreate)
- return &DocumentTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// CreateBulk returns a builder for creating a bulk of DocumentToken entities.
-func (c *DocumentTokenClient) CreateBulk(builders ...*DocumentTokenCreate) *DocumentTokenCreateBulk {
- return &DocumentTokenCreateBulk{config: c.config, builders: builders}
-}
-
-// Update returns an update builder for DocumentToken.
-func (c *DocumentTokenClient) Update() *DocumentTokenUpdate {
- mutation := newDocumentTokenMutation(c.config, OpUpdate)
- return &DocumentTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOne returns an update builder for the given entity.
-func (c *DocumentTokenClient) UpdateOne(dt *DocumentToken) *DocumentTokenUpdateOne {
- mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentToken(dt))
- return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOneID returns an update builder for the given id.
-func (c *DocumentTokenClient) UpdateOneID(id uuid.UUID) *DocumentTokenUpdateOne {
- mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentTokenID(id))
- return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// Delete returns a delete builder for DocumentToken.
-func (c *DocumentTokenClient) Delete() *DocumentTokenDelete {
- mutation := newDocumentTokenMutation(c.config, OpDelete)
- return &DocumentTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// DeleteOne returns a builder for deleting the given entity.
-func (c *DocumentTokenClient) DeleteOne(dt *DocumentToken) *DocumentTokenDeleteOne {
- return c.DeleteOneID(dt.ID)
-}
-
-// DeleteOne returns a builder for deleting the given entity by its id.
-func (c *DocumentTokenClient) DeleteOneID(id uuid.UUID) *DocumentTokenDeleteOne {
- builder := c.Delete().Where(documenttoken.ID(id))
- builder.mutation.id = &id
- builder.mutation.op = OpDeleteOne
- return &DocumentTokenDeleteOne{builder}
-}
-
-// Query returns a query builder for DocumentToken.
-func (c *DocumentTokenClient) Query() *DocumentTokenQuery {
- return &DocumentTokenQuery{
- config: c.config,
+func (c *DocumentClient) mutate(ctx context.Context, m *DocumentMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&DocumentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&DocumentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&DocumentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Document mutation op: %q", m.Op())
}
}
-// Get returns a DocumentToken entity by its id.
-func (c *DocumentTokenClient) Get(ctx context.Context, id uuid.UUID) (*DocumentToken, error) {
- return c.Query().Where(documenttoken.ID(id)).Only(ctx)
-}
-
-// GetX is like Get, but panics if an error occurs.
-func (c *DocumentTokenClient) GetX(ctx context.Context, id uuid.UUID) *DocumentToken {
- obj, err := c.Get(ctx, id)
- if err != nil {
- panic(err)
- }
- return obj
-}
-
-// QueryDocument queries the document edge of a DocumentToken.
-func (c *DocumentTokenClient) QueryDocument(dt *DocumentToken) *DocumentQuery {
- query := &DocumentQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
- id := dt.ID
- step := sqlgraph.NewStep(
- sqlgraph.From(documenttoken.Table, documenttoken.FieldID, id),
- sqlgraph.To(document.Table, document.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn),
- )
- fromV = sqlgraph.Neighbors(dt.driver.Dialect(), step)
- return fromV, nil
- }
- return query
-}
-
-// Hooks returns the client hooks.
-func (c *DocumentTokenClient) Hooks() []Hook {
- return c.hooks.DocumentToken
-}
-
// GroupClient is a client for the Group schema.
type GroupClient struct {
config
@@ -682,6 +966,12 @@ func (c *GroupClient) Use(hooks ...Hook) {
c.hooks.Group = append(c.hooks.Group, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `group.Intercept(f(g(h())))`.
+func (c *GroupClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Group = append(c.inters.Group, interceptors...)
+}
+
// Create returns a builder for creating a Group entity.
func (c *GroupClient) Create() *GroupCreate {
mutation := newGroupMutation(c.config, OpCreate)
@@ -693,6 +983,21 @@ func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk {
return &GroupCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*GroupCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &GroupCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Group.
func (c *GroupClient) Update() *GroupUpdate {
mutation := newGroupMutation(c.config, OpUpdate)
@@ -722,7 +1027,7 @@ func (c *GroupClient) DeleteOne(gr *Group) *GroupDeleteOne {
return c.DeleteOneID(gr.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *GroupClient) DeleteOneID(id uuid.UUID) *GroupDeleteOne {
builder := c.Delete().Where(group.ID(id))
builder.mutation.id = &id
@@ -734,6 +1039,8 @@ func (c *GroupClient) DeleteOneID(id uuid.UUID) *GroupDeleteOne {
func (c *GroupClient) Query() *GroupQuery {
return &GroupQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeGroup},
+ inters: c.Interceptors(),
}
}
@@ -753,8 +1060,8 @@ func (c *GroupClient) GetX(ctx context.Context, id uuid.UUID) *Group {
// QueryUsers queries the users edge of a Group.
func (c *GroupClient) QueryUsers(gr *Group) *UserQuery {
- query := &UserQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&UserClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -769,8 +1076,8 @@ func (c *GroupClient) QueryUsers(gr *Group) *UserQuery {
// QueryLocations queries the locations edge of a Group.
func (c *GroupClient) QueryLocations(gr *Group) *LocationQuery {
- query := &LocationQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LocationClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -785,8 +1092,8 @@ func (c *GroupClient) QueryLocations(gr *Group) *LocationQuery {
// QueryItems queries the items edge of a Group.
func (c *GroupClient) QueryItems(gr *Group) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -801,8 +1108,8 @@ func (c *GroupClient) QueryItems(gr *Group) *ItemQuery {
// QueryLabels queries the labels edge of a Group.
func (c *GroupClient) QueryLabels(gr *Group) *LabelQuery {
- query := &LabelQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LabelClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -817,8 +1124,8 @@ func (c *GroupClient) QueryLabels(gr *Group) *LabelQuery {
// QueryDocuments queries the documents edge of a Group.
func (c *GroupClient) QueryDocuments(gr *Group) *DocumentQuery {
- query := &DocumentQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&DocumentClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -833,8 +1140,8 @@ func (c *GroupClient) QueryDocuments(gr *Group) *DocumentQuery {
// QueryInvitationTokens queries the invitation_tokens edge of a Group.
func (c *GroupClient) QueryInvitationTokens(gr *Group) *GroupInvitationTokenQuery {
- query := &GroupInvitationTokenQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&GroupInvitationTokenClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
@@ -847,11 +1154,47 @@ func (c *GroupClient) QueryInvitationTokens(gr *Group) *GroupInvitationTokenQuer
return query
}
+// QueryNotifiers queries the notifiers edge of a Group.
+func (c *GroupClient) QueryNotifiers(gr *Group) *NotifierQuery {
+ query := (&NotifierClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := gr.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(group.Table, group.FieldID, id),
+ sqlgraph.To(notifier.Table, notifier.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn),
+ )
+ fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// Hooks returns the client hooks.
func (c *GroupClient) Hooks() []Hook {
return c.hooks.Group
}
+// Interceptors returns the client interceptors.
+func (c *GroupClient) Interceptors() []Interceptor {
+ return c.inters.Group
+}
+
+func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&GroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&GroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Group mutation op: %q", m.Op())
+ }
+}
+
// GroupInvitationTokenClient is a client for the GroupInvitationToken schema.
type GroupInvitationTokenClient struct {
config
@@ -868,6 +1211,12 @@ func (c *GroupInvitationTokenClient) Use(hooks ...Hook) {
c.hooks.GroupInvitationToken = append(c.hooks.GroupInvitationToken, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `groupinvitationtoken.Intercept(f(g(h())))`.
+func (c *GroupInvitationTokenClient) Intercept(interceptors ...Interceptor) {
+ c.inters.GroupInvitationToken = append(c.inters.GroupInvitationToken, interceptors...)
+}
+
// Create returns a builder for creating a GroupInvitationToken entity.
func (c *GroupInvitationTokenClient) Create() *GroupInvitationTokenCreate {
mutation := newGroupInvitationTokenMutation(c.config, OpCreate)
@@ -879,6 +1228,21 @@ func (c *GroupInvitationTokenClient) CreateBulk(builders ...*GroupInvitationToke
return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *GroupInvitationTokenClient) MapCreateBulk(slice any, setFunc func(*GroupInvitationTokenCreate, int)) *GroupInvitationTokenCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &GroupInvitationTokenCreateBulk{err: fmt.Errorf("calling to GroupInvitationTokenClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*GroupInvitationTokenCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for GroupInvitationToken.
func (c *GroupInvitationTokenClient) Update() *GroupInvitationTokenUpdate {
mutation := newGroupInvitationTokenMutation(c.config, OpUpdate)
@@ -908,7 +1272,7 @@ func (c *GroupInvitationTokenClient) DeleteOne(git *GroupInvitationToken) *Group
return c.DeleteOneID(git.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *GroupInvitationTokenClient) DeleteOneID(id uuid.UUID) *GroupInvitationTokenDeleteOne {
builder := c.Delete().Where(groupinvitationtoken.ID(id))
builder.mutation.id = &id
@@ -920,6 +1284,8 @@ func (c *GroupInvitationTokenClient) DeleteOneID(id uuid.UUID) *GroupInvitationT
func (c *GroupInvitationTokenClient) Query() *GroupInvitationTokenQuery {
return &GroupInvitationTokenQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeGroupInvitationToken},
+ inters: c.Interceptors(),
}
}
@@ -939,8 +1305,8 @@ func (c *GroupInvitationTokenClient) GetX(ctx context.Context, id uuid.UUID) *Gr
// QueryGroup queries the group edge of a GroupInvitationToken.
func (c *GroupInvitationTokenClient) QueryGroup(git *GroupInvitationToken) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := git.ID
step := sqlgraph.NewStep(
sqlgraph.From(groupinvitationtoken.Table, groupinvitationtoken.FieldID, id),
@@ -958,6 +1324,26 @@ func (c *GroupInvitationTokenClient) Hooks() []Hook {
return c.hooks.GroupInvitationToken
}
+// Interceptors returns the client interceptors.
+func (c *GroupInvitationTokenClient) Interceptors() []Interceptor {
+ return c.inters.GroupInvitationToken
+}
+
+func (c *GroupInvitationTokenClient) mutate(ctx context.Context, m *GroupInvitationTokenMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&GroupInvitationTokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&GroupInvitationTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&GroupInvitationTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&GroupInvitationTokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown GroupInvitationToken mutation op: %q", m.Op())
+ }
+}
+
// ItemClient is a client for the Item schema.
type ItemClient struct {
config
@@ -974,6 +1360,12 @@ func (c *ItemClient) Use(hooks ...Hook) {
c.hooks.Item = append(c.hooks.Item, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `item.Intercept(f(g(h())))`.
+func (c *ItemClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Item = append(c.inters.Item, interceptors...)
+}
+
// Create returns a builder for creating a Item entity.
func (c *ItemClient) Create() *ItemCreate {
mutation := newItemMutation(c.config, OpCreate)
@@ -985,6 +1377,21 @@ func (c *ItemClient) CreateBulk(builders ...*ItemCreate) *ItemCreateBulk {
return &ItemCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *ItemClient) MapCreateBulk(slice any, setFunc func(*ItemCreate, int)) *ItemCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &ItemCreateBulk{err: fmt.Errorf("calling to ItemClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*ItemCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &ItemCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Item.
func (c *ItemClient) Update() *ItemUpdate {
mutation := newItemMutation(c.config, OpUpdate)
@@ -1014,7 +1421,7 @@ func (c *ItemClient) DeleteOne(i *Item) *ItemDeleteOne {
return c.DeleteOneID(i.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *ItemClient) DeleteOneID(id uuid.UUID) *ItemDeleteOne {
builder := c.Delete().Where(item.ID(id))
builder.mutation.id = &id
@@ -1026,6 +1433,8 @@ func (c *ItemClient) DeleteOneID(id uuid.UUID) *ItemDeleteOne {
func (c *ItemClient) Query() *ItemQuery {
return &ItemQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeItem},
+ inters: c.Interceptors(),
}
}
@@ -1043,10 +1452,26 @@ func (c *ItemClient) GetX(ctx context.Context, id uuid.UUID) *Item {
return obj
}
+// QueryGroup queries the group edge of a Item.
+func (c *ItemClient) QueryGroup(i *Item) *GroupQuery {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := i.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(item.Table, item.FieldID, id),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
+ )
+ fromV = sqlgraph.Neighbors(i.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// QueryParent queries the parent edge of a Item.
func (c *ItemClient) QueryParent(i *Item) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1061,8 +1486,8 @@ func (c *ItemClient) QueryParent(i *Item) *ItemQuery {
// QueryChildren queries the children edge of a Item.
func (c *ItemClient) QueryChildren(i *Item) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1075,26 +1500,10 @@ func (c *ItemClient) QueryChildren(i *Item) *ItemQuery {
return query
}
-// QueryGroup queries the group edge of a Item.
-func (c *ItemClient) QueryGroup(i *Item) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
- id := i.ID
- step := sqlgraph.NewStep(
- sqlgraph.From(item.Table, item.FieldID, id),
- sqlgraph.To(group.Table, group.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
- )
- fromV = sqlgraph.Neighbors(i.driver.Dialect(), step)
- return fromV, nil
- }
- return query
-}
-
// QueryLabel queries the label edge of a Item.
func (c *ItemClient) QueryLabel(i *Item) *LabelQuery {
- query := &LabelQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LabelClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1109,8 +1518,8 @@ func (c *ItemClient) QueryLabel(i *Item) *LabelQuery {
// QueryLocation queries the location edge of a Item.
func (c *ItemClient) QueryLocation(i *Item) *LocationQuery {
- query := &LocationQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LocationClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1125,8 +1534,8 @@ func (c *ItemClient) QueryLocation(i *Item) *LocationQuery {
// QueryFields queries the fields edge of a Item.
func (c *ItemClient) QueryFields(i *Item) *ItemFieldQuery {
- query := &ItemFieldQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemFieldClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1139,10 +1548,26 @@ func (c *ItemClient) QueryFields(i *Item) *ItemFieldQuery {
return query
}
+// QueryMaintenanceEntries queries the maintenance_entries edge of a Item.
+func (c *ItemClient) QueryMaintenanceEntries(i *Item) *MaintenanceEntryQuery {
+ query := (&MaintenanceEntryClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := i.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(item.Table, item.FieldID, id),
+ sqlgraph.To(maintenanceentry.Table, maintenanceentry.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, item.MaintenanceEntriesTable, item.MaintenanceEntriesColumn),
+ )
+ fromV = sqlgraph.Neighbors(i.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// QueryAttachments queries the attachments edge of a Item.
func (c *ItemClient) QueryAttachments(i *Item) *AttachmentQuery {
- query := &AttachmentQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&AttachmentClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := i.ID
step := sqlgraph.NewStep(
sqlgraph.From(item.Table, item.FieldID, id),
@@ -1160,6 +1585,26 @@ func (c *ItemClient) Hooks() []Hook {
return c.hooks.Item
}
+// Interceptors returns the client interceptors.
+func (c *ItemClient) Interceptors() []Interceptor {
+ return c.inters.Item
+}
+
+func (c *ItemClient) mutate(ctx context.Context, m *ItemMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&ItemCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&ItemUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&ItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&ItemDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Item mutation op: %q", m.Op())
+ }
+}
+
// ItemFieldClient is a client for the ItemField schema.
type ItemFieldClient struct {
config
@@ -1176,6 +1621,12 @@ func (c *ItemFieldClient) Use(hooks ...Hook) {
c.hooks.ItemField = append(c.hooks.ItemField, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `itemfield.Intercept(f(g(h())))`.
+func (c *ItemFieldClient) Intercept(interceptors ...Interceptor) {
+ c.inters.ItemField = append(c.inters.ItemField, interceptors...)
+}
+
// Create returns a builder for creating a ItemField entity.
func (c *ItemFieldClient) Create() *ItemFieldCreate {
mutation := newItemFieldMutation(c.config, OpCreate)
@@ -1187,6 +1638,21 @@ func (c *ItemFieldClient) CreateBulk(builders ...*ItemFieldCreate) *ItemFieldCre
return &ItemFieldCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *ItemFieldClient) MapCreateBulk(slice any, setFunc func(*ItemFieldCreate, int)) *ItemFieldCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &ItemFieldCreateBulk{err: fmt.Errorf("calling to ItemFieldClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*ItemFieldCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &ItemFieldCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for ItemField.
func (c *ItemFieldClient) Update() *ItemFieldUpdate {
mutation := newItemFieldMutation(c.config, OpUpdate)
@@ -1216,7 +1682,7 @@ func (c *ItemFieldClient) DeleteOne(_if *ItemField) *ItemFieldDeleteOne {
return c.DeleteOneID(_if.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *ItemFieldClient) DeleteOneID(id uuid.UUID) *ItemFieldDeleteOne {
builder := c.Delete().Where(itemfield.ID(id))
builder.mutation.id = &id
@@ -1228,6 +1694,8 @@ func (c *ItemFieldClient) DeleteOneID(id uuid.UUID) *ItemFieldDeleteOne {
func (c *ItemFieldClient) Query() *ItemFieldQuery {
return &ItemFieldQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeItemField},
+ inters: c.Interceptors(),
}
}
@@ -1247,8 +1715,8 @@ func (c *ItemFieldClient) GetX(ctx context.Context, id uuid.UUID) *ItemField {
// QueryItem queries the item edge of a ItemField.
func (c *ItemFieldClient) QueryItem(_if *ItemField) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := _if.ID
step := sqlgraph.NewStep(
sqlgraph.From(itemfield.Table, itemfield.FieldID, id),
@@ -1266,6 +1734,26 @@ func (c *ItemFieldClient) Hooks() []Hook {
return c.hooks.ItemField
}
+// Interceptors returns the client interceptors.
+func (c *ItemFieldClient) Interceptors() []Interceptor {
+ return c.inters.ItemField
+}
+
+func (c *ItemFieldClient) mutate(ctx context.Context, m *ItemFieldMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&ItemFieldCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&ItemFieldUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&ItemFieldUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&ItemFieldDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown ItemField mutation op: %q", m.Op())
+ }
+}
+
// LabelClient is a client for the Label schema.
type LabelClient struct {
config
@@ -1282,6 +1770,12 @@ func (c *LabelClient) Use(hooks ...Hook) {
c.hooks.Label = append(c.hooks.Label, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `label.Intercept(f(g(h())))`.
+func (c *LabelClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Label = append(c.inters.Label, interceptors...)
+}
+
// Create returns a builder for creating a Label entity.
func (c *LabelClient) Create() *LabelCreate {
mutation := newLabelMutation(c.config, OpCreate)
@@ -1293,6 +1787,21 @@ func (c *LabelClient) CreateBulk(builders ...*LabelCreate) *LabelCreateBulk {
return &LabelCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *LabelClient) MapCreateBulk(slice any, setFunc func(*LabelCreate, int)) *LabelCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &LabelCreateBulk{err: fmt.Errorf("calling to LabelClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*LabelCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &LabelCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Label.
func (c *LabelClient) Update() *LabelUpdate {
mutation := newLabelMutation(c.config, OpUpdate)
@@ -1322,7 +1831,7 @@ func (c *LabelClient) DeleteOne(l *Label) *LabelDeleteOne {
return c.DeleteOneID(l.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *LabelClient) DeleteOneID(id uuid.UUID) *LabelDeleteOne {
builder := c.Delete().Where(label.ID(id))
builder.mutation.id = &id
@@ -1334,6 +1843,8 @@ func (c *LabelClient) DeleteOneID(id uuid.UUID) *LabelDeleteOne {
func (c *LabelClient) Query() *LabelQuery {
return &LabelQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeLabel},
+ inters: c.Interceptors(),
}
}
@@ -1353,8 +1864,8 @@ func (c *LabelClient) GetX(ctx context.Context, id uuid.UUID) *Label {
// QueryGroup queries the group edge of a Label.
func (c *LabelClient) QueryGroup(l *Label) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := l.ID
step := sqlgraph.NewStep(
sqlgraph.From(label.Table, label.FieldID, id),
@@ -1369,8 +1880,8 @@ func (c *LabelClient) QueryGroup(l *Label) *GroupQuery {
// QueryItems queries the items edge of a Label.
func (c *LabelClient) QueryItems(l *Label) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := l.ID
step := sqlgraph.NewStep(
sqlgraph.From(label.Table, label.FieldID, id),
@@ -1388,6 +1899,26 @@ func (c *LabelClient) Hooks() []Hook {
return c.hooks.Label
}
+// Interceptors returns the client interceptors.
+func (c *LabelClient) Interceptors() []Interceptor {
+ return c.inters.Label
+}
+
+func (c *LabelClient) mutate(ctx context.Context, m *LabelMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&LabelCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&LabelUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&LabelUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&LabelDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Label mutation op: %q", m.Op())
+ }
+}
+
// LocationClient is a client for the Location schema.
type LocationClient struct {
config
@@ -1404,6 +1935,12 @@ func (c *LocationClient) Use(hooks ...Hook) {
c.hooks.Location = append(c.hooks.Location, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `location.Intercept(f(g(h())))`.
+func (c *LocationClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Location = append(c.inters.Location, interceptors...)
+}
+
// Create returns a builder for creating a Location entity.
func (c *LocationClient) Create() *LocationCreate {
mutation := newLocationMutation(c.config, OpCreate)
@@ -1415,6 +1952,21 @@ func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreate
return &LocationCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*LocationCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &LocationCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Location.
func (c *LocationClient) Update() *LocationUpdate {
mutation := newLocationMutation(c.config, OpUpdate)
@@ -1444,7 +1996,7 @@ func (c *LocationClient) DeleteOne(l *Location) *LocationDeleteOne {
return c.DeleteOneID(l.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *LocationClient) DeleteOneID(id uuid.UUID) *LocationDeleteOne {
builder := c.Delete().Where(location.ID(id))
builder.mutation.id = &id
@@ -1456,6 +2008,8 @@ func (c *LocationClient) DeleteOneID(id uuid.UUID) *LocationDeleteOne {
func (c *LocationClient) Query() *LocationQuery {
return &LocationQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeLocation},
+ inters: c.Interceptors(),
}
}
@@ -1473,10 +2027,26 @@ func (c *LocationClient) GetX(ctx context.Context, id uuid.UUID) *Location {
return obj
}
+// QueryGroup queries the group edge of a Location.
+func (c *LocationClient) QueryGroup(l *Location) *GroupQuery {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := l.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(location.Table, location.FieldID, id),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
+ )
+ fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// QueryParent queries the parent edge of a Location.
func (c *LocationClient) QueryParent(l *Location) *LocationQuery {
- query := &LocationQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LocationClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := l.ID
step := sqlgraph.NewStep(
sqlgraph.From(location.Table, location.FieldID, id),
@@ -1491,8 +2061,8 @@ func (c *LocationClient) QueryParent(l *Location) *LocationQuery {
// QueryChildren queries the children edge of a Location.
func (c *LocationClient) QueryChildren(l *Location) *LocationQuery {
- query := &LocationQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&LocationClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := l.ID
step := sqlgraph.NewStep(
sqlgraph.From(location.Table, location.FieldID, id),
@@ -1505,26 +2075,10 @@ func (c *LocationClient) QueryChildren(l *Location) *LocationQuery {
return query
}
-// QueryGroup queries the group edge of a Location.
-func (c *LocationClient) QueryGroup(l *Location) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
- id := l.ID
- step := sqlgraph.NewStep(
- sqlgraph.From(location.Table, location.FieldID, id),
- sqlgraph.To(group.Table, group.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
- )
- fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
- return fromV, nil
- }
- return query
-}
-
// QueryItems queries the items edge of a Location.
func (c *LocationClient) QueryItems(l *Location) *ItemQuery {
- query := &ItemQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := l.ID
step := sqlgraph.NewStep(
sqlgraph.From(location.Table, location.FieldID, id),
@@ -1542,6 +2096,340 @@ func (c *LocationClient) Hooks() []Hook {
return c.hooks.Location
}
+// Interceptors returns the client interceptors.
+func (c *LocationClient) Interceptors() []Interceptor {
+ return c.inters.Location
+}
+
+func (c *LocationClient) mutate(ctx context.Context, m *LocationMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&LocationCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&LocationDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Location mutation op: %q", m.Op())
+ }
+}
+
+// MaintenanceEntryClient is a client for the MaintenanceEntry schema.
+type MaintenanceEntryClient struct {
+ config
+}
+
+// NewMaintenanceEntryClient returns a client for the MaintenanceEntry from the given config.
+func NewMaintenanceEntryClient(c config) *MaintenanceEntryClient {
+ return &MaintenanceEntryClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `maintenanceentry.Hooks(f(g(h())))`.
+func (c *MaintenanceEntryClient) Use(hooks ...Hook) {
+ c.hooks.MaintenanceEntry = append(c.hooks.MaintenanceEntry, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `maintenanceentry.Intercept(f(g(h())))`.
+func (c *MaintenanceEntryClient) Intercept(interceptors ...Interceptor) {
+ c.inters.MaintenanceEntry = append(c.inters.MaintenanceEntry, interceptors...)
+}
+
+// Create returns a builder for creating a MaintenanceEntry entity.
+func (c *MaintenanceEntryClient) Create() *MaintenanceEntryCreate {
+ mutation := newMaintenanceEntryMutation(c.config, OpCreate)
+ return &MaintenanceEntryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of MaintenanceEntry entities.
+func (c *MaintenanceEntryClient) CreateBulk(builders ...*MaintenanceEntryCreate) *MaintenanceEntryCreateBulk {
+ return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *MaintenanceEntryClient) MapCreateBulk(slice any, setFunc func(*MaintenanceEntryCreate, int)) *MaintenanceEntryCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &MaintenanceEntryCreateBulk{err: fmt.Errorf("calling to MaintenanceEntryClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*MaintenanceEntryCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for MaintenanceEntry.
+func (c *MaintenanceEntryClient) Update() *MaintenanceEntryUpdate {
+ mutation := newMaintenanceEntryMutation(c.config, OpUpdate)
+ return &MaintenanceEntryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *MaintenanceEntryClient) UpdateOne(me *MaintenanceEntry) *MaintenanceEntryUpdateOne {
+ mutation := newMaintenanceEntryMutation(c.config, OpUpdateOne, withMaintenanceEntry(me))
+ return &MaintenanceEntryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *MaintenanceEntryClient) UpdateOneID(id uuid.UUID) *MaintenanceEntryUpdateOne {
+ mutation := newMaintenanceEntryMutation(c.config, OpUpdateOne, withMaintenanceEntryID(id))
+ return &MaintenanceEntryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for MaintenanceEntry.
+func (c *MaintenanceEntryClient) Delete() *MaintenanceEntryDelete {
+ mutation := newMaintenanceEntryMutation(c.config, OpDelete)
+ return &MaintenanceEntryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *MaintenanceEntryClient) DeleteOne(me *MaintenanceEntry) *MaintenanceEntryDeleteOne {
+ return c.DeleteOneID(me.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *MaintenanceEntryClient) DeleteOneID(id uuid.UUID) *MaintenanceEntryDeleteOne {
+ builder := c.Delete().Where(maintenanceentry.ID(id))
+ builder.mutation.id = &id
+ builder.mutation.op = OpDeleteOne
+ return &MaintenanceEntryDeleteOne{builder}
+}
+
+// Query returns a query builder for MaintenanceEntry.
+func (c *MaintenanceEntryClient) Query() *MaintenanceEntryQuery {
+ return &MaintenanceEntryQuery{
+ config: c.config,
+ ctx: &QueryContext{Type: TypeMaintenanceEntry},
+ inters: c.Interceptors(),
+ }
+}
+
+// Get returns a MaintenanceEntry entity by its id.
+func (c *MaintenanceEntryClient) Get(ctx context.Context, id uuid.UUID) (*MaintenanceEntry, error) {
+ return c.Query().Where(maintenanceentry.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *MaintenanceEntryClient) GetX(ctx context.Context, id uuid.UUID) *MaintenanceEntry {
+ obj, err := c.Get(ctx, id)
+ if err != nil {
+ panic(err)
+ }
+ return obj
+}
+
+// QueryItem queries the item edge of a MaintenanceEntry.
+func (c *MaintenanceEntryClient) QueryItem(me *MaintenanceEntry) *ItemQuery {
+ query := (&ItemClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := me.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(maintenanceentry.Table, maintenanceentry.FieldID, id),
+ sqlgraph.To(item.Table, item.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, maintenanceentry.ItemTable, maintenanceentry.ItemColumn),
+ )
+ fromV = sqlgraph.Neighbors(me.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
+// Hooks returns the client hooks.
+func (c *MaintenanceEntryClient) Hooks() []Hook {
+ return c.hooks.MaintenanceEntry
+}
+
+// Interceptors returns the client interceptors.
+func (c *MaintenanceEntryClient) Interceptors() []Interceptor {
+ return c.inters.MaintenanceEntry
+}
+
+func (c *MaintenanceEntryClient) mutate(ctx context.Context, m *MaintenanceEntryMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&MaintenanceEntryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&MaintenanceEntryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&MaintenanceEntryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&MaintenanceEntryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown MaintenanceEntry mutation op: %q", m.Op())
+ }
+}
+
+// NotifierClient is a client for the Notifier schema.
+type NotifierClient struct {
+ config
+}
+
+// NewNotifierClient returns a client for the Notifier from the given config.
+func NewNotifierClient(c config) *NotifierClient {
+ return &NotifierClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `notifier.Hooks(f(g(h())))`.
+func (c *NotifierClient) Use(hooks ...Hook) {
+ c.hooks.Notifier = append(c.hooks.Notifier, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `notifier.Intercept(f(g(h())))`.
+func (c *NotifierClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Notifier = append(c.inters.Notifier, interceptors...)
+}
+
+// Create returns a builder for creating a Notifier entity.
+func (c *NotifierClient) Create() *NotifierCreate {
+ mutation := newNotifierMutation(c.config, OpCreate)
+ return &NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Notifier entities.
+func (c *NotifierClient) CreateBulk(builders ...*NotifierCreate) *NotifierCreateBulk {
+ return &NotifierCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *NotifierClient) MapCreateBulk(slice any, setFunc func(*NotifierCreate, int)) *NotifierCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &NotifierCreateBulk{err: fmt.Errorf("calling to NotifierClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*NotifierCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &NotifierCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Notifier.
+func (c *NotifierClient) Update() *NotifierUpdate {
+ mutation := newNotifierMutation(c.config, OpUpdate)
+ return &NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *NotifierClient) UpdateOne(n *Notifier) *NotifierUpdateOne {
+ mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifier(n))
+ return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *NotifierClient) UpdateOneID(id uuid.UUID) *NotifierUpdateOne {
+ mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifierID(id))
+ return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Notifier.
+func (c *NotifierClient) Delete() *NotifierDelete {
+ mutation := newNotifierMutation(c.config, OpDelete)
+ return &NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *NotifierClient) DeleteOne(n *Notifier) *NotifierDeleteOne {
+ return c.DeleteOneID(n.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *NotifierClient) DeleteOneID(id uuid.UUID) *NotifierDeleteOne {
+ builder := c.Delete().Where(notifier.ID(id))
+ builder.mutation.id = &id
+ builder.mutation.op = OpDeleteOne
+ return &NotifierDeleteOne{builder}
+}
+
+// Query returns a query builder for Notifier.
+func (c *NotifierClient) Query() *NotifierQuery {
+ return &NotifierQuery{
+ config: c.config,
+ ctx: &QueryContext{Type: TypeNotifier},
+ inters: c.Interceptors(),
+ }
+}
+
+// Get returns a Notifier entity by its id.
+func (c *NotifierClient) Get(ctx context.Context, id uuid.UUID) (*Notifier, error) {
+ return c.Query().Where(notifier.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *NotifierClient) GetX(ctx context.Context, id uuid.UUID) *Notifier {
+ obj, err := c.Get(ctx, id)
+ if err != nil {
+ panic(err)
+ }
+ return obj
+}
+
+// QueryGroup queries the group edge of a Notifier.
+func (c *NotifierClient) QueryGroup(n *Notifier) *GroupQuery {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := n.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(notifier.Table, notifier.FieldID, id),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn),
+ )
+ fromV = sqlgraph.Neighbors(n.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
+// QueryUser queries the user edge of a Notifier.
+func (c *NotifierClient) QueryUser(n *Notifier) *UserQuery {
+ query := (&UserClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := n.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(notifier.Table, notifier.FieldID, id),
+ sqlgraph.To(user.Table, user.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn),
+ )
+ fromV = sqlgraph.Neighbors(n.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
+// Hooks returns the client hooks.
+func (c *NotifierClient) Hooks() []Hook {
+ return c.hooks.Notifier
+}
+
+// Interceptors returns the client interceptors.
+func (c *NotifierClient) Interceptors() []Interceptor {
+ return c.inters.Notifier
+}
+
+func (c *NotifierClient) mutate(ctx context.Context, m *NotifierMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Notifier mutation op: %q", m.Op())
+ }
+}
+
// UserClient is a client for the User schema.
type UserClient struct {
config
@@ -1558,6 +2446,12 @@ func (c *UserClient) Use(hooks ...Hook) {
c.hooks.User = append(c.hooks.User, hooks...)
}
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`.
+func (c *UserClient) Intercept(interceptors ...Interceptor) {
+ c.inters.User = append(c.inters.User, interceptors...)
+}
+
// Create returns a builder for creating a User entity.
func (c *UserClient) Create() *UserCreate {
mutation := newUserMutation(c.config, OpCreate)
@@ -1569,6 +2463,21 @@ func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
return &UserCreateBulk{config: c.config, builders: builders}
}
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*UserCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &UserCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for User.
func (c *UserClient) Update() *UserUpdate {
mutation := newUserMutation(c.config, OpUpdate)
@@ -1598,7 +2507,7 @@ func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
return c.DeleteOneID(u.ID)
}
-// DeleteOne returns a builder for deleting the given entity by its id.
+// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne {
builder := c.Delete().Where(user.ID(id))
builder.mutation.id = &id
@@ -1610,6 +2519,8 @@ func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne {
func (c *UserClient) Query() *UserQuery {
return &UserQuery{
config: c.config,
+ ctx: &QueryContext{Type: TypeUser},
+ inters: c.Interceptors(),
}
}
@@ -1629,8 +2540,8 @@ func (c *UserClient) GetX(ctx context.Context, id uuid.UUID) *User {
// QueryGroup queries the group edge of a User.
func (c *UserClient) QueryGroup(u *User) *GroupQuery {
- query := &GroupQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := u.ID
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, id),
@@ -1645,8 +2556,8 @@ func (c *UserClient) QueryGroup(u *User) *GroupQuery {
// QueryAuthTokens queries the auth_tokens edge of a User.
func (c *UserClient) QueryAuthTokens(u *User) *AuthTokensQuery {
- query := &AuthTokensQuery{config: c.config}
- query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
+ query := (&AuthTokensClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := u.ID
step := sqlgraph.NewStep(
sqlgraph.From(user.Table, user.FieldID, id),
@@ -1659,7 +2570,55 @@ func (c *UserClient) QueryAuthTokens(u *User) *AuthTokensQuery {
return query
}
+// QueryNotifiers queries the notifiers edge of a User.
+func (c *UserClient) QueryNotifiers(u *User) *NotifierQuery {
+ query := (&NotifierClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := u.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(user.Table, user.FieldID, id),
+ sqlgraph.To(notifier.Table, notifier.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, user.NotifiersTable, user.NotifiersColumn),
+ )
+ fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// Hooks returns the client hooks.
func (c *UserClient) Hooks() []Hook {
return c.hooks.User
}
+
+// Interceptors returns the client interceptors.
+func (c *UserClient) Interceptors() []Interceptor {
+ return c.inters.User
+}
+
+func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op())
+ }
+}
+
+// hooks and interceptors per client, for fast access.
+type (
+ hooks struct {
+ Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
+ ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Hook
+ }
+ inters struct {
+ Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
+ ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Interceptor
+ }
+)
diff --git a/backend/internal/data/ent/config.go b/backend/internal/data/ent/config.go
deleted file mode 100644
index da76f30..0000000
--- a/backend/internal/data/ent/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "entgo.io/ent"
- "entgo.io/ent/dialect"
-)
-
-// Option function to configure the client.
-type Option func(*config)
-
-// Config is the configuration for the client and its builder.
-type config struct {
- // driver used for executing database requests.
- driver dialect.Driver
- // debug enable a debug logging.
- debug bool
- // log used for logging on debug mode.
- log func(...any)
- // hooks to execute on mutations.
- hooks *hooks
-}
-
-// hooks per client, for fast access.
-type hooks struct {
- Attachment []ent.Hook
- AuthTokens []ent.Hook
- Document []ent.Hook
- DocumentToken []ent.Hook
- Group []ent.Hook
- GroupInvitationToken []ent.Hook
- Item []ent.Hook
- ItemField []ent.Hook
- Label []ent.Hook
- Location []ent.Hook
- User []ent.Hook
-}
-
-// Options applies the options on the config object.
-func (c *config) options(opts ...Option) {
- for _, opt := range opts {
- opt(c)
- }
- if c.debug {
- c.driver = dialect.Debug(c.driver, c.log)
- }
-}
-
-// Debug enables debug logging on the ent.Driver.
-func Debug() Option {
- return func(c *config) {
- c.debug = true
- }
-}
-
-// Log sets the logging function for debug mode.
-func Log(fn func(...any)) Option {
- return func(c *config) {
- c.log = fn
- }
-}
-
-// Driver configures the client driver.
-func Driver(driver dialect.Driver) Option {
- return func(c *config) {
- c.driver = driver
- }
-}
diff --git a/backend/internal/data/ent/context.go b/backend/internal/data/ent/context.go
deleted file mode 100644
index 7811bfa..0000000
--- a/backend/internal/data/ent/context.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "context"
-)
-
-type clientCtxKey struct{}
-
-// FromContext returns a Client stored inside a context, or nil if there isn't one.
-func FromContext(ctx context.Context) *Client {
- c, _ := ctx.Value(clientCtxKey{}).(*Client)
- return c
-}
-
-// NewContext returns a new context with the given Client attached.
-func NewContext(parent context.Context, c *Client) context.Context {
- return context.WithValue(parent, clientCtxKey{}, c)
-}
-
-type txCtxKey struct{}
-
-// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
-func TxFromContext(ctx context.Context) *Tx {
- tx, _ := ctx.Value(txCtxKey{}).(*Tx)
- return tx
-}
-
-// NewTxContext returns a new context with the given Tx attached.
-func NewTxContext(parent context.Context, tx *Tx) context.Context {
- return context.WithValue(parent, txCtxKey{}, tx)
-}
diff --git a/backend/internal/data/ent/document.go b/backend/internal/data/ent/document.go
index 0c84d7d..3141bac 100644
--- a/backend/internal/data/ent/document.go
+++ b/backend/internal/data/ent/document.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
@@ -30,19 +31,18 @@ type Document struct {
// The values are being populated by the DocumentQuery when eager-loading is set.
Edges DocumentEdges `json:"edges"`
group_documents *uuid.UUID
+ selectValues sql.SelectValues
}
// DocumentEdges holds the relations/edges for other nodes in the graph.
type DocumentEdges struct {
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
- // DocumentTokens holds the value of the document_tokens edge.
- DocumentTokens []*DocumentToken `json:"document_tokens,omitempty"`
// Attachments holds the value of the attachments edge.
Attachments []*Attachment `json:"attachments,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [3]bool
+ loadedTypes [2]bool
}
// GroupOrErr returns the Group value or an error if the edge
@@ -58,19 +58,10 @@ func (e DocumentEdges) GroupOrErr() (*Group, error) {
return nil, &NotLoadedError{edge: "group"}
}
-// DocumentTokensOrErr returns the DocumentTokens value or an error if the edge
-// was not loaded in eager-loading.
-func (e DocumentEdges) DocumentTokensOrErr() ([]*DocumentToken, error) {
- if e.loadedTypes[1] {
- return e.DocumentTokens, nil
- }
- return nil, &NotLoadedError{edge: "document_tokens"}
-}
-
// AttachmentsOrErr returns the Attachments value or an error if the edge
// was not loaded in eager-loading.
func (e DocumentEdges) AttachmentsOrErr() ([]*Attachment, error) {
- if e.loadedTypes[2] {
+ if e.loadedTypes[1] {
return e.Attachments, nil
}
return nil, &NotLoadedError{edge: "attachments"}
@@ -90,7 +81,7 @@ func (*Document) scanValues(columns []string) ([]any, error) {
case document.ForeignKeys[0]: // group_documents
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type Document", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -141,31 +132,34 @@ func (d *Document) assignValues(columns []string, values []any) error {
d.group_documents = new(uuid.UUID)
*d.group_documents = *value.S.(*uuid.UUID)
}
+ default:
+ d.selectValues.Set(columns[i], values[i])
}
}
return nil
}
-// QueryGroup queries the "group" edge of the Document entity.
-func (d *Document) QueryGroup() *GroupQuery {
- return (&DocumentClient{config: d.config}).QueryGroup(d)
+// Value returns the ent.Value that was dynamically selected and assigned to the Document.
+// This includes values selected through modifiers, order, etc.
+func (d *Document) Value(name string) (ent.Value, error) {
+ return d.selectValues.Get(name)
}
-// QueryDocumentTokens queries the "document_tokens" edge of the Document entity.
-func (d *Document) QueryDocumentTokens() *DocumentTokenQuery {
- return (&DocumentClient{config: d.config}).QueryDocumentTokens(d)
+// QueryGroup queries the "group" edge of the Document entity.
+func (d *Document) QueryGroup() *GroupQuery {
+ return NewDocumentClient(d.config).QueryGroup(d)
}
// QueryAttachments queries the "attachments" edge of the Document entity.
func (d *Document) QueryAttachments() *AttachmentQuery {
- return (&DocumentClient{config: d.config}).QueryAttachments(d)
+ return NewDocumentClient(d.config).QueryAttachments(d)
}
// Update returns a builder for updating this Document.
// Note that you need to call Document.Unwrap() before calling this method if this Document
// was returned from a transaction, and the transaction was committed or rolled back.
func (d *Document) Update() *DocumentUpdateOne {
- return (&DocumentClient{config: d.config}).UpdateOne(d)
+ return NewDocumentClient(d.config).UpdateOne(d)
}
// Unwrap unwraps the Document entity that was returned from a transaction after it was closed,
@@ -201,9 +195,3 @@ func (d *Document) String() string {
// Documents is a parsable slice of Document.
type Documents []*Document
-
-func (d Documents) config(cfg config) {
- for _i := range d {
- d[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/document/document.go b/backend/internal/data/ent/document/document.go
index bfc3881..95380f4 100644
--- a/backend/internal/data/ent/document/document.go
+++ b/backend/internal/data/ent/document/document.go
@@ -5,6 +5,8 @@ package document
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -23,8 +25,6 @@ const (
FieldPath = "path"
// EdgeGroup holds the string denoting the group edge name in mutations.
EdgeGroup = "group"
- // EdgeDocumentTokens holds the string denoting the document_tokens edge name in mutations.
- EdgeDocumentTokens = "document_tokens"
// EdgeAttachments holds the string denoting the attachments edge name in mutations.
EdgeAttachments = "attachments"
// Table holds the table name of the document in the database.
@@ -36,13 +36,6 @@ const (
GroupInverseTable = "groups"
// GroupColumn is the table column denoting the group relation/edge.
GroupColumn = "group_documents"
- // DocumentTokensTable is the table that holds the document_tokens relation/edge.
- DocumentTokensTable = "document_tokens"
- // DocumentTokensInverseTable is the table name for the DocumentToken entity.
- // It exists in this package in order to avoid circular dependency with the "documenttoken" package.
- DocumentTokensInverseTable = "document_tokens"
- // DocumentTokensColumn is the table column denoting the document_tokens relation/edge.
- DocumentTokensColumn = "document_document_tokens"
// AttachmentsTable is the table that holds the attachments relation/edge.
AttachmentsTable = "attachments"
// AttachmentsInverseTable is the table name for the Attachment entity.
@@ -96,3 +89,66 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the Document queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByTitle orders the results by the title field.
+func ByTitle(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldTitle, opts...).ToFunc()
+}
+
+// ByPath orders the results by the path field.
+func ByPath(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPath, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByAttachmentsCount orders the results by attachments count.
+func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...)
+ }
+}
+
+// ByAttachments orders the results by attachments terms.
+func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newAttachmentsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(AttachmentsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
+ )
+}
diff --git a/backend/internal/data/ent/document/where.go b/backend/internal/data/ent/document/where.go
index dc02fa4..3e491ad 100644
--- a/backend/internal/data/ent/document/where.go
+++ b/backend/internal/data/ent/document/where.go
@@ -13,427 +13,277 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Document(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Document(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Document(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldUpdatedAt, v))
}
// Title applies equality check predicate on the "title" field. It's identical to TitleEQ.
func Title(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldTitle, v))
}
// Path applies equality check predicate on the "path" field. It's identical to PathEQ.
func Path(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldPath, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Document(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Document(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Document(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Document(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Document(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Document(sql.FieldLTE(FieldUpdatedAt, v))
}
// TitleEQ applies the EQ predicate on the "title" field.
func TitleEQ(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldTitle, v))
}
// TitleNEQ applies the NEQ predicate on the "title" field.
func TitleNEQ(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldNEQ(FieldTitle, v))
}
// TitleIn applies the In predicate on the "title" field.
func TitleIn(vs ...string) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldTitle), v...))
- })
+ return predicate.Document(sql.FieldIn(FieldTitle, vs...))
}
// TitleNotIn applies the NotIn predicate on the "title" field.
func TitleNotIn(vs ...string) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldTitle), v...))
- })
+ return predicate.Document(sql.FieldNotIn(FieldTitle, vs...))
}
// TitleGT applies the GT predicate on the "title" field.
func TitleGT(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldGT(FieldTitle, v))
}
// TitleGTE applies the GTE predicate on the "title" field.
func TitleGTE(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldGTE(FieldTitle, v))
}
// TitleLT applies the LT predicate on the "title" field.
func TitleLT(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldLT(FieldTitle, v))
}
// TitleLTE applies the LTE predicate on the "title" field.
func TitleLTE(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldLTE(FieldTitle, v))
}
// TitleContains applies the Contains predicate on the "title" field.
func TitleContains(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldContains(FieldTitle, v))
}
// TitleHasPrefix applies the HasPrefix predicate on the "title" field.
func TitleHasPrefix(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldHasPrefix(FieldTitle, v))
}
// TitleHasSuffix applies the HasSuffix predicate on the "title" field.
func TitleHasSuffix(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldHasSuffix(FieldTitle, v))
}
// TitleEqualFold applies the EqualFold predicate on the "title" field.
func TitleEqualFold(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldEqualFold(FieldTitle, v))
}
// TitleContainsFold applies the ContainsFold predicate on the "title" field.
func TitleContainsFold(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldTitle), v))
- })
+ return predicate.Document(sql.FieldContainsFold(FieldTitle, v))
}
// PathEQ applies the EQ predicate on the "path" field.
func PathEQ(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldEQ(FieldPath, v))
}
// PathNEQ applies the NEQ predicate on the "path" field.
func PathNEQ(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldNEQ(FieldPath, v))
}
// PathIn applies the In predicate on the "path" field.
func PathIn(vs ...string) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldPath), v...))
- })
+ return predicate.Document(sql.FieldIn(FieldPath, vs...))
}
// PathNotIn applies the NotIn predicate on the "path" field.
func PathNotIn(vs ...string) predicate.Document {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldPath), v...))
- })
+ return predicate.Document(sql.FieldNotIn(FieldPath, vs...))
}
// PathGT applies the GT predicate on the "path" field.
func PathGT(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldGT(FieldPath, v))
}
// PathGTE applies the GTE predicate on the "path" field.
func PathGTE(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldGTE(FieldPath, v))
}
// PathLT applies the LT predicate on the "path" field.
func PathLT(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldLT(FieldPath, v))
}
// PathLTE applies the LTE predicate on the "path" field.
func PathLTE(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldLTE(FieldPath, v))
}
// PathContains applies the Contains predicate on the "path" field.
func PathContains(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldContains(FieldPath, v))
}
// PathHasPrefix applies the HasPrefix predicate on the "path" field.
func PathHasPrefix(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldHasPrefix(FieldPath, v))
}
// PathHasSuffix applies the HasSuffix predicate on the "path" field.
func PathHasSuffix(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldHasSuffix(FieldPath, v))
}
// PathEqualFold applies the EqualFold predicate on the "path" field.
func PathEqualFold(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldEqualFold(FieldPath, v))
}
// PathContainsFold applies the ContainsFold predicate on the "path" field.
func PathContainsFold(v string) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldPath), v))
- })
+ return predicate.Document(sql.FieldContainsFold(FieldPath, v))
}
// HasGroup applies the HasEdge predicate on the "group" edge.
@@ -441,7 +291,6 @@ func HasGroup() predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -451,39 +300,7 @@ func HasGroup() predicate.Document {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
- sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
- for _, p := range preds {
- p(s)
- }
- })
- })
-}
-
-// HasDocumentTokens applies the HasEdge predicate on the "document_tokens" edge.
-func HasDocumentTokens() predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentTokensTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn),
- )
- sqlgraph.HasNeighbors(s, step)
- })
-}
-
-// HasDocumentTokensWith applies the HasEdge predicate on the "document_tokens" edge with a given conditions (other predicates).
-func HasDocumentTokensWith(preds ...predicate.DocumentToken) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentTokensInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn),
- )
+ step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -497,7 +314,6 @@ func HasAttachments() predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(AttachmentsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -507,11 +323,7 @@ func HasAttachments() predicate.Document {
// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates).
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(AttachmentsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
- )
+ step := newAttachmentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -522,32 +334,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Document) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Document(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Document) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Document(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Document) predicate.Document {
- return predicate.Document(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Document(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/document_create.go b/backend/internal/data/ent/document_create.go
index b6577df..fe61e98 100644
--- a/backend/internal/data/ent/document_create.go
+++ b/backend/internal/data/ent/document_create.go
@@ -13,7 +13,6 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
)
@@ -89,21 +88,6 @@ func (dc *DocumentCreate) SetGroup(g *Group) *DocumentCreate {
return dc.SetGroupID(g.ID)
}
-// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
-func (dc *DocumentCreate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentCreate {
- dc.mutation.AddDocumentTokenIDs(ids...)
- return dc
-}
-
-// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
-func (dc *DocumentCreate) AddDocumentTokens(d ...*DocumentToken) *DocumentCreate {
- ids := make([]uuid.UUID, len(d))
- for i := range d {
- ids[i] = d[i].ID
- }
- return dc.AddDocumentTokenIDs(ids...)
-}
-
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (dc *DocumentCreate) AddAttachmentIDs(ids ...uuid.UUID) *DocumentCreate {
dc.mutation.AddAttachmentIDs(ids...)
@@ -126,50 +110,8 @@ func (dc *DocumentCreate) Mutation() *DocumentMutation {
// Save creates the Document in the database.
func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) {
- var (
- err error
- node *Document
- )
dc.defaults()
- if len(dc.hooks) == 0 {
- if err = dc.check(); err != nil {
- return nil, err
- }
- node, err = dc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = dc.check(); err != nil {
- return nil, err
- }
- dc.mutation = mutation
- if node, err = dc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(dc.hooks) - 1; i >= 0; i-- {
- if dc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, dc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Document)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -241,6 +183,9 @@ func (dc *DocumentCreate) check() error {
}
func (dc *DocumentCreate) sqlSave(ctx context.Context) (*Document, error) {
+ if err := dc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := dc.createSpec()
if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -255,54 +200,34 @@ func (dc *DocumentCreate) sqlSave(ctx context.Context) (*Document, error) {
return nil, err
}
}
+ dc.mutation.id = &_node.ID
+ dc.mutation.done = true
return _node, nil
}
func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
var (
_node = &Document{config: dc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: document.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(document.Table, sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID))
)
if id, ok := dc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := dc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: document.FieldCreatedAt,
- })
+ _spec.SetField(document.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := dc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: document.FieldUpdatedAt,
- })
+ _spec.SetField(document.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := dc.mutation.Title(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldTitle,
- })
+ _spec.SetField(document.FieldTitle, field.TypeString, value)
_node.Title = value
}
if value, ok := dc.mutation.Path(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldPath,
- })
+ _spec.SetField(document.FieldPath, field.TypeString, value)
_node.Path = value
}
if nodes := dc.mutation.GroupIDs(); len(nodes) > 0 {
@@ -313,10 +238,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -325,25 +247,6 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
_node.group_documents = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
- if nodes := dc.mutation.DocumentTokensIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges = append(_spec.Edges, edge)
- }
if nodes := dc.mutation.AttachmentsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
@@ -352,10 +255,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -369,11 +269,15 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
// DocumentCreateBulk is the builder for creating many Document entities in bulk.
type DocumentCreateBulk struct {
config
+ err error
builders []*DocumentCreate
}
// Save creates the Document entities in the database.
func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
+ if dcb.err != nil {
+ return nil, dcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
nodes := make([]*Document, len(dcb.builders))
mutators := make([]Mutator, len(dcb.builders))
@@ -390,8 +294,8 @@ func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/document_delete.go b/backend/internal/data/ent/document_delete.go
index 6e21bef..5901c03 100644
--- a/backend/internal/data/ent/document_delete.go
+++ b/backend/internal/data/ent/document_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(dd.hooks) == 0 {
- affected, err = dd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- dd.mutation = mutation
- affected, err = dd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(dd.hooks) - 1; i >= 0; i-- {
- if dd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, dd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (dd *DocumentDelete) ExecX(ctx context.Context) int {
}
func (dd *DocumentDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: document.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(document.Table, sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID))
if ps := dd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (dd *DocumentDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ dd.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type DocumentDeleteOne struct {
dd *DocumentDelete
}
+// Where appends a list predicates to the DocumentDelete builder.
+func (ddo *DocumentDeleteOne) Where(ps ...predicate.Document) *DocumentDeleteOne {
+ ddo.dd.mutation.Where(ps...)
+ return ddo
+}
+
// Exec executes the deletion query.
func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error {
n, err := ddo.dd.Exec(ctx)
@@ -111,5 +82,7 @@ func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) {
- ddo.dd.ExecX(ctx)
+ if err := ddo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/document_query.go b/backend/internal/data/ent/document_query.go
index 7505152..34f4801 100644
--- a/backend/internal/data/ent/document_query.go
+++ b/backend/internal/data/ent/document_query.go
@@ -14,7 +14,6 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
@@ -22,16 +21,13 @@ import (
// DocumentQuery is the builder for querying Document entities.
type DocumentQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
- predicates []predicate.Document
- withGroup *GroupQuery
- withDocumentTokens *DocumentTokenQuery
- withAttachments *AttachmentQuery
- withFKs bool
+ ctx *QueryContext
+ order []document.OrderOption
+ inters []Interceptor
+ predicates []predicate.Document
+ withGroup *GroupQuery
+ withAttachments *AttachmentQuery
+ withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
@@ -43,34 +39,34 @@ func (dq *DocumentQuery) Where(ps ...predicate.Document) *DocumentQuery {
return dq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (dq *DocumentQuery) Limit(limit int) *DocumentQuery {
- dq.limit = &limit
+ dq.ctx.Limit = &limit
return dq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (dq *DocumentQuery) Offset(offset int) *DocumentQuery {
- dq.offset = &offset
+ dq.ctx.Offset = &offset
return dq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery {
- dq.unique = &unique
+ dq.ctx.Unique = &unique
return dq
}
-// Order adds an order step to the query.
-func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery {
+// Order specifies how the records should be ordered.
+func (dq *DocumentQuery) Order(o ...document.OrderOption) *DocumentQuery {
dq.order = append(dq.order, o...)
return dq
}
// QueryGroup chains the current query on the "group" edge.
func (dq *DocumentQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: dq.config}
+ query := (&GroupClient{config: dq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
@@ -90,31 +86,9 @@ func (dq *DocumentQuery) QueryGroup() *GroupQuery {
return query
}
-// QueryDocumentTokens chains the current query on the "document_tokens" edge.
-func (dq *DocumentQuery) QueryDocumentTokens() *DocumentTokenQuery {
- query := &DocumentTokenQuery{config: dq.config}
- query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
- if err := dq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- selector := dq.sqlQuery(ctx)
- if err := selector.Err(); err != nil {
- return nil, err
- }
- step := sqlgraph.NewStep(
- sqlgraph.From(document.Table, document.FieldID, selector),
- sqlgraph.To(documenttoken.Table, documenttoken.FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn),
- )
- fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step)
- return fromU, nil
- }
- return query
-}
-
// QueryAttachments chains the current query on the "attachments" edge.
func (dq *DocumentQuery) QueryAttachments() *AttachmentQuery {
- query := &AttachmentQuery{config: dq.config}
+ query := (&AttachmentClient{config: dq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
@@ -137,7 +111,7 @@ func (dq *DocumentQuery) QueryAttachments() *AttachmentQuery {
// First returns the first Document entity from the query.
// Returns a *NotFoundError when no Document was found.
func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) {
- nodes, err := dq.Limit(1).All(ctx)
+ nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -160,7 +134,7 @@ func (dq *DocumentQuery) FirstX(ctx context.Context) *Document {
// Returns a *NotFoundError when no Document ID was found.
func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = dq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -183,7 +157,7 @@ func (dq *DocumentQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Document entity is found.
// Returns a *NotFoundError when no Document entities are found.
func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) {
- nodes, err := dq.Limit(2).All(ctx)
+ nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -211,7 +185,7 @@ func (dq *DocumentQuery) OnlyX(ctx context.Context) *Document {
// Returns a *NotFoundError when no entities are found.
func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = dq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -236,10 +210,12 @@ func (dq *DocumentQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Documents.
func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) {
+ ctx = setContextOp(ctx, dq.ctx, "All")
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
- return dq.sqlAll(ctx)
+ qr := querierAll[[]*Document, *DocumentQuery]()
+ return withInterceptors[[]*Document](ctx, dq, qr, dq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -252,9 +228,12 @@ func (dq *DocumentQuery) AllX(ctx context.Context) []*Document {
}
// IDs executes the query and returns a list of Document IDs.
-func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil {
+func (dq *DocumentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if dq.ctx.Unique == nil && dq.path != nil {
+ dq.Unique(true)
+ }
+ ctx = setContextOp(ctx, dq.ctx, "IDs")
+ if err = dq.Select(document.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -271,10 +250,11 @@ func (dq *DocumentQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (dq *DocumentQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, dq.ctx, "Count")
if err := dq.prepareQuery(ctx); err != nil {
return 0, err
}
- return dq.sqlCount(ctx)
+ return withInterceptors[int](ctx, dq, querierCount[*DocumentQuery](), dq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -288,10 +268,15 @@ func (dq *DocumentQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) {
- if err := dq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, dq.ctx, "Exist")
+ switch _, err := dq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return dq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -310,25 +295,23 @@ func (dq *DocumentQuery) Clone() *DocumentQuery {
return nil
}
return &DocumentQuery{
- config: dq.config,
- limit: dq.limit,
- offset: dq.offset,
- order: append([]OrderFunc{}, dq.order...),
- predicates: append([]predicate.Document{}, dq.predicates...),
- withGroup: dq.withGroup.Clone(),
- withDocumentTokens: dq.withDocumentTokens.Clone(),
- withAttachments: dq.withAttachments.Clone(),
+ config: dq.config,
+ ctx: dq.ctx.Clone(),
+ order: append([]document.OrderOption{}, dq.order...),
+ inters: append([]Interceptor{}, dq.inters...),
+ predicates: append([]predicate.Document{}, dq.predicates...),
+ withGroup: dq.withGroup.Clone(),
+ withAttachments: dq.withAttachments.Clone(),
// clone intermediate query.
- sql: dq.sql.Clone(),
- path: dq.path,
- unique: dq.unique,
+ sql: dq.sql.Clone(),
+ path: dq.path,
}
}
// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (dq *DocumentQuery) WithGroup(opts ...func(*GroupQuery)) *DocumentQuery {
- query := &GroupQuery{config: dq.config}
+ query := (&GroupClient{config: dq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -336,21 +319,10 @@ func (dq *DocumentQuery) WithGroup(opts ...func(*GroupQuery)) *DocumentQuery {
return dq
}
-// WithDocumentTokens tells the query-builder to eager-load the nodes that are connected to
-// the "document_tokens" edge. The optional arguments are used to configure the query builder of the edge.
-func (dq *DocumentQuery) WithDocumentTokens(opts ...func(*DocumentTokenQuery)) *DocumentQuery {
- query := &DocumentTokenQuery{config: dq.config}
- for _, opt := range opts {
- opt(query)
- }
- dq.withDocumentTokens = query
- return dq
-}
-
// WithAttachments tells the query-builder to eager-load the nodes that are connected to
// the "attachments" edge. The optional arguments are used to configure the query builder of the edge.
func (dq *DocumentQuery) WithAttachments(opts ...func(*AttachmentQuery)) *DocumentQuery {
- query := &AttachmentQuery{config: dq.config}
+ query := (&AttachmentClient{config: dq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -373,16 +345,11 @@ func (dq *DocumentQuery) WithAttachments(opts ...func(*AttachmentQuery)) *Docume
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy {
- grbuild := &DocumentGroupBy{config: dq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := dq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return dq.sqlQuery(ctx), nil
- }
+ dq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &DocumentGroupBy{build: dq}
+ grbuild.flds = &dq.ctx.Fields
grbuild.label = document.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -399,15 +366,30 @@ func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupB
// Select(document.FieldCreatedAt).
// Scan(ctx, &v)
func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect {
- dq.fields = append(dq.fields, fields...)
- selbuild := &DocumentSelect{DocumentQuery: dq}
- selbuild.label = document.Label
- selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan
- return selbuild
+ dq.ctx.Fields = append(dq.ctx.Fields, fields...)
+ sbuild := &DocumentSelect{DocumentQuery: dq}
+ sbuild.label = document.Label
+ sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a DocumentSelect configured with the given aggregations.
+func (dq *DocumentQuery) Aggregate(fns ...AggregateFunc) *DocumentSelect {
+ return dq.Select().Aggregate(fns...)
}
func (dq *DocumentQuery) prepareQuery(ctx context.Context) error {
- for _, f := range dq.fields {
+ for _, inter := range dq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, dq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range dq.ctx.Fields {
if !document.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -427,9 +409,8 @@ func (dq *DocumentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Doc
nodes = []*Document{}
withFKs = dq.withFKs
_spec = dq.querySpec()
- loadedTypes = [3]bool{
+ loadedTypes = [2]bool{
dq.withGroup != nil,
- dq.withDocumentTokens != nil,
dq.withAttachments != nil,
}
)
@@ -463,13 +444,6 @@ func (dq *DocumentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Doc
return nil, err
}
}
- if query := dq.withDocumentTokens; query != nil {
- if err := dq.loadDocumentTokens(ctx, query, nodes,
- func(n *Document) { n.Edges.DocumentTokens = []*DocumentToken{} },
- func(n *Document, e *DocumentToken) { n.Edges.DocumentTokens = append(n.Edges.DocumentTokens, e) }); err != nil {
- return nil, err
- }
- }
if query := dq.withAttachments; query != nil {
if err := dq.loadAttachments(ctx, query, nodes,
func(n *Document) { n.Edges.Attachments = []*Attachment{} },
@@ -493,6 +467,9 @@ func (dq *DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -509,37 +486,6 @@ func (dq *DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes
}
return nil
}
-func (dq *DocumentQuery) loadDocumentTokens(ctx context.Context, query *DocumentTokenQuery, nodes []*Document, init func(*Document), assign func(*Document, *DocumentToken)) error {
- fks := make([]driver.Value, 0, len(nodes))
- nodeids := make(map[uuid.UUID]*Document)
- for i := range nodes {
- fks = append(fks, nodes[i].ID)
- nodeids[nodes[i].ID] = nodes[i]
- if init != nil {
- init(nodes[i])
- }
- }
- query.withFKs = true
- query.Where(predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.InValues(document.DocumentTokensColumn, fks...))
- }))
- neighbors, err := query.All(ctx)
- if err != nil {
- return err
- }
- for _, n := range neighbors {
- fk := n.document_document_tokens
- if fk == nil {
- return fmt.Errorf(`foreign-key "document_document_tokens" is nil for node %v`, n.ID)
- }
- node, ok := nodeids[*fk]
- if !ok {
- return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v for node %v`, *fk, n.ID)
- }
- assign(node, n)
- }
- return nil
-}
func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQuery, nodes []*Document, init func(*Document), assign func(*Document, *Attachment)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[uuid.UUID]*Document)
@@ -552,7 +498,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
}
query.withFKs = true
query.Where(predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.InValues(document.AttachmentsColumn, fks...))
+ s.Where(sql.InValues(s.C(document.AttachmentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -565,7 +511,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -574,41 +520,22 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dq.querySpec()
- _spec.Node.Columns = dq.fields
- if len(dq.fields) > 0 {
- _spec.Unique = dq.unique != nil && *dq.unique
+ _spec.Node.Columns = dq.ctx.Fields
+ if len(dq.ctx.Fields) > 0 {
+ _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, dq.driver, _spec)
}
-func (dq *DocumentQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := dq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: document.Table,
- Columns: document.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- From: dq.sql,
- Unique: true,
- }
- if unique := dq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(document.Table, document.Columns, sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID))
+ _spec.From = dq.sql
+ if unique := dq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if dq.path != nil {
+ _spec.Unique = true
}
- if fields := dq.fields; len(fields) > 0 {
+ if fields := dq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, document.FieldID)
for i := range fields {
@@ -624,10 +551,10 @@ func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := dq.limit; limit != nil {
+ if limit := dq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := dq.offset; offset != nil {
+ if offset := dq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := dq.order; len(ps) > 0 {
@@ -643,7 +570,7 @@ func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(document.Table)
- columns := dq.fields
+ columns := dq.ctx.Fields
if len(columns) == 0 {
columns = document.Columns
}
@@ -652,7 +579,7 @@ func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = dq.sql
selector.Select(selector.Columns(columns...)...)
}
- if dq.unique != nil && *dq.unique {
+ if dq.ctx.Unique != nil && *dq.ctx.Unique {
selector.Distinct()
}
for _, p := range dq.predicates {
@@ -661,12 +588,12 @@ func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range dq.order {
p(selector)
}
- if offset := dq.offset; offset != nil {
+ if offset := dq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := dq.limit; limit != nil {
+ if limit := dq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -674,13 +601,8 @@ func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
// DocumentGroupBy is the group-by builder for Document entities.
type DocumentGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *DocumentQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -689,74 +611,77 @@ func (dgb *DocumentGroupBy) Aggregate(fns ...AggregateFunc) *DocumentGroupBy {
return dgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (dgb *DocumentGroupBy) Scan(ctx context.Context, v any) error {
- query, err := dgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy")
+ if err := dgb.build.prepareQuery(ctx); err != nil {
return err
}
- dgb.sql = query
- return dgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*DocumentQuery, *DocumentGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v)
}
-func (dgb *DocumentGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range dgb.fields {
- if !document.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := dgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := dgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (dgb *DocumentGroupBy) sqlQuery() *sql.Selector {
- selector := dgb.sql.Select()
+func (dgb *DocumentGroupBy) sqlScan(ctx context.Context, root *DocumentQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(dgb.fns))
for _, fn := range dgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
- for _, f := range dgb.fields {
+ columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns))
+ for _, f := range *dgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(dgb.fields...)...)
+ selector.GroupBy(selector.Columns(*dgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// DocumentSelect is the builder for selecting fields of Document entities.
type DocumentSelect struct {
*DocumentQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ds *DocumentSelect) Aggregate(fns ...AggregateFunc) *DocumentSelect {
+ ds.fns = append(ds.fns, fns...)
+ return ds
}
// Scan applies the selector query and scans the result into the given value.
func (ds *DocumentSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ds.ctx, "Select")
if err := ds.prepareQuery(ctx); err != nil {
return err
}
- ds.sql = ds.DocumentQuery.sqlQuery(ctx)
- return ds.sqlScan(ctx, v)
+ return scanWithInterceptors[*DocumentQuery, *DocumentSelect](ctx, ds.DocumentQuery, ds, ds.inters, v)
}
-func (ds *DocumentSelect) sqlScan(ctx context.Context, v any) error {
+func (ds *DocumentSelect) sqlScan(ctx context.Context, root *DocumentQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ds.fns))
+ for _, fn := range ds.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ds.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := ds.sql.Query()
+ query, args := selector.Query()
if err := ds.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/document_update.go b/backend/internal/data/ent/document_update.go
index c6dd9fe..23e6d9c 100644
--- a/backend/internal/data/ent/document_update.go
+++ b/backend/internal/data/ent/document_update.go
@@ -14,7 +14,6 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
@@ -44,12 +43,28 @@ func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate {
return du
}
+// SetNillableTitle sets the "title" field if the given value is not nil.
+func (du *DocumentUpdate) SetNillableTitle(s *string) *DocumentUpdate {
+ if s != nil {
+ du.SetTitle(*s)
+ }
+ return du
+}
+
// SetPath sets the "path" field.
func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate {
du.mutation.SetPath(s)
return du
}
+// SetNillablePath sets the "path" field if the given value is not nil.
+func (du *DocumentUpdate) SetNillablePath(s *string) *DocumentUpdate {
+ if s != nil {
+ du.SetPath(*s)
+ }
+ return du
+}
+
// SetGroupID sets the "group" edge to the Group entity by ID.
func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate {
du.mutation.SetGroupID(id)
@@ -61,21 +76,6 @@ func (du *DocumentUpdate) SetGroup(g *Group) *DocumentUpdate {
return du.SetGroupID(g.ID)
}
-// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
-func (du *DocumentUpdate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate {
- du.mutation.AddDocumentTokenIDs(ids...)
- return du
-}
-
-// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
-func (du *DocumentUpdate) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdate {
- ids := make([]uuid.UUID, len(d))
- for i := range d {
- ids[i] = d[i].ID
- }
- return du.AddDocumentTokenIDs(ids...)
-}
-
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (du *DocumentUpdate) AddAttachmentIDs(ids ...uuid.UUID) *DocumentUpdate {
du.mutation.AddAttachmentIDs(ids...)
@@ -102,27 +102,6 @@ func (du *DocumentUpdate) ClearGroup() *DocumentUpdate {
return du
}
-// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity.
-func (du *DocumentUpdate) ClearDocumentTokens() *DocumentUpdate {
- du.mutation.ClearDocumentTokens()
- return du
-}
-
-// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs.
-func (du *DocumentUpdate) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate {
- du.mutation.RemoveDocumentTokenIDs(ids...)
- return du
-}
-
-// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities.
-func (du *DocumentUpdate) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdate {
- ids := make([]uuid.UUID, len(d))
- for i := range d {
- ids[i] = d[i].ID
- }
- return du.RemoveDocumentTokenIDs(ids...)
-}
-
// ClearAttachments clears all "attachments" edges to the Attachment entity.
func (du *DocumentUpdate) ClearAttachments() *DocumentUpdate {
du.mutation.ClearAttachments()
@@ -146,41 +125,8 @@ func (du *DocumentUpdate) RemoveAttachments(a ...*Attachment) *DocumentUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (du *DocumentUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
du.defaults()
- if len(du.hooks) == 0 {
- if err = du.check(); err != nil {
- return 0, err
- }
- affected, err = du.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = du.check(); err != nil {
- return 0, err
- }
- du.mutation = mutation
- affected, err = du.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(du.hooks) - 1; i >= 0; i-- {
- if du.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = du.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, du.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, du.sqlSave, du.mutation, du.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -232,16 +178,10 @@ func (du *DocumentUpdate) check() error {
}
func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: document.Table,
- Columns: document.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
+ if err := du.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(document.Table, document.Columns, sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID))
if ps := du.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -250,25 +190,13 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := du.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: document.FieldUpdatedAt,
- })
+ _spec.SetField(document.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := du.mutation.Title(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldTitle,
- })
+ _spec.SetField(document.FieldTitle, field.TypeString, value)
}
if value, ok := du.mutation.Path(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldPath,
- })
+ _spec.SetField(document.FieldPath, field.TypeString, value)
}
if du.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -278,10 +206,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -294,64 +219,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if du.mutation.DocumentTokensCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := du.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !du.mutation.DocumentTokensCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := du.mutation.DocumentTokensIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -367,10 +235,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -383,10 +248,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -402,10 +264,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -421,6 +280,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ du.mutation.done = true
return n, nil
}
@@ -444,12 +304,28 @@ func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne {
return duo
}
+// SetNillableTitle sets the "title" field if the given value is not nil.
+func (duo *DocumentUpdateOne) SetNillableTitle(s *string) *DocumentUpdateOne {
+ if s != nil {
+ duo.SetTitle(*s)
+ }
+ return duo
+}
+
// SetPath sets the "path" field.
func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne {
duo.mutation.SetPath(s)
return duo
}
+// SetNillablePath sets the "path" field if the given value is not nil.
+func (duo *DocumentUpdateOne) SetNillablePath(s *string) *DocumentUpdateOne {
+ if s != nil {
+ duo.SetPath(*s)
+ }
+ return duo
+}
+
// SetGroupID sets the "group" edge to the Group entity by ID.
func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne {
duo.mutation.SetGroupID(id)
@@ -461,21 +337,6 @@ func (duo *DocumentUpdateOne) SetGroup(g *Group) *DocumentUpdateOne {
return duo.SetGroupID(g.ID)
}
-// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
-func (duo *DocumentUpdateOne) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne {
- duo.mutation.AddDocumentTokenIDs(ids...)
- return duo
-}
-
-// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
-func (duo *DocumentUpdateOne) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne {
- ids := make([]uuid.UUID, len(d))
- for i := range d {
- ids[i] = d[i].ID
- }
- return duo.AddDocumentTokenIDs(ids...)
-}
-
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (duo *DocumentUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *DocumentUpdateOne {
duo.mutation.AddAttachmentIDs(ids...)
@@ -502,27 +363,6 @@ func (duo *DocumentUpdateOne) ClearGroup() *DocumentUpdateOne {
return duo
}
-// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity.
-func (duo *DocumentUpdateOne) ClearDocumentTokens() *DocumentUpdateOne {
- duo.mutation.ClearDocumentTokens()
- return duo
-}
-
-// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs.
-func (duo *DocumentUpdateOne) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne {
- duo.mutation.RemoveDocumentTokenIDs(ids...)
- return duo
-}
-
-// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities.
-func (duo *DocumentUpdateOne) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne {
- ids := make([]uuid.UUID, len(d))
- for i := range d {
- ids[i] = d[i].ID
- }
- return duo.RemoveDocumentTokenIDs(ids...)
-}
-
// ClearAttachments clears all "attachments" edges to the Attachment entity.
func (duo *DocumentUpdateOne) ClearAttachments() *DocumentUpdateOne {
duo.mutation.ClearAttachments()
@@ -544,6 +384,12 @@ func (duo *DocumentUpdateOne) RemoveAttachments(a ...*Attachment) *DocumentUpdat
return duo.RemoveAttachmentIDs(ids...)
}
+// Where appends a list predicates to the DocumentUpdate builder.
+func (duo *DocumentUpdateOne) Where(ps ...predicate.Document) *DocumentUpdateOne {
+ duo.mutation.Where(ps...)
+ return duo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUpdateOne {
@@ -553,47 +399,8 @@ func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUp
// Save executes the query and returns the updated Document entity.
func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) {
- var (
- err error
- node *Document
- )
duo.defaults()
- if len(duo.hooks) == 0 {
- if err = duo.check(); err != nil {
- return nil, err
- }
- node, err = duo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = duo.check(); err != nil {
- return nil, err
- }
- duo.mutation = mutation
- node, err = duo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(duo.hooks) - 1; i >= 0; i-- {
- if duo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = duo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, duo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Document)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -645,16 +452,10 @@ func (duo *DocumentUpdateOne) check() error {
}
func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: document.Table,
- Columns: document.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
+ if err := duo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(document.Table, document.Columns, sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID))
id, ok := duo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Document.id" for update`)}
@@ -680,25 +481,13 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
}
}
if value, ok := duo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: document.FieldUpdatedAt,
- })
+ _spec.SetField(document.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := duo.mutation.Title(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldTitle,
- })
+ _spec.SetField(document.FieldTitle, field.TypeString, value)
}
if value, ok := duo.mutation.Path(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: document.FieldPath,
- })
+ _spec.SetField(document.FieldPath, field.TypeString, value)
}
if duo.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -708,10 +497,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -724,64 +510,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if duo.mutation.DocumentTokensCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := duo.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !duo.mutation.DocumentTokensCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := duo.mutation.DocumentTokensIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.O2M,
- Inverse: false,
- Table: document.DocumentTokensTable,
- Columns: []string{document.DocumentTokensColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -797,10 +526,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -813,10 +539,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -832,10 +555,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -854,5 +574,6 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
}
return nil, err
}
+ duo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/documenttoken.go b/backend/internal/data/ent/documenttoken.go
deleted file mode 100644
index c484a9e..0000000
--- a/backend/internal/data/ent/documenttoken.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "fmt"
- "strings"
- "time"
-
- "entgo.io/ent/dialect/sql"
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
-)
-
-// DocumentToken is the model entity for the DocumentToken schema.
-type DocumentToken struct {
- config `json:"-"`
- // ID of the ent.
- ID uuid.UUID `json:"id,omitempty"`
- // CreatedAt holds the value of the "created_at" field.
- CreatedAt time.Time `json:"created_at,omitempty"`
- // UpdatedAt holds the value of the "updated_at" field.
- UpdatedAt time.Time `json:"updated_at,omitempty"`
- // Token holds the value of the "token" field.
- Token []byte `json:"token,omitempty"`
- // Uses holds the value of the "uses" field.
- Uses int `json:"uses,omitempty"`
- // ExpiresAt holds the value of the "expires_at" field.
- ExpiresAt time.Time `json:"expires_at,omitempty"`
- // Edges holds the relations/edges for other nodes in the graph.
- // The values are being populated by the DocumentTokenQuery when eager-loading is set.
- Edges DocumentTokenEdges `json:"edges"`
- document_document_tokens *uuid.UUID
-}
-
-// DocumentTokenEdges holds the relations/edges for other nodes in the graph.
-type DocumentTokenEdges struct {
- // Document holds the value of the document edge.
- Document *Document `json:"document,omitempty"`
- // loadedTypes holds the information for reporting if a
- // type was loaded (or requested) in eager-loading or not.
- loadedTypes [1]bool
-}
-
-// DocumentOrErr returns the Document value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e DocumentTokenEdges) DocumentOrErr() (*Document, error) {
- if e.loadedTypes[0] {
- if e.Document == nil {
- // Edge was loaded but was not found.
- return nil, &NotFoundError{label: document.Label}
- }
- return e.Document, nil
- }
- return nil, &NotLoadedError{edge: "document"}
-}
-
-// scanValues returns the types for scanning values from sql.Rows.
-func (*DocumentToken) scanValues(columns []string) ([]any, error) {
- values := make([]any, len(columns))
- for i := range columns {
- switch columns[i] {
- case documenttoken.FieldToken:
- values[i] = new([]byte)
- case documenttoken.FieldUses:
- values[i] = new(sql.NullInt64)
- case documenttoken.FieldCreatedAt, documenttoken.FieldUpdatedAt, documenttoken.FieldExpiresAt:
- values[i] = new(sql.NullTime)
- case documenttoken.FieldID:
- values[i] = new(uuid.UUID)
- case documenttoken.ForeignKeys[0]: // document_document_tokens
- values[i] = &sql.NullScanner{S: new(uuid.UUID)}
- default:
- return nil, fmt.Errorf("unexpected column %q for type DocumentToken", columns[i])
- }
- }
- return values, nil
-}
-
-// assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the DocumentToken fields.
-func (dt *DocumentToken) assignValues(columns []string, values []any) error {
- if m, n := len(values), len(columns); m < n {
- return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
- }
- for i := range columns {
- switch columns[i] {
- case documenttoken.FieldID:
- if value, ok := values[i].(*uuid.UUID); !ok {
- return fmt.Errorf("unexpected type %T for field id", values[i])
- } else if value != nil {
- dt.ID = *value
- }
- case documenttoken.FieldCreatedAt:
- if value, ok := values[i].(*sql.NullTime); !ok {
- return fmt.Errorf("unexpected type %T for field created_at", values[i])
- } else if value.Valid {
- dt.CreatedAt = value.Time
- }
- case documenttoken.FieldUpdatedAt:
- if value, ok := values[i].(*sql.NullTime); !ok {
- return fmt.Errorf("unexpected type %T for field updated_at", values[i])
- } else if value.Valid {
- dt.UpdatedAt = value.Time
- }
- case documenttoken.FieldToken:
- if value, ok := values[i].(*[]byte); !ok {
- return fmt.Errorf("unexpected type %T for field token", values[i])
- } else if value != nil {
- dt.Token = *value
- }
- case documenttoken.FieldUses:
- if value, ok := values[i].(*sql.NullInt64); !ok {
- return fmt.Errorf("unexpected type %T for field uses", values[i])
- } else if value.Valid {
- dt.Uses = int(value.Int64)
- }
- case documenttoken.FieldExpiresAt:
- if value, ok := values[i].(*sql.NullTime); !ok {
- return fmt.Errorf("unexpected type %T for field expires_at", values[i])
- } else if value.Valid {
- dt.ExpiresAt = value.Time
- }
- case documenttoken.ForeignKeys[0]:
- if value, ok := values[i].(*sql.NullScanner); !ok {
- return fmt.Errorf("unexpected type %T for field document_document_tokens", values[i])
- } else if value.Valid {
- dt.document_document_tokens = new(uuid.UUID)
- *dt.document_document_tokens = *value.S.(*uuid.UUID)
- }
- }
- }
- return nil
-}
-
-// QueryDocument queries the "document" edge of the DocumentToken entity.
-func (dt *DocumentToken) QueryDocument() *DocumentQuery {
- return (&DocumentTokenClient{config: dt.config}).QueryDocument(dt)
-}
-
-// Update returns a builder for updating this DocumentToken.
-// Note that you need to call DocumentToken.Unwrap() before calling this method if this DocumentToken
-// was returned from a transaction, and the transaction was committed or rolled back.
-func (dt *DocumentToken) Update() *DocumentTokenUpdateOne {
- return (&DocumentTokenClient{config: dt.config}).UpdateOne(dt)
-}
-
-// Unwrap unwraps the DocumentToken entity that was returned from a transaction after it was closed,
-// so that all future queries will be executed through the driver which created the transaction.
-func (dt *DocumentToken) Unwrap() *DocumentToken {
- _tx, ok := dt.config.driver.(*txDriver)
- if !ok {
- panic("ent: DocumentToken is not a transactional entity")
- }
- dt.config.driver = _tx.drv
- return dt
-}
-
-// String implements the fmt.Stringer.
-func (dt *DocumentToken) String() string {
- var builder strings.Builder
- builder.WriteString("DocumentToken(")
- builder.WriteString(fmt.Sprintf("id=%v, ", dt.ID))
- builder.WriteString("created_at=")
- builder.WriteString(dt.CreatedAt.Format(time.ANSIC))
- builder.WriteString(", ")
- builder.WriteString("updated_at=")
- builder.WriteString(dt.UpdatedAt.Format(time.ANSIC))
- builder.WriteString(", ")
- builder.WriteString("token=")
- builder.WriteString(fmt.Sprintf("%v", dt.Token))
- builder.WriteString(", ")
- builder.WriteString("uses=")
- builder.WriteString(fmt.Sprintf("%v", dt.Uses))
- builder.WriteString(", ")
- builder.WriteString("expires_at=")
- builder.WriteString(dt.ExpiresAt.Format(time.ANSIC))
- builder.WriteByte(')')
- return builder.String()
-}
-
-// DocumentTokens is a parsable slice of DocumentToken.
-type DocumentTokens []*DocumentToken
-
-func (dt DocumentTokens) config(cfg config) {
- for _i := range dt {
- dt[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/documenttoken/documenttoken.go b/backend/internal/data/ent/documenttoken/documenttoken.go
deleted file mode 100644
index ce05656..0000000
--- a/backend/internal/data/ent/documenttoken/documenttoken.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package documenttoken
-
-import (
- "time"
-
- "github.com/google/uuid"
-)
-
-const (
- // Label holds the string label denoting the documenttoken type in the database.
- Label = "document_token"
- // FieldID holds the string denoting the id field in the database.
- FieldID = "id"
- // FieldCreatedAt holds the string denoting the created_at field in the database.
- FieldCreatedAt = "created_at"
- // FieldUpdatedAt holds the string denoting the updated_at field in the database.
- FieldUpdatedAt = "updated_at"
- // FieldToken holds the string denoting the token field in the database.
- FieldToken = "token"
- // FieldUses holds the string denoting the uses field in the database.
- FieldUses = "uses"
- // FieldExpiresAt holds the string denoting the expires_at field in the database.
- FieldExpiresAt = "expires_at"
- // EdgeDocument holds the string denoting the document edge name in mutations.
- EdgeDocument = "document"
- // Table holds the table name of the documenttoken in the database.
- Table = "document_tokens"
- // DocumentTable is the table that holds the document relation/edge.
- DocumentTable = "document_tokens"
- // DocumentInverseTable is the table name for the Document entity.
- // It exists in this package in order to avoid circular dependency with the "document" package.
- DocumentInverseTable = "documents"
- // DocumentColumn is the table column denoting the document relation/edge.
- DocumentColumn = "document_document_tokens"
-)
-
-// Columns holds all SQL columns for documenttoken fields.
-var Columns = []string{
- FieldID,
- FieldCreatedAt,
- FieldUpdatedAt,
- FieldToken,
- FieldUses,
- FieldExpiresAt,
-}
-
-// ForeignKeys holds the SQL foreign-keys that are owned by the "document_tokens"
-// table and are not defined as standalone fields in the schema.
-var ForeignKeys = []string{
- "document_document_tokens",
-}
-
-// ValidColumn reports if the column name is valid (part of the table columns).
-func ValidColumn(column string) bool {
- for i := range Columns {
- if column == Columns[i] {
- return true
- }
- }
- for i := range ForeignKeys {
- if column == ForeignKeys[i] {
- return true
- }
- }
- return false
-}
-
-var (
- // DefaultCreatedAt holds the default value on creation for the "created_at" field.
- DefaultCreatedAt func() time.Time
- // DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
- DefaultUpdatedAt func() time.Time
- // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
- UpdateDefaultUpdatedAt func() time.Time
- // TokenValidator is a validator for the "token" field. It is called by the builders before save.
- TokenValidator func([]byte) error
- // DefaultUses holds the default value on creation for the "uses" field.
- DefaultUses int
- // DefaultExpiresAt holds the default value on creation for the "expires_at" field.
- DefaultExpiresAt func() time.Time
- // DefaultID holds the default value on creation for the "id" field.
- DefaultID func() uuid.UUID
-)
diff --git a/backend/internal/data/ent/documenttoken/where.go b/backend/internal/data/ent/documenttoken/where.go
deleted file mode 100644
index 32dbb39..0000000
--- a/backend/internal/data/ent/documenttoken/where.go
+++ /dev/null
@@ -1,498 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package documenttoken
-
-import (
- "time"
-
- "entgo.io/ent/dialect/sql"
- "entgo.io/ent/dialect/sql/sqlgraph"
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
-)
-
-// ID filters vertices based on their ID field.
-func ID(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
-}
-
-// IDEQ applies the EQ predicate on the ID field.
-func IDEQ(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
-}
-
-// IDNEQ applies the NEQ predicate on the ID field.
-func IDNEQ(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
-}
-
-// IDIn applies the In predicate on the ID field.
-func IDIn(ids ...uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
-}
-
-// IDNotIn applies the NotIn predicate on the ID field.
-func IDNotIn(ids ...uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
-}
-
-// IDGT applies the GT predicate on the ID field.
-func IDGT(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
-}
-
-// IDGTE applies the GTE predicate on the ID field.
-func IDGTE(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
-}
-
-// IDLT applies the LT predicate on the ID field.
-func IDLT(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
-}
-
-// IDLTE applies the LTE predicate on the ID field.
-func IDLTE(id uuid.UUID) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
-}
-
-// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
-func CreatedAt(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
-}
-
-// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
-func UpdatedAt(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
-}
-
-// Token applies equality check predicate on the "token" field. It's identical to TokenEQ.
-func Token(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
-}
-
-// Uses applies equality check predicate on the "uses" field. It's identical to UsesEQ.
-func Uses(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUses), v))
- })
-}
-
-// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
-func ExpiresAt(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
-}
-
-// CreatedAtEQ applies the EQ predicate on the "created_at" field.
-func CreatedAtEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
-}
-
-// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
-func CreatedAtNEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
-}
-
-// CreatedAtIn applies the In predicate on the "created_at" field.
-func CreatedAtIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
-}
-
-// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
-func CreatedAtNotIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
-}
-
-// CreatedAtGT applies the GT predicate on the "created_at" field.
-func CreatedAtGT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
-}
-
-// CreatedAtGTE applies the GTE predicate on the "created_at" field.
-func CreatedAtGTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
-}
-
-// CreatedAtLT applies the LT predicate on the "created_at" field.
-func CreatedAtLT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
-}
-
-// CreatedAtLTE applies the LTE predicate on the "created_at" field.
-func CreatedAtLTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
-}
-
-// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
-func UpdatedAtEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
-}
-
-// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
-func UpdatedAtNEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
-}
-
-// UpdatedAtIn applies the In predicate on the "updated_at" field.
-func UpdatedAtIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
-}
-
-// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
-func UpdatedAtNotIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
-}
-
-// UpdatedAtGT applies the GT predicate on the "updated_at" field.
-func UpdatedAtGT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
-}
-
-// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
-func UpdatedAtGTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
-}
-
-// UpdatedAtLT applies the LT predicate on the "updated_at" field.
-func UpdatedAtLT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
-}
-
-// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
-func UpdatedAtLTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
-}
-
-// TokenEQ applies the EQ predicate on the "token" field.
-func TokenEQ(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
-}
-
-// TokenNEQ applies the NEQ predicate on the "token" field.
-func TokenNEQ(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldToken), v))
- })
-}
-
-// TokenIn applies the In predicate on the "token" field.
-func TokenIn(vs ...[]byte) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldToken), v...))
- })
-}
-
-// TokenNotIn applies the NotIn predicate on the "token" field.
-func TokenNotIn(vs ...[]byte) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldToken), v...))
- })
-}
-
-// TokenGT applies the GT predicate on the "token" field.
-func TokenGT(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldToken), v))
- })
-}
-
-// TokenGTE applies the GTE predicate on the "token" field.
-func TokenGTE(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldToken), v))
- })
-}
-
-// TokenLT applies the LT predicate on the "token" field.
-func TokenLT(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldToken), v))
- })
-}
-
-// TokenLTE applies the LTE predicate on the "token" field.
-func TokenLTE(v []byte) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldToken), v))
- })
-}
-
-// UsesEQ applies the EQ predicate on the "uses" field.
-func UsesEQ(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUses), v))
- })
-}
-
-// UsesNEQ applies the NEQ predicate on the "uses" field.
-func UsesNEQ(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUses), v))
- })
-}
-
-// UsesIn applies the In predicate on the "uses" field.
-func UsesIn(vs ...int) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUses), v...))
- })
-}
-
-// UsesNotIn applies the NotIn predicate on the "uses" field.
-func UsesNotIn(vs ...int) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUses), v...))
- })
-}
-
-// UsesGT applies the GT predicate on the "uses" field.
-func UsesGT(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUses), v))
- })
-}
-
-// UsesGTE applies the GTE predicate on the "uses" field.
-func UsesGTE(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUses), v))
- })
-}
-
-// UsesLT applies the LT predicate on the "uses" field.
-func UsesLT(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUses), v))
- })
-}
-
-// UsesLTE applies the LTE predicate on the "uses" field.
-func UsesLTE(v int) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUses), v))
- })
-}
-
-// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
-func ExpiresAtEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
-}
-
-// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
-func ExpiresAtNEQ(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldExpiresAt), v))
- })
-}
-
-// ExpiresAtIn applies the In predicate on the "expires_at" field.
-func ExpiresAtIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldExpiresAt), v...))
- })
-}
-
-// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
-func ExpiresAtNotIn(vs ...time.Time) predicate.DocumentToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldExpiresAt), v...))
- })
-}
-
-// ExpiresAtGT applies the GT predicate on the "expires_at" field.
-func ExpiresAtGT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldExpiresAt), v))
- })
-}
-
-// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
-func ExpiresAtGTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldExpiresAt), v))
- })
-}
-
-// ExpiresAtLT applies the LT predicate on the "expires_at" field.
-func ExpiresAtLT(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldExpiresAt), v))
- })
-}
-
-// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
-func ExpiresAtLTE(v time.Time) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldExpiresAt), v))
- })
-}
-
-// HasDocument applies the HasEdge predicate on the "document" edge.
-func HasDocument() predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
- )
- sqlgraph.HasNeighbors(s, step)
- })
-}
-
-// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates).
-func HasDocumentWith(preds ...predicate.Document) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
- )
- sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
- for _, p := range preds {
- p(s)
- }
- })
- })
-}
-
-// And groups predicates with the AND operator between them.
-func And(predicates ...predicate.DocumentToken) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
-}
-
-// Or groups predicates with the OR operator between them.
-func Or(predicates ...predicate.DocumentToken) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
-}
-
-// Not applies the not operator on the given predicate.
-func Not(p predicate.DocumentToken) predicate.DocumentToken {
- return predicate.DocumentToken(func(s *sql.Selector) {
- p(s.Not())
- })
-}
diff --git a/backend/internal/data/ent/documenttoken_create.go b/backend/internal/data/ent/documenttoken_create.go
deleted file mode 100644
index 2b29079..0000000
--- a/backend/internal/data/ent/documenttoken_create.go
+++ /dev/null
@@ -1,418 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "context"
- "errors"
- "fmt"
- "time"
-
- "entgo.io/ent/dialect/sql/sqlgraph"
- "entgo.io/ent/schema/field"
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
-)
-
-// DocumentTokenCreate is the builder for creating a DocumentToken entity.
-type DocumentTokenCreate struct {
- config
- mutation *DocumentTokenMutation
- hooks []Hook
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (dtc *DocumentTokenCreate) SetCreatedAt(t time.Time) *DocumentTokenCreate {
- dtc.mutation.SetCreatedAt(t)
- return dtc
-}
-
-// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableCreatedAt(t *time.Time) *DocumentTokenCreate {
- if t != nil {
- dtc.SetCreatedAt(*t)
- }
- return dtc
-}
-
-// SetUpdatedAt sets the "updated_at" field.
-func (dtc *DocumentTokenCreate) SetUpdatedAt(t time.Time) *DocumentTokenCreate {
- dtc.mutation.SetUpdatedAt(t)
- return dtc
-}
-
-// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableUpdatedAt(t *time.Time) *DocumentTokenCreate {
- if t != nil {
- dtc.SetUpdatedAt(*t)
- }
- return dtc
-}
-
-// SetToken sets the "token" field.
-func (dtc *DocumentTokenCreate) SetToken(b []byte) *DocumentTokenCreate {
- dtc.mutation.SetToken(b)
- return dtc
-}
-
-// SetUses sets the "uses" field.
-func (dtc *DocumentTokenCreate) SetUses(i int) *DocumentTokenCreate {
- dtc.mutation.SetUses(i)
- return dtc
-}
-
-// SetNillableUses sets the "uses" field if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableUses(i *int) *DocumentTokenCreate {
- if i != nil {
- dtc.SetUses(*i)
- }
- return dtc
-}
-
-// SetExpiresAt sets the "expires_at" field.
-func (dtc *DocumentTokenCreate) SetExpiresAt(t time.Time) *DocumentTokenCreate {
- dtc.mutation.SetExpiresAt(t)
- return dtc
-}
-
-// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableExpiresAt(t *time.Time) *DocumentTokenCreate {
- if t != nil {
- dtc.SetExpiresAt(*t)
- }
- return dtc
-}
-
-// SetID sets the "id" field.
-func (dtc *DocumentTokenCreate) SetID(u uuid.UUID) *DocumentTokenCreate {
- dtc.mutation.SetID(u)
- return dtc
-}
-
-// SetNillableID sets the "id" field if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableID(u *uuid.UUID) *DocumentTokenCreate {
- if u != nil {
- dtc.SetID(*u)
- }
- return dtc
-}
-
-// SetDocumentID sets the "document" edge to the Document entity by ID.
-func (dtc *DocumentTokenCreate) SetDocumentID(id uuid.UUID) *DocumentTokenCreate {
- dtc.mutation.SetDocumentID(id)
- return dtc
-}
-
-// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
-func (dtc *DocumentTokenCreate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenCreate {
- if id != nil {
- dtc = dtc.SetDocumentID(*id)
- }
- return dtc
-}
-
-// SetDocument sets the "document" edge to the Document entity.
-func (dtc *DocumentTokenCreate) SetDocument(d *Document) *DocumentTokenCreate {
- return dtc.SetDocumentID(d.ID)
-}
-
-// Mutation returns the DocumentTokenMutation object of the builder.
-func (dtc *DocumentTokenCreate) Mutation() *DocumentTokenMutation {
- return dtc.mutation
-}
-
-// Save creates the DocumentToken in the database.
-func (dtc *DocumentTokenCreate) Save(ctx context.Context) (*DocumentToken, error) {
- var (
- err error
- node *DocumentToken
- )
- dtc.defaults()
- if len(dtc.hooks) == 0 {
- if err = dtc.check(); err != nil {
- return nil, err
- }
- node, err = dtc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = dtc.check(); err != nil {
- return nil, err
- }
- dtc.mutation = mutation
- if node, err = dtc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(dtc.hooks) - 1; i >= 0; i-- {
- if dtc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dtc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, dtc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*DocumentToken)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v)
- }
- node = nv
- }
- return node, err
-}
-
-// SaveX calls Save and panics if Save returns an error.
-func (dtc *DocumentTokenCreate) SaveX(ctx context.Context) *DocumentToken {
- v, err := dtc.Save(ctx)
- if err != nil {
- panic(err)
- }
- return v
-}
-
-// Exec executes the query.
-func (dtc *DocumentTokenCreate) Exec(ctx context.Context) error {
- _, err := dtc.Save(ctx)
- return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtc *DocumentTokenCreate) ExecX(ctx context.Context) {
- if err := dtc.Exec(ctx); err != nil {
- panic(err)
- }
-}
-
-// defaults sets the default values of the builder before save.
-func (dtc *DocumentTokenCreate) defaults() {
- if _, ok := dtc.mutation.CreatedAt(); !ok {
- v := documenttoken.DefaultCreatedAt()
- dtc.mutation.SetCreatedAt(v)
- }
- if _, ok := dtc.mutation.UpdatedAt(); !ok {
- v := documenttoken.DefaultUpdatedAt()
- dtc.mutation.SetUpdatedAt(v)
- }
- if _, ok := dtc.mutation.Uses(); !ok {
- v := documenttoken.DefaultUses
- dtc.mutation.SetUses(v)
- }
- if _, ok := dtc.mutation.ExpiresAt(); !ok {
- v := documenttoken.DefaultExpiresAt()
- dtc.mutation.SetExpiresAt(v)
- }
- if _, ok := dtc.mutation.ID(); !ok {
- v := documenttoken.DefaultID()
- dtc.mutation.SetID(v)
- }
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (dtc *DocumentTokenCreate) check() error {
- if _, ok := dtc.mutation.CreatedAt(); !ok {
- return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DocumentToken.created_at"`)}
- }
- if _, ok := dtc.mutation.UpdatedAt(); !ok {
- return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DocumentToken.updated_at"`)}
- }
- if _, ok := dtc.mutation.Token(); !ok {
- return &ValidationError{Name: "token", err: errors.New(`ent: missing required field "DocumentToken.token"`)}
- }
- if v, ok := dtc.mutation.Token(); ok {
- if err := documenttoken.TokenValidator(v); err != nil {
- return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
- }
- }
- if _, ok := dtc.mutation.Uses(); !ok {
- return &ValidationError{Name: "uses", err: errors.New(`ent: missing required field "DocumentToken.uses"`)}
- }
- if _, ok := dtc.mutation.ExpiresAt(); !ok {
- return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "DocumentToken.expires_at"`)}
- }
- return nil
-}
-
-func (dtc *DocumentTokenCreate) sqlSave(ctx context.Context) (*DocumentToken, error) {
- _node, _spec := dtc.createSpec()
- if err := sqlgraph.CreateNode(ctx, dtc.driver, _spec); err != nil {
- if sqlgraph.IsConstraintError(err) {
- err = &ConstraintError{msg: err.Error(), wrap: err}
- }
- return nil, err
- }
- if _spec.ID.Value != nil {
- if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
- _node.ID = *id
- } else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
- return nil, err
- }
- }
- return _node, nil
-}
-
-func (dtc *DocumentTokenCreate) createSpec() (*DocumentToken, *sqlgraph.CreateSpec) {
- var (
- _node = &DocumentToken{config: dtc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: documenttoken.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- }
- )
- if id, ok := dtc.mutation.ID(); ok {
- _node.ID = id
- _spec.ID.Value = &id
- }
- if value, ok := dtc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldCreatedAt,
- })
- _node.CreatedAt = value
- }
- if value, ok := dtc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldUpdatedAt,
- })
- _node.UpdatedAt = value
- }
- if value, ok := dtc.mutation.Token(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: documenttoken.FieldToken,
- })
- _node.Token = value
- }
- if value, ok := dtc.mutation.Uses(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: documenttoken.FieldUses,
- })
- _node.Uses = value
- }
- if value, ok := dtc.mutation.ExpiresAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldExpiresAt,
- })
- _node.ExpiresAt = value
- }
- if nodes := dtc.mutation.DocumentIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: documenttoken.DocumentTable,
- Columns: []string{documenttoken.DocumentColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _node.document_document_tokens = &nodes[0]
- _spec.Edges = append(_spec.Edges, edge)
- }
- return _node, _spec
-}
-
-// DocumentTokenCreateBulk is the builder for creating many DocumentToken entities in bulk.
-type DocumentTokenCreateBulk struct {
- config
- builders []*DocumentTokenCreate
-}
-
-// Save creates the DocumentToken entities in the database.
-func (dtcb *DocumentTokenCreateBulk) Save(ctx context.Context) ([]*DocumentToken, error) {
- specs := make([]*sqlgraph.CreateSpec, len(dtcb.builders))
- nodes := make([]*DocumentToken, len(dtcb.builders))
- mutators := make([]Mutator, len(dtcb.builders))
- for i := range dtcb.builders {
- func(i int, root context.Context) {
- builder := dtcb.builders[i]
- builder.defaults()
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err := builder.check(); err != nil {
- return nil, err
- }
- builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
- var err error
- if i < len(mutators)-1 {
- _, err = mutators[i+1].Mutate(root, dtcb.builders[i+1].mutation)
- } else {
- spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
- // Invoke the actual operation on the latest mutation in the chain.
- if err = sqlgraph.BatchCreate(ctx, dtcb.driver, spec); err != nil {
- if sqlgraph.IsConstraintError(err) {
- err = &ConstraintError{msg: err.Error(), wrap: err}
- }
- }
- }
- if err != nil {
- return nil, err
- }
- mutation.id = &nodes[i].ID
- mutation.done = true
- return nodes[i], nil
- })
- for i := len(builder.hooks) - 1; i >= 0; i-- {
- mut = builder.hooks[i](mut)
- }
- mutators[i] = mut
- }(i, ctx)
- }
- if len(mutators) > 0 {
- if _, err := mutators[0].Mutate(ctx, dtcb.builders[0].mutation); err != nil {
- return nil, err
- }
- }
- return nodes, nil
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (dtcb *DocumentTokenCreateBulk) SaveX(ctx context.Context) []*DocumentToken {
- v, err := dtcb.Save(ctx)
- if err != nil {
- panic(err)
- }
- return v
-}
-
-// Exec executes the query.
-func (dtcb *DocumentTokenCreateBulk) Exec(ctx context.Context) error {
- _, err := dtcb.Save(ctx)
- return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtcb *DocumentTokenCreateBulk) ExecX(ctx context.Context) {
- if err := dtcb.Exec(ctx); err != nil {
- panic(err)
- }
-}
diff --git a/backend/internal/data/ent/documenttoken_delete.go b/backend/internal/data/ent/documenttoken_delete.go
deleted file mode 100644
index 722ec1b..0000000
--- a/backend/internal/data/ent/documenttoken_delete.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "context"
- "fmt"
-
- "entgo.io/ent/dialect/sql"
- "entgo.io/ent/dialect/sql/sqlgraph"
- "entgo.io/ent/schema/field"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
- "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
-)
-
-// DocumentTokenDelete is the builder for deleting a DocumentToken entity.
-type DocumentTokenDelete struct {
- config
- hooks []Hook
- mutation *DocumentTokenMutation
-}
-
-// Where appends a list predicates to the DocumentTokenDelete builder.
-func (dtd *DocumentTokenDelete) Where(ps ...predicate.DocumentToken) *DocumentTokenDelete {
- dtd.mutation.Where(ps...)
- return dtd
-}
-
-// Exec executes the deletion query and returns how many vertices were deleted.
-func (dtd *DocumentTokenDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(dtd.hooks) == 0 {
- affected, err = dtd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- dtd.mutation = mutation
- affected, err = dtd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(dtd.hooks) - 1; i >= 0; i-- {
- if dtd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dtd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, dtd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtd *DocumentTokenDelete) ExecX(ctx context.Context) int {
- n, err := dtd.Exec(ctx)
- if err != nil {
- panic(err)
- }
- return n
-}
-
-func (dtd *DocumentTokenDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: documenttoken.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- if ps := dtd.mutation.predicates; len(ps) > 0 {
- _spec.Predicate = func(selector *sql.Selector) {
- for i := range ps {
- ps[i](selector)
- }
- }
- }
- affected, err := sqlgraph.DeleteNodes(ctx, dtd.driver, _spec)
- if err != nil && sqlgraph.IsConstraintError(err) {
- err = &ConstraintError{msg: err.Error(), wrap: err}
- }
- return affected, err
-}
-
-// DocumentTokenDeleteOne is the builder for deleting a single DocumentToken entity.
-type DocumentTokenDeleteOne struct {
- dtd *DocumentTokenDelete
-}
-
-// Exec executes the deletion query.
-func (dtdo *DocumentTokenDeleteOne) Exec(ctx context.Context) error {
- n, err := dtdo.dtd.Exec(ctx)
- switch {
- case err != nil:
- return err
- case n == 0:
- return &NotFoundError{documenttoken.Label}
- default:
- return nil
- }
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtdo *DocumentTokenDeleteOne) ExecX(ctx context.Context) {
- dtdo.dtd.ExecX(ctx)
-}
diff --git a/backend/internal/data/ent/documenttoken_query.go b/backend/internal/data/ent/documenttoken_query.go
deleted file mode 100644
index 6c5386c..0000000
--- a/backend/internal/data/ent/documenttoken_query.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "context"
- "fmt"
- "math"
-
- "entgo.io/ent/dialect/sql"
- "entgo.io/ent/dialect/sql/sqlgraph"
- "entgo.io/ent/schema/field"
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
- "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
-)
-
-// DocumentTokenQuery is the builder for querying DocumentToken entities.
-type DocumentTokenQuery struct {
- config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
- predicates []predicate.DocumentToken
- withDocument *DocumentQuery
- withFKs bool
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
-}
-
-// Where adds a new predicate for the DocumentTokenQuery builder.
-func (dtq *DocumentTokenQuery) Where(ps ...predicate.DocumentToken) *DocumentTokenQuery {
- dtq.predicates = append(dtq.predicates, ps...)
- return dtq
-}
-
-// Limit adds a limit step to the query.
-func (dtq *DocumentTokenQuery) Limit(limit int) *DocumentTokenQuery {
- dtq.limit = &limit
- return dtq
-}
-
-// Offset adds an offset step to the query.
-func (dtq *DocumentTokenQuery) Offset(offset int) *DocumentTokenQuery {
- dtq.offset = &offset
- return dtq
-}
-
-// Unique configures the query builder to filter duplicate records on query.
-// By default, unique is set to true, and can be disabled using this method.
-func (dtq *DocumentTokenQuery) Unique(unique bool) *DocumentTokenQuery {
- dtq.unique = &unique
- return dtq
-}
-
-// Order adds an order step to the query.
-func (dtq *DocumentTokenQuery) Order(o ...OrderFunc) *DocumentTokenQuery {
- dtq.order = append(dtq.order, o...)
- return dtq
-}
-
-// QueryDocument chains the current query on the "document" edge.
-func (dtq *DocumentTokenQuery) QueryDocument() *DocumentQuery {
- query := &DocumentQuery{config: dtq.config}
- query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
- if err := dtq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- selector := dtq.sqlQuery(ctx)
- if err := selector.Err(); err != nil {
- return nil, err
- }
- step := sqlgraph.NewStep(
- sqlgraph.From(documenttoken.Table, documenttoken.FieldID, selector),
- sqlgraph.To(document.Table, document.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn),
- )
- fromU = sqlgraph.SetNeighbors(dtq.driver.Dialect(), step)
- return fromU, nil
- }
- return query
-}
-
-// First returns the first DocumentToken entity from the query.
-// Returns a *NotFoundError when no DocumentToken was found.
-func (dtq *DocumentTokenQuery) First(ctx context.Context) (*DocumentToken, error) {
- nodes, err := dtq.Limit(1).All(ctx)
- if err != nil {
- return nil, err
- }
- if len(nodes) == 0 {
- return nil, &NotFoundError{documenttoken.Label}
- }
- return nodes[0], nil
-}
-
-// FirstX is like First, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) FirstX(ctx context.Context) *DocumentToken {
- node, err := dtq.First(ctx)
- if err != nil && !IsNotFound(err) {
- panic(err)
- }
- return node
-}
-
-// FirstID returns the first DocumentToken ID from the query.
-// Returns a *NotFoundError when no DocumentToken ID was found.
-func (dtq *DocumentTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
- var ids []uuid.UUID
- if ids, err = dtq.Limit(1).IDs(ctx); err != nil {
- return
- }
- if len(ids) == 0 {
- err = &NotFoundError{documenttoken.Label}
- return
- }
- return ids[0], nil
-}
-
-// FirstIDX is like FirstID, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) FirstIDX(ctx context.Context) uuid.UUID {
- id, err := dtq.FirstID(ctx)
- if err != nil && !IsNotFound(err) {
- panic(err)
- }
- return id
-}
-
-// Only returns a single DocumentToken entity found by the query, ensuring it only returns one.
-// Returns a *NotSingularError when more than one DocumentToken entity is found.
-// Returns a *NotFoundError when no DocumentToken entities are found.
-func (dtq *DocumentTokenQuery) Only(ctx context.Context) (*DocumentToken, error) {
- nodes, err := dtq.Limit(2).All(ctx)
- if err != nil {
- return nil, err
- }
- switch len(nodes) {
- case 1:
- return nodes[0], nil
- case 0:
- return nil, &NotFoundError{documenttoken.Label}
- default:
- return nil, &NotSingularError{documenttoken.Label}
- }
-}
-
-// OnlyX is like Only, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) OnlyX(ctx context.Context) *DocumentToken {
- node, err := dtq.Only(ctx)
- if err != nil {
- panic(err)
- }
- return node
-}
-
-// OnlyID is like Only, but returns the only DocumentToken ID in the query.
-// Returns a *NotSingularError when more than one DocumentToken ID is found.
-// Returns a *NotFoundError when no entities are found.
-func (dtq *DocumentTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
- var ids []uuid.UUID
- if ids, err = dtq.Limit(2).IDs(ctx); err != nil {
- return
- }
- switch len(ids) {
- case 1:
- id = ids[0]
- case 0:
- err = &NotFoundError{documenttoken.Label}
- default:
- err = &NotSingularError{documenttoken.Label}
- }
- return
-}
-
-// OnlyIDX is like OnlyID, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID {
- id, err := dtq.OnlyID(ctx)
- if err != nil {
- panic(err)
- }
- return id
-}
-
-// All executes the query and returns a list of DocumentTokens.
-func (dtq *DocumentTokenQuery) All(ctx context.Context) ([]*DocumentToken, error) {
- if err := dtq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return dtq.sqlAll(ctx)
-}
-
-// AllX is like All, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) AllX(ctx context.Context) []*DocumentToken {
- nodes, err := dtq.All(ctx)
- if err != nil {
- panic(err)
- }
- return nodes
-}
-
-// IDs executes the query and returns a list of DocumentToken IDs.
-func (dtq *DocumentTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := dtq.Select(documenttoken.FieldID).Scan(ctx, &ids); err != nil {
- return nil, err
- }
- return ids, nil
-}
-
-// IDsX is like IDs, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) IDsX(ctx context.Context) []uuid.UUID {
- ids, err := dtq.IDs(ctx)
- if err != nil {
- panic(err)
- }
- return ids
-}
-
-// Count returns the count of the given query.
-func (dtq *DocumentTokenQuery) Count(ctx context.Context) (int, error) {
- if err := dtq.prepareQuery(ctx); err != nil {
- return 0, err
- }
- return dtq.sqlCount(ctx)
-}
-
-// CountX is like Count, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) CountX(ctx context.Context) int {
- count, err := dtq.Count(ctx)
- if err != nil {
- panic(err)
- }
- return count
-}
-
-// Exist returns true if the query has elements in the graph.
-func (dtq *DocumentTokenQuery) Exist(ctx context.Context) (bool, error) {
- if err := dtq.prepareQuery(ctx); err != nil {
- return false, err
- }
- return dtq.sqlExist(ctx)
-}
-
-// ExistX is like Exist, but panics if an error occurs.
-func (dtq *DocumentTokenQuery) ExistX(ctx context.Context) bool {
- exist, err := dtq.Exist(ctx)
- if err != nil {
- panic(err)
- }
- return exist
-}
-
-// Clone returns a duplicate of the DocumentTokenQuery builder, including all associated steps. It can be
-// used to prepare common query builders and use them differently after the clone is made.
-func (dtq *DocumentTokenQuery) Clone() *DocumentTokenQuery {
- if dtq == nil {
- return nil
- }
- return &DocumentTokenQuery{
- config: dtq.config,
- limit: dtq.limit,
- offset: dtq.offset,
- order: append([]OrderFunc{}, dtq.order...),
- predicates: append([]predicate.DocumentToken{}, dtq.predicates...),
- withDocument: dtq.withDocument.Clone(),
- // clone intermediate query.
- sql: dtq.sql.Clone(),
- path: dtq.path,
- unique: dtq.unique,
- }
-}
-
-// WithDocument tells the query-builder to eager-load the nodes that are connected to
-// the "document" edge. The optional arguments are used to configure the query builder of the edge.
-func (dtq *DocumentTokenQuery) WithDocument(opts ...func(*DocumentQuery)) *DocumentTokenQuery {
- query := &DocumentQuery{config: dtq.config}
- for _, opt := range opts {
- opt(query)
- }
- dtq.withDocument = query
- return dtq
-}
-
-// GroupBy is used to group vertices by one or more fields/columns.
-// It is often used with aggregate functions, like: count, max, mean, min, sum.
-//
-// Example:
-//
-// var v []struct {
-// CreatedAt time.Time `json:"created_at,omitempty"`
-// Count int `json:"count,omitempty"`
-// }
-//
-// client.DocumentToken.Query().
-// GroupBy(documenttoken.FieldCreatedAt).
-// Aggregate(ent.Count()).
-// Scan(ctx, &v)
-func (dtq *DocumentTokenQuery) GroupBy(field string, fields ...string) *DocumentTokenGroupBy {
- grbuild := &DocumentTokenGroupBy{config: dtq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := dtq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return dtq.sqlQuery(ctx), nil
- }
- grbuild.label = documenttoken.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
- return grbuild
-}
-
-// Select allows the selection one or more fields/columns for the given query,
-// instead of selecting all fields in the entity.
-//
-// Example:
-//
-// var v []struct {
-// CreatedAt time.Time `json:"created_at,omitempty"`
-// }
-//
-// client.DocumentToken.Query().
-// Select(documenttoken.FieldCreatedAt).
-// Scan(ctx, &v)
-func (dtq *DocumentTokenQuery) Select(fields ...string) *DocumentTokenSelect {
- dtq.fields = append(dtq.fields, fields...)
- selbuild := &DocumentTokenSelect{DocumentTokenQuery: dtq}
- selbuild.label = documenttoken.Label
- selbuild.flds, selbuild.scan = &dtq.fields, selbuild.Scan
- return selbuild
-}
-
-func (dtq *DocumentTokenQuery) prepareQuery(ctx context.Context) error {
- for _, f := range dtq.fields {
- if !documenttoken.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
- }
- }
- if dtq.path != nil {
- prev, err := dtq.path(ctx)
- if err != nil {
- return err
- }
- dtq.sql = prev
- }
- return nil
-}
-
-func (dtq *DocumentTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DocumentToken, error) {
- var (
- nodes = []*DocumentToken{}
- withFKs = dtq.withFKs
- _spec = dtq.querySpec()
- loadedTypes = [1]bool{
- dtq.withDocument != nil,
- }
- )
- if dtq.withDocument != nil {
- withFKs = true
- }
- if withFKs {
- _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.ForeignKeys...)
- }
- _spec.ScanValues = func(columns []string) ([]any, error) {
- return (*DocumentToken).scanValues(nil, columns)
- }
- _spec.Assign = func(columns []string, values []any) error {
- node := &DocumentToken{config: dtq.config}
- nodes = append(nodes, node)
- node.Edges.loadedTypes = loadedTypes
- return node.assignValues(columns, values)
- }
- for i := range hooks {
- hooks[i](ctx, _spec)
- }
- if err := sqlgraph.QueryNodes(ctx, dtq.driver, _spec); err != nil {
- return nil, err
- }
- if len(nodes) == 0 {
- return nodes, nil
- }
- if query := dtq.withDocument; query != nil {
- if err := dtq.loadDocument(ctx, query, nodes, nil,
- func(n *DocumentToken, e *Document) { n.Edges.Document = e }); err != nil {
- return nil, err
- }
- }
- return nodes, nil
-}
-
-func (dtq *DocumentTokenQuery) loadDocument(ctx context.Context, query *DocumentQuery, nodes []*DocumentToken, init func(*DocumentToken), assign func(*DocumentToken, *Document)) error {
- ids := make([]uuid.UUID, 0, len(nodes))
- nodeids := make(map[uuid.UUID][]*DocumentToken)
- for i := range nodes {
- if nodes[i].document_document_tokens == nil {
- continue
- }
- fk := *nodes[i].document_document_tokens
- if _, ok := nodeids[fk]; !ok {
- ids = append(ids, fk)
- }
- nodeids[fk] = append(nodeids[fk], nodes[i])
- }
- query.Where(document.IDIn(ids...))
- neighbors, err := query.All(ctx)
- if err != nil {
- return err
- }
- for _, n := range neighbors {
- nodes, ok := nodeids[n.ID]
- if !ok {
- return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v`, n.ID)
- }
- for i := range nodes {
- assign(nodes[i], n)
- }
- }
- return nil
-}
-
-func (dtq *DocumentTokenQuery) sqlCount(ctx context.Context) (int, error) {
- _spec := dtq.querySpec()
- _spec.Node.Columns = dtq.fields
- if len(dtq.fields) > 0 {
- _spec.Unique = dtq.unique != nil && *dtq.unique
- }
- return sqlgraph.CountNodes(ctx, dtq.driver, _spec)
-}
-
-func (dtq *DocumentTokenQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := dtq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
-func (dtq *DocumentTokenQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: documenttoken.Table,
- Columns: documenttoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- From: dtq.sql,
- Unique: true,
- }
- if unique := dtq.unique; unique != nil {
- _spec.Unique = *unique
- }
- if fields := dtq.fields; len(fields) > 0 {
- _spec.Node.Columns = make([]string, 0, len(fields))
- _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID)
- for i := range fields {
- if fields[i] != documenttoken.FieldID {
- _spec.Node.Columns = append(_spec.Node.Columns, fields[i])
- }
- }
- }
- if ps := dtq.predicates; len(ps) > 0 {
- _spec.Predicate = func(selector *sql.Selector) {
- for i := range ps {
- ps[i](selector)
- }
- }
- }
- if limit := dtq.limit; limit != nil {
- _spec.Limit = *limit
- }
- if offset := dtq.offset; offset != nil {
- _spec.Offset = *offset
- }
- if ps := dtq.order; len(ps) > 0 {
- _spec.Order = func(selector *sql.Selector) {
- for i := range ps {
- ps[i](selector)
- }
- }
- }
- return _spec
-}
-
-func (dtq *DocumentTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
- builder := sql.Dialect(dtq.driver.Dialect())
- t1 := builder.Table(documenttoken.Table)
- columns := dtq.fields
- if len(columns) == 0 {
- columns = documenttoken.Columns
- }
- selector := builder.Select(t1.Columns(columns...)...).From(t1)
- if dtq.sql != nil {
- selector = dtq.sql
- selector.Select(selector.Columns(columns...)...)
- }
- if dtq.unique != nil && *dtq.unique {
- selector.Distinct()
- }
- for _, p := range dtq.predicates {
- p(selector)
- }
- for _, p := range dtq.order {
- p(selector)
- }
- if offset := dtq.offset; offset != nil {
- // limit is mandatory for offset clause. We start
- // with default value, and override it below if needed.
- selector.Offset(*offset).Limit(math.MaxInt32)
- }
- if limit := dtq.limit; limit != nil {
- selector.Limit(*limit)
- }
- return selector
-}
-
-// DocumentTokenGroupBy is the group-by builder for DocumentToken entities.
-type DocumentTokenGroupBy struct {
- config
- selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
-}
-
-// Aggregate adds the given aggregation functions to the group-by query.
-func (dtgb *DocumentTokenGroupBy) Aggregate(fns ...AggregateFunc) *DocumentTokenGroupBy {
- dtgb.fns = append(dtgb.fns, fns...)
- return dtgb
-}
-
-// Scan applies the group-by query and scans the result into the given value.
-func (dtgb *DocumentTokenGroupBy) Scan(ctx context.Context, v any) error {
- query, err := dtgb.path(ctx)
- if err != nil {
- return err
- }
- dtgb.sql = query
- return dtgb.sqlScan(ctx, v)
-}
-
-func (dtgb *DocumentTokenGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range dtgb.fields {
- if !documenttoken.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := dtgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := dtgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (dtgb *DocumentTokenGroupBy) sqlQuery() *sql.Selector {
- selector := dtgb.sql.Select()
- aggregation := make([]string, 0, len(dtgb.fns))
- for _, fn := range dtgb.fns {
- aggregation = append(aggregation, fn(selector))
- }
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
- if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(dtgb.fields)+len(dtgb.fns))
- for _, f := range dtgb.fields {
- columns = append(columns, selector.C(f))
- }
- columns = append(columns, aggregation...)
- selector.Select(columns...)
- }
- return selector.GroupBy(selector.Columns(dtgb.fields...)...)
-}
-
-// DocumentTokenSelect is the builder for selecting fields of DocumentToken entities.
-type DocumentTokenSelect struct {
- *DocumentTokenQuery
- selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
-}
-
-// Scan applies the selector query and scans the result into the given value.
-func (dts *DocumentTokenSelect) Scan(ctx context.Context, v any) error {
- if err := dts.prepareQuery(ctx); err != nil {
- return err
- }
- dts.sql = dts.DocumentTokenQuery.sqlQuery(ctx)
- return dts.sqlScan(ctx, v)
-}
-
-func (dts *DocumentTokenSelect) sqlScan(ctx context.Context, v any) error {
- rows := &sql.Rows{}
- query, args := dts.sql.Query()
- if err := dts.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
diff --git a/backend/internal/data/ent/documenttoken_update.go b/backend/internal/data/ent/documenttoken_update.go
deleted file mode 100644
index c6b5e77..0000000
--- a/backend/internal/data/ent/documenttoken_update.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
- "context"
- "errors"
- "fmt"
- "time"
-
- "entgo.io/ent/dialect/sql"
- "entgo.io/ent/dialect/sql/sqlgraph"
- "entgo.io/ent/schema/field"
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
- "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
-)
-
-// DocumentTokenUpdate is the builder for updating DocumentToken entities.
-type DocumentTokenUpdate struct {
- config
- hooks []Hook
- mutation *DocumentTokenMutation
-}
-
-// Where appends a list predicates to the DocumentTokenUpdate builder.
-func (dtu *DocumentTokenUpdate) Where(ps ...predicate.DocumentToken) *DocumentTokenUpdate {
- dtu.mutation.Where(ps...)
- return dtu
-}
-
-// SetUpdatedAt sets the "updated_at" field.
-func (dtu *DocumentTokenUpdate) SetUpdatedAt(t time.Time) *DocumentTokenUpdate {
- dtu.mutation.SetUpdatedAt(t)
- return dtu
-}
-
-// SetToken sets the "token" field.
-func (dtu *DocumentTokenUpdate) SetToken(b []byte) *DocumentTokenUpdate {
- dtu.mutation.SetToken(b)
- return dtu
-}
-
-// SetUses sets the "uses" field.
-func (dtu *DocumentTokenUpdate) SetUses(i int) *DocumentTokenUpdate {
- dtu.mutation.ResetUses()
- dtu.mutation.SetUses(i)
- return dtu
-}
-
-// SetNillableUses sets the "uses" field if the given value is not nil.
-func (dtu *DocumentTokenUpdate) SetNillableUses(i *int) *DocumentTokenUpdate {
- if i != nil {
- dtu.SetUses(*i)
- }
- return dtu
-}
-
-// AddUses adds i to the "uses" field.
-func (dtu *DocumentTokenUpdate) AddUses(i int) *DocumentTokenUpdate {
- dtu.mutation.AddUses(i)
- return dtu
-}
-
-// SetExpiresAt sets the "expires_at" field.
-func (dtu *DocumentTokenUpdate) SetExpiresAt(t time.Time) *DocumentTokenUpdate {
- dtu.mutation.SetExpiresAt(t)
- return dtu
-}
-
-// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
-func (dtu *DocumentTokenUpdate) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdate {
- if t != nil {
- dtu.SetExpiresAt(*t)
- }
- return dtu
-}
-
-// SetDocumentID sets the "document" edge to the Document entity by ID.
-func (dtu *DocumentTokenUpdate) SetDocumentID(id uuid.UUID) *DocumentTokenUpdate {
- dtu.mutation.SetDocumentID(id)
- return dtu
-}
-
-// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
-func (dtu *DocumentTokenUpdate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdate {
- if id != nil {
- dtu = dtu.SetDocumentID(*id)
- }
- return dtu
-}
-
-// SetDocument sets the "document" edge to the Document entity.
-func (dtu *DocumentTokenUpdate) SetDocument(d *Document) *DocumentTokenUpdate {
- return dtu.SetDocumentID(d.ID)
-}
-
-// Mutation returns the DocumentTokenMutation object of the builder.
-func (dtu *DocumentTokenUpdate) Mutation() *DocumentTokenMutation {
- return dtu.mutation
-}
-
-// ClearDocument clears the "document" edge to the Document entity.
-func (dtu *DocumentTokenUpdate) ClearDocument() *DocumentTokenUpdate {
- dtu.mutation.ClearDocument()
- return dtu
-}
-
-// Save executes the query and returns the number of nodes affected by the update operation.
-func (dtu *DocumentTokenUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- dtu.defaults()
- if len(dtu.hooks) == 0 {
- if err = dtu.check(); err != nil {
- return 0, err
- }
- affected, err = dtu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = dtu.check(); err != nil {
- return 0, err
- }
- dtu.mutation = mutation
- affected, err = dtu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(dtu.hooks) - 1; i >= 0; i-- {
- if dtu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dtu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, dtu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (dtu *DocumentTokenUpdate) SaveX(ctx context.Context) int {
- affected, err := dtu.Save(ctx)
- if err != nil {
- panic(err)
- }
- return affected
-}
-
-// Exec executes the query.
-func (dtu *DocumentTokenUpdate) Exec(ctx context.Context) error {
- _, err := dtu.Save(ctx)
- return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtu *DocumentTokenUpdate) ExecX(ctx context.Context) {
- if err := dtu.Exec(ctx); err != nil {
- panic(err)
- }
-}
-
-// defaults sets the default values of the builder before save.
-func (dtu *DocumentTokenUpdate) defaults() {
- if _, ok := dtu.mutation.UpdatedAt(); !ok {
- v := documenttoken.UpdateDefaultUpdatedAt()
- dtu.mutation.SetUpdatedAt(v)
- }
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (dtu *DocumentTokenUpdate) check() error {
- if v, ok := dtu.mutation.Token(); ok {
- if err := documenttoken.TokenValidator(v); err != nil {
- return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
- }
- }
- return nil
-}
-
-func (dtu *DocumentTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: documenttoken.Table,
- Columns: documenttoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- if ps := dtu.mutation.predicates; len(ps) > 0 {
- _spec.Predicate = func(selector *sql.Selector) {
- for i := range ps {
- ps[i](selector)
- }
- }
- }
- if value, ok := dtu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldUpdatedAt,
- })
- }
- if value, ok := dtu.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: documenttoken.FieldToken,
- })
- }
- if value, ok := dtu.mutation.Uses(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: documenttoken.FieldUses,
- })
- }
- if value, ok := dtu.mutation.AddedUses(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: documenttoken.FieldUses,
- })
- }
- if value, ok := dtu.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldExpiresAt,
- })
- }
- if dtu.mutation.DocumentCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: documenttoken.DocumentTable,
- Columns: []string{documenttoken.DocumentColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := dtu.mutation.DocumentIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: documenttoken.DocumentTable,
- Columns: []string{documenttoken.DocumentColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if n, err = sqlgraph.UpdateNodes(ctx, dtu.driver, _spec); err != nil {
- if _, ok := err.(*sqlgraph.NotFoundError); ok {
- err = &NotFoundError{documenttoken.Label}
- } else if sqlgraph.IsConstraintError(err) {
- err = &ConstraintError{msg: err.Error(), wrap: err}
- }
- return 0, err
- }
- return n, nil
-}
-
-// DocumentTokenUpdateOne is the builder for updating a single DocumentToken entity.
-type DocumentTokenUpdateOne struct {
- config
- fields []string
- hooks []Hook
- mutation *DocumentTokenMutation
-}
-
-// SetUpdatedAt sets the "updated_at" field.
-func (dtuo *DocumentTokenUpdateOne) SetUpdatedAt(t time.Time) *DocumentTokenUpdateOne {
- dtuo.mutation.SetUpdatedAt(t)
- return dtuo
-}
-
-// SetToken sets the "token" field.
-func (dtuo *DocumentTokenUpdateOne) SetToken(b []byte) *DocumentTokenUpdateOne {
- dtuo.mutation.SetToken(b)
- return dtuo
-}
-
-// SetUses sets the "uses" field.
-func (dtuo *DocumentTokenUpdateOne) SetUses(i int) *DocumentTokenUpdateOne {
- dtuo.mutation.ResetUses()
- dtuo.mutation.SetUses(i)
- return dtuo
-}
-
-// SetNillableUses sets the "uses" field if the given value is not nil.
-func (dtuo *DocumentTokenUpdateOne) SetNillableUses(i *int) *DocumentTokenUpdateOne {
- if i != nil {
- dtuo.SetUses(*i)
- }
- return dtuo
-}
-
-// AddUses adds i to the "uses" field.
-func (dtuo *DocumentTokenUpdateOne) AddUses(i int) *DocumentTokenUpdateOne {
- dtuo.mutation.AddUses(i)
- return dtuo
-}
-
-// SetExpiresAt sets the "expires_at" field.
-func (dtuo *DocumentTokenUpdateOne) SetExpiresAt(t time.Time) *DocumentTokenUpdateOne {
- dtuo.mutation.SetExpiresAt(t)
- return dtuo
-}
-
-// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
-func (dtuo *DocumentTokenUpdateOne) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdateOne {
- if t != nil {
- dtuo.SetExpiresAt(*t)
- }
- return dtuo
-}
-
-// SetDocumentID sets the "document" edge to the Document entity by ID.
-func (dtuo *DocumentTokenUpdateOne) SetDocumentID(id uuid.UUID) *DocumentTokenUpdateOne {
- dtuo.mutation.SetDocumentID(id)
- return dtuo
-}
-
-// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
-func (dtuo *DocumentTokenUpdateOne) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdateOne {
- if id != nil {
- dtuo = dtuo.SetDocumentID(*id)
- }
- return dtuo
-}
-
-// SetDocument sets the "document" edge to the Document entity.
-func (dtuo *DocumentTokenUpdateOne) SetDocument(d *Document) *DocumentTokenUpdateOne {
- return dtuo.SetDocumentID(d.ID)
-}
-
-// Mutation returns the DocumentTokenMutation object of the builder.
-func (dtuo *DocumentTokenUpdateOne) Mutation() *DocumentTokenMutation {
- return dtuo.mutation
-}
-
-// ClearDocument clears the "document" edge to the Document entity.
-func (dtuo *DocumentTokenUpdateOne) ClearDocument() *DocumentTokenUpdateOne {
- dtuo.mutation.ClearDocument()
- return dtuo
-}
-
-// Select allows selecting one or more fields (columns) of the returned entity.
-// The default is selecting all fields defined in the entity schema.
-func (dtuo *DocumentTokenUpdateOne) Select(field string, fields ...string) *DocumentTokenUpdateOne {
- dtuo.fields = append([]string{field}, fields...)
- return dtuo
-}
-
-// Save executes the query and returns the updated DocumentToken entity.
-func (dtuo *DocumentTokenUpdateOne) Save(ctx context.Context) (*DocumentToken, error) {
- var (
- err error
- node *DocumentToken
- )
- dtuo.defaults()
- if len(dtuo.hooks) == 0 {
- if err = dtuo.check(); err != nil {
- return nil, err
- }
- node, err = dtuo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = dtuo.check(); err != nil {
- return nil, err
- }
- dtuo.mutation = mutation
- node, err = dtuo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(dtuo.hooks) - 1; i >= 0; i-- {
- if dtuo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = dtuo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, dtuo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*DocumentToken)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v)
- }
- node = nv
- }
- return node, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (dtuo *DocumentTokenUpdateOne) SaveX(ctx context.Context) *DocumentToken {
- node, err := dtuo.Save(ctx)
- if err != nil {
- panic(err)
- }
- return node
-}
-
-// Exec executes the query on the entity.
-func (dtuo *DocumentTokenUpdateOne) Exec(ctx context.Context) error {
- _, err := dtuo.Save(ctx)
- return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (dtuo *DocumentTokenUpdateOne) ExecX(ctx context.Context) {
- if err := dtuo.Exec(ctx); err != nil {
- panic(err)
- }
-}
-
-// defaults sets the default values of the builder before save.
-func (dtuo *DocumentTokenUpdateOne) defaults() {
- if _, ok := dtuo.mutation.UpdatedAt(); !ok {
- v := documenttoken.UpdateDefaultUpdatedAt()
- dtuo.mutation.SetUpdatedAt(v)
- }
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (dtuo *DocumentTokenUpdateOne) check() error {
- if v, ok := dtuo.mutation.Token(); ok {
- if err := documenttoken.TokenValidator(v); err != nil {
- return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
- }
- }
- return nil
-}
-
-func (dtuo *DocumentTokenUpdateOne) sqlSave(ctx context.Context) (_node *DocumentToken, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: documenttoken.Table,
- Columns: documenttoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: documenttoken.FieldID,
- },
- },
- }
- id, ok := dtuo.mutation.ID()
- if !ok {
- return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DocumentToken.id" for update`)}
- }
- _spec.Node.ID.Value = id
- if fields := dtuo.fields; len(fields) > 0 {
- _spec.Node.Columns = make([]string, 0, len(fields))
- _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID)
- for _, f := range fields {
- if !documenttoken.ValidColumn(f) {
- return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
- }
- if f != documenttoken.FieldID {
- _spec.Node.Columns = append(_spec.Node.Columns, f)
- }
- }
- }
- if ps := dtuo.mutation.predicates; len(ps) > 0 {
- _spec.Predicate = func(selector *sql.Selector) {
- for i := range ps {
- ps[i](selector)
- }
- }
- }
- if value, ok := dtuo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldUpdatedAt,
- })
- }
- if value, ok := dtuo.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: documenttoken.FieldToken,
- })
- }
- if value, ok := dtuo.mutation.Uses(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: documenttoken.FieldUses,
- })
- }
- if value, ok := dtuo.mutation.AddedUses(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: documenttoken.FieldUses,
- })
- }
- if value, ok := dtuo.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: documenttoken.FieldExpiresAt,
- })
- }
- if dtuo.mutation.DocumentCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: documenttoken.DocumentTable,
- Columns: []string{documenttoken.DocumentColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := dtuo.mutation.DocumentIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: documenttoken.DocumentTable,
- Columns: []string{documenttoken.DocumentColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- _node = &DocumentToken{config: dtuo.config}
- _spec.Assign = _node.assignValues
- _spec.ScanValues = _node.scanValues
- if err = sqlgraph.UpdateNode(ctx, dtuo.driver, _spec); err != nil {
- if _, ok := err.(*sqlgraph.NotFoundError); ok {
- err = &NotFoundError{documenttoken.Label}
- } else if sqlgraph.IsConstraintError(err) {
- err = &ConstraintError{msg: err.Error(), wrap: err}
- }
- return nil, err
- }
- return _node, nil
-}
diff --git a/backend/internal/data/ent/ent.go b/backend/internal/data/ent/ent.go
index e2552db..6e52ac8 100644
--- a/backend/internal/data/ent/ent.go
+++ b/backend/internal/data/ent/ent.go
@@ -6,73 +6,108 @@ import (
"context"
"errors"
"fmt"
+ "reflect"
+ "sync"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
// ent aliases to avoid import conflicts in user's code.
type (
- Op = ent.Op
- Hook = ent.Hook
- Value = ent.Value
- Query = ent.Query
- Policy = ent.Policy
- Mutator = ent.Mutator
- Mutation = ent.Mutation
- MutateFunc = ent.MutateFunc
+ Op = ent.Op
+ Hook = ent.Hook
+ Value = ent.Value
+ Query = ent.Query
+ QueryContext = ent.QueryContext
+ Querier = ent.Querier
+ QuerierFunc = ent.QuerierFunc
+ Interceptor = ent.Interceptor
+ InterceptFunc = ent.InterceptFunc
+ Traverser = ent.Traverser
+ TraverseFunc = ent.TraverseFunc
+ Policy = ent.Policy
+ Mutator = ent.Mutator
+ Mutation = ent.Mutation
+ MutateFunc = ent.MutateFunc
)
+type clientCtxKey struct{}
+
+// FromContext returns a Client stored inside a context, or nil if there isn't one.
+func FromContext(ctx context.Context) *Client {
+ c, _ := ctx.Value(clientCtxKey{}).(*Client)
+ return c
+}
+
+// NewContext returns a new context with the given Client attached.
+func NewContext(parent context.Context, c *Client) context.Context {
+ return context.WithValue(parent, clientCtxKey{}, c)
+}
+
+type txCtxKey struct{}
+
+// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
+func TxFromContext(ctx context.Context) *Tx {
+ tx, _ := ctx.Value(txCtxKey{}).(*Tx)
+ return tx
+}
+
+// NewTxContext returns a new context with the given Tx attached.
+func NewTxContext(parent context.Context, tx *Tx) context.Context {
+ return context.WithValue(parent, txCtxKey{}, tx)
+}
+
// OrderFunc applies an ordering on the sql selector.
+// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)
-// columnChecker returns a function indicates if the column exists in the given column.
-func columnChecker(table string) func(string) error {
- checks := map[string]func(string) bool{
- attachment.Table: attachment.ValidColumn,
- authtokens.Table: authtokens.ValidColumn,
- document.Table: document.ValidColumn,
- documenttoken.Table: documenttoken.ValidColumn,
- group.Table: group.ValidColumn,
- groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
- item.Table: item.ValidColumn,
- itemfield.Table: itemfield.ValidColumn,
- label.Table: label.ValidColumn,
- location.Table: location.ValidColumn,
- user.Table: user.ValidColumn,
- }
- check, ok := checks[table]
- if !ok {
- return func(string) error {
- return fmt.Errorf("unknown table %q", table)
- }
- }
- return func(column string) error {
- if !check(column) {
- return fmt.Errorf("unknown column %q for table %q", column, table)
- }
- return nil
- }
+var (
+ initCheck sync.Once
+ columnCheck sql.ColumnCheck
+)
+
+// columnChecker checks if the column exists in the given table.
+func checkColumn(table, column string) error {
+ initCheck.Do(func() {
+ columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
+ attachment.Table: attachment.ValidColumn,
+ authroles.Table: authroles.ValidColumn,
+ authtokens.Table: authtokens.ValidColumn,
+ document.Table: document.ValidColumn,
+ group.Table: group.ValidColumn,
+ groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
+ item.Table: item.ValidColumn,
+ itemfield.Table: itemfield.ValidColumn,
+ label.Table: label.ValidColumn,
+ location.Table: location.ValidColumn,
+ maintenanceentry.Table: maintenanceentry.ValidColumn,
+ notifier.Table: notifier.ValidColumn,
+ user.Table: user.ValidColumn,
+ })
+ })
+ return columnCheck(table, column)
}
// Asc applies the given fields in ASC order.
-func Asc(fields ...string) OrderFunc {
+func Asc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
- check := columnChecker(s.TableName())
for _, f := range fields {
- if err := check(f); err != nil {
+ if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Asc(s.C(f)))
@@ -81,11 +116,10 @@ func Asc(fields ...string) OrderFunc {
}
// Desc applies the given fields in DESC order.
-func Desc(fields ...string) OrderFunc {
+func Desc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
- check := columnChecker(s.TableName())
for _, f := range fields {
- if err := check(f); err != nil {
+ if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Desc(s.C(f)))
@@ -117,8 +151,7 @@ func Count() AggregateFunc {
// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
return func(s *sql.Selector) string {
- check := columnChecker(s.TableName())
- if err := check(field); err != nil {
+ if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -129,8 +162,7 @@ func Max(field string) AggregateFunc {
// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
return func(s *sql.Selector) string {
- check := columnChecker(s.TableName())
- if err := check(field); err != nil {
+ if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -141,8 +173,7 @@ func Mean(field string) AggregateFunc {
// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
return func(s *sql.Selector) string {
- check := columnChecker(s.TableName())
- if err := check(field); err != nil {
+ if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -153,8 +184,7 @@ func Min(field string) AggregateFunc {
// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
return func(s *sql.Selector) string {
- check := columnChecker(s.TableName())
- if err := check(field); err != nil {
+ if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -283,6 +313,7 @@ func IsConstraintError(err error) bool {
type selector struct {
label string
flds *[]string
+ fns []AggregateFunc
scan func(context.Context, any) error
}
@@ -481,5 +512,121 @@ func (s *selector) BoolX(ctx context.Context) bool {
return v
}
+// withHooks invokes the builder operation with the given hooks, if any.
+func withHooks[V Value, M any, PM interface {
+ *M
+ Mutation
+}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
+ if len(hooks) == 0 {
+ return exec(ctx)
+ }
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutationT, ok := any(m).(PM)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ // Set the mutation to the builder.
+ *mutation = *mutationT
+ return exec(ctx)
+ })
+ for i := len(hooks) - 1; i >= 0; i-- {
+ if hooks[i] == nil {
+ return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+ }
+ mut = hooks[i](mut)
+ }
+ v, err := mut.Mutate(ctx, mutation)
+ if err != nil {
+ return value, err
+ }
+ nv, ok := v.(V)
+ if !ok {
+ return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
+ }
+ return nv, nil
+}
+
+// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
+ if ent.QueryFromContext(ctx) == nil {
+ qc.Op = op
+ ctx = ent.NewQueryContext(ctx, qc)
+ }
+ return ctx
+}
+
+func querierAll[V Value, Q interface {
+ sqlAll(context.Context, ...queryHook) (V, error)
+}]() Querier {
+ return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+ query, ok := q.(Q)
+ if !ok {
+ return nil, fmt.Errorf("unexpected query type %T", q)
+ }
+ return query.sqlAll(ctx)
+ })
+}
+
+func querierCount[Q interface {
+ sqlCount(context.Context) (int, error)
+}]() Querier {
+ return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+ query, ok := q.(Q)
+ if !ok {
+ return nil, fmt.Errorf("unexpected query type %T", q)
+ }
+ return query.sqlCount(ctx)
+ })
+}
+
+func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
+ for i := len(inters) - 1; i >= 0; i-- {
+ qr = inters[i].Intercept(qr)
+ }
+ rv, err := qr.Query(ctx, q)
+ if err != nil {
+ return v, err
+ }
+ vt, ok := rv.(V)
+ if !ok {
+ return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
+ }
+ return vt, nil
+}
+
+func scanWithInterceptors[Q1 ent.Query, Q2 interface {
+ sqlScan(context.Context, Q1, any) error
+}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
+ rv := reflect.ValueOf(v)
+ var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+ query, ok := q.(Q1)
+ if !ok {
+ return nil, fmt.Errorf("unexpected query type %T", q)
+ }
+ if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
+ return nil, err
+ }
+ if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
+ return rv.Elem().Interface(), nil
+ }
+ return v, nil
+ })
+ for i := len(inters) - 1; i >= 0; i-- {
+ qr = inters[i].Intercept(qr)
+ }
+ vv, err := qr.Query(ctx, rootQuery)
+ if err != nil {
+ return err
+ }
+ switch rv2 := reflect.ValueOf(vv); {
+ case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
+ case rv.Type() == rv2.Type():
+ rv.Elem().Set(rv2.Elem())
+ case rv.Elem().Type() == rv2.Type():
+ rv.Elem().Set(rv2)
+ }
+ return nil
+}
+
// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)
diff --git a/backend/internal/data/ent/generate.go b/backend/internal/data/ent/generate.go
index eb03ded..7b8b727 100644
--- a/backend/internal/data/ent/generate.go
+++ b/backend/internal/data/ent/generate.go
@@ -1,3 +1,3 @@
package ent
-//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/versioned-migration ./schema
+//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/versioned-migration ./schema --template=./schema/templates/has_id.tmpl
diff --git a/backend/internal/data/ent/group.go b/backend/internal/data/ent/group.go
index 8140104..69c67de 100644
--- a/backend/internal/data/ent/group.go
+++ b/backend/internal/data/ent/group.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -24,10 +25,11 @@ type Group struct {
// Name holds the value of the "name" field.
Name string `json:"name,omitempty"`
// Currency holds the value of the "currency" field.
- Currency group.Currency `json:"currency,omitempty"`
+ Currency string `json:"currency,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the GroupQuery when eager-loading is set.
- Edges GroupEdges `json:"edges"`
+ Edges GroupEdges `json:"edges"`
+ selectValues sql.SelectValues
}
// GroupEdges holds the relations/edges for other nodes in the graph.
@@ -44,9 +46,11 @@ type GroupEdges struct {
Documents []*Document `json:"documents,omitempty"`
// InvitationTokens holds the value of the invitation_tokens edge.
InvitationTokens []*GroupInvitationToken `json:"invitation_tokens,omitempty"`
+ // Notifiers holds the value of the notifiers edge.
+ Notifiers []*Notifier `json:"notifiers,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [6]bool
+ loadedTypes [7]bool
}
// UsersOrErr returns the Users value or an error if the edge
@@ -103,6 +107,15 @@ func (e GroupEdges) InvitationTokensOrErr() ([]*GroupInvitationToken, error) {
return nil, &NotLoadedError{edge: "invitation_tokens"}
}
+// NotifiersOrErr returns the Notifiers value or an error if the edge
+// was not loaded in eager-loading.
+func (e GroupEdges) NotifiersOrErr() ([]*Notifier, error) {
+ if e.loadedTypes[6] {
+ return e.Notifiers, nil
+ }
+ return nil, &NotLoadedError{edge: "notifiers"}
+}
+
// scanValues returns the types for scanning values from sql.Rows.
func (*Group) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
@@ -115,7 +128,7 @@ func (*Group) scanValues(columns []string) ([]any, error) {
case group.FieldID:
values[i] = new(uuid.UUID)
default:
- return nil, fmt.Errorf("unexpected column %q for type Group", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -157,48 +170,61 @@ func (gr *Group) assignValues(columns []string, values []any) error {
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field currency", values[i])
} else if value.Valid {
- gr.Currency = group.Currency(value.String)
+ gr.Currency = value.String
}
+ default:
+ gr.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the Group.
+// This includes values selected through modifiers, order, etc.
+func (gr *Group) Value(name string) (ent.Value, error) {
+ return gr.selectValues.Get(name)
+}
+
// QueryUsers queries the "users" edge of the Group entity.
func (gr *Group) QueryUsers() *UserQuery {
- return (&GroupClient{config: gr.config}).QueryUsers(gr)
+ return NewGroupClient(gr.config).QueryUsers(gr)
}
// QueryLocations queries the "locations" edge of the Group entity.
func (gr *Group) QueryLocations() *LocationQuery {
- return (&GroupClient{config: gr.config}).QueryLocations(gr)
+ return NewGroupClient(gr.config).QueryLocations(gr)
}
// QueryItems queries the "items" edge of the Group entity.
func (gr *Group) QueryItems() *ItemQuery {
- return (&GroupClient{config: gr.config}).QueryItems(gr)
+ return NewGroupClient(gr.config).QueryItems(gr)
}
// QueryLabels queries the "labels" edge of the Group entity.
func (gr *Group) QueryLabels() *LabelQuery {
- return (&GroupClient{config: gr.config}).QueryLabels(gr)
+ return NewGroupClient(gr.config).QueryLabels(gr)
}
// QueryDocuments queries the "documents" edge of the Group entity.
func (gr *Group) QueryDocuments() *DocumentQuery {
- return (&GroupClient{config: gr.config}).QueryDocuments(gr)
+ return NewGroupClient(gr.config).QueryDocuments(gr)
}
// QueryInvitationTokens queries the "invitation_tokens" edge of the Group entity.
func (gr *Group) QueryInvitationTokens() *GroupInvitationTokenQuery {
- return (&GroupClient{config: gr.config}).QueryInvitationTokens(gr)
+ return NewGroupClient(gr.config).QueryInvitationTokens(gr)
+}
+
+// QueryNotifiers queries the "notifiers" edge of the Group entity.
+func (gr *Group) QueryNotifiers() *NotifierQuery {
+ return NewGroupClient(gr.config).QueryNotifiers(gr)
}
// Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back.
func (gr *Group) Update() *GroupUpdateOne {
- return (&GroupClient{config: gr.config}).UpdateOne(gr)
+ return NewGroupClient(gr.config).UpdateOne(gr)
}
// Unwrap unwraps the Group entity that was returned from a transaction after it was closed,
@@ -227,16 +253,10 @@ func (gr *Group) String() string {
builder.WriteString(gr.Name)
builder.WriteString(", ")
builder.WriteString("currency=")
- builder.WriteString(fmt.Sprintf("%v", gr.Currency))
+ builder.WriteString(gr.Currency)
builder.WriteByte(')')
return builder.String()
}
// Groups is a parsable slice of Group.
type Groups []*Group
-
-func (gr Groups) config(cfg config) {
- for _i := range gr {
- gr[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/group/group.go b/backend/internal/data/ent/group/group.go
index c90a04c..32cb101 100644
--- a/backend/internal/data/ent/group/group.go
+++ b/backend/internal/data/ent/group/group.go
@@ -3,9 +3,10 @@
package group
import (
- "fmt"
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -34,6 +35,8 @@ const (
EdgeDocuments = "documents"
// EdgeInvitationTokens holds the string denoting the invitation_tokens edge name in mutations.
EdgeInvitationTokens = "invitation_tokens"
+ // EdgeNotifiers holds the string denoting the notifiers edge name in mutations.
+ EdgeNotifiers = "notifiers"
// Table holds the table name of the group in the database.
Table = "groups"
// UsersTable is the table that holds the users relation/edge.
@@ -78,6 +81,13 @@ const (
InvitationTokensInverseTable = "group_invitation_tokens"
// InvitationTokensColumn is the table column denoting the invitation_tokens relation/edge.
InvitationTokensColumn = "group_invitation_tokens"
+ // NotifiersTable is the table that holds the notifiers relation/edge.
+ NotifiersTable = "notifiers"
+ // NotifiersInverseTable is the table name for the Notifier entity.
+ // It exists in this package in order to avoid circular dependency with the "notifier" package.
+ NotifiersInverseTable = "notifiers"
+ // NotifiersColumn is the table column denoting the notifiers relation/edge.
+ NotifiersColumn = "group_id"
)
// Columns holds all SQL columns for group fields.
@@ -108,39 +118,183 @@ var (
UpdateDefaultUpdatedAt func() time.Time
// NameValidator is a validator for the "name" field. It is called by the builders before save.
NameValidator func(string) error
+ // DefaultCurrency holds the default value on creation for the "currency" field.
+ DefaultCurrency string
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
-// Currency defines the type for the "currency" enum field.
-type Currency string
+// OrderOption defines the ordering options for the Group queries.
+type OrderOption func(*sql.Selector)
-// CurrencyUsd is the default value of the Currency enum.
-const DefaultCurrency = CurrencyUsd
-
-// Currency values.
-const (
- CurrencyUsd Currency = "usd"
- CurrencyEur Currency = "eur"
- CurrencyGbp Currency = "gbp"
- CurrencyJpy Currency = "jpy"
- CurrencyZar Currency = "zar"
- CurrencyAud Currency = "aud"
- CurrencyNok Currency = "nok"
- CurrencySek Currency = "sek"
- CurrencyDkk Currency = "dkk"
-)
-
-func (c Currency) String() string {
- return string(c)
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
}
-// CurrencyValidator is a validator for the "currency" field enum values. It is called by the builders before save.
-func CurrencyValidator(c Currency) error {
- switch c {
- case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencySek, CurrencyDkk:
- return nil
- default:
- return fmt.Errorf("group: invalid enum value for currency field: %q", c)
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByCurrency orders the results by the currency field.
+func ByCurrency(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCurrency, opts...).ToFunc()
+}
+
+// ByUsersCount orders the results by users count.
+func ByUsersCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...)
}
}
+
+// ByUsers orders the results by users terms.
+func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByLocationsCount orders the results by locations count.
+func ByLocationsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newLocationsStep(), opts...)
+ }
+}
+
+// ByLocations orders the results by locations terms.
+func ByLocations(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newLocationsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByItemsCount orders the results by items count.
+func ByItemsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...)
+ }
+}
+
+// ByItems orders the results by items terms.
+func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByLabelsCount orders the results by labels count.
+func ByLabelsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newLabelsStep(), opts...)
+ }
+}
+
+// ByLabels orders the results by labels terms.
+func ByLabels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newLabelsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByDocumentsCount orders the results by documents count.
+func ByDocumentsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newDocumentsStep(), opts...)
+ }
+}
+
+// ByDocuments orders the results by documents terms.
+func ByDocuments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newDocumentsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByInvitationTokensCount orders the results by invitation_tokens count.
+func ByInvitationTokensCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newInvitationTokensStep(), opts...)
+ }
+}
+
+// ByInvitationTokens orders the results by invitation_tokens terms.
+func ByInvitationTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newInvitationTokensStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByNotifiersCount orders the results by notifiers count.
+func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...)
+ }
+}
+
+// ByNotifiers orders the results by notifiers terms.
+func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newUsersStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(UsersInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
+ )
+}
+func newLocationsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(LocationsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
+ )
+}
+func newItemsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
+ )
+}
+func newLabelsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(LabelsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
+ )
+}
+func newDocumentsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(DocumentsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
+ )
+}
+func newInvitationTokensStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(InvitationTokensInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
+ )
+}
+func newNotifiersStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(NotifiersInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
+ )
+}
diff --git a/backend/internal/data/ent/group/where.go b/backend/internal/data/ent/group/where.go
index 62106cc..d18faa7 100644
--- a/backend/internal/data/ent/group/where.go
+++ b/backend/internal/data/ent/group/where.go
@@ -13,357 +13,277 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Group(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Group(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Group(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldName, v))
+}
+
+// Currency applies equality check predicate on the "currency" field. It's identical to CurrencyEQ.
+func Currency(v string) predicate.Group {
+ return predicate.Group(sql.FieldEQ(FieldCurrency, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Group(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Group(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Group(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Group(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Group(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Group(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.Group(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.Group(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.Group(sql.FieldContainsFold(FieldName, v))
}
// CurrencyEQ applies the EQ predicate on the "currency" field.
-func CurrencyEQ(v Currency) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCurrency), v))
- })
+func CurrencyEQ(v string) predicate.Group {
+ return predicate.Group(sql.FieldEQ(FieldCurrency, v))
}
// CurrencyNEQ applies the NEQ predicate on the "currency" field.
-func CurrencyNEQ(v Currency) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCurrency), v))
- })
+func CurrencyNEQ(v string) predicate.Group {
+ return predicate.Group(sql.FieldNEQ(FieldCurrency, v))
}
// CurrencyIn applies the In predicate on the "currency" field.
-func CurrencyIn(vs ...Currency) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCurrency), v...))
- })
+func CurrencyIn(vs ...string) predicate.Group {
+ return predicate.Group(sql.FieldIn(FieldCurrency, vs...))
}
// CurrencyNotIn applies the NotIn predicate on the "currency" field.
-func CurrencyNotIn(vs ...Currency) predicate.Group {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Group(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCurrency), v...))
- })
+func CurrencyNotIn(vs ...string) predicate.Group {
+ return predicate.Group(sql.FieldNotIn(FieldCurrency, vs...))
+}
+
+// CurrencyGT applies the GT predicate on the "currency" field.
+func CurrencyGT(v string) predicate.Group {
+ return predicate.Group(sql.FieldGT(FieldCurrency, v))
+}
+
+// CurrencyGTE applies the GTE predicate on the "currency" field.
+func CurrencyGTE(v string) predicate.Group {
+ return predicate.Group(sql.FieldGTE(FieldCurrency, v))
+}
+
+// CurrencyLT applies the LT predicate on the "currency" field.
+func CurrencyLT(v string) predicate.Group {
+ return predicate.Group(sql.FieldLT(FieldCurrency, v))
+}
+
+// CurrencyLTE applies the LTE predicate on the "currency" field.
+func CurrencyLTE(v string) predicate.Group {
+ return predicate.Group(sql.FieldLTE(FieldCurrency, v))
+}
+
+// CurrencyContains applies the Contains predicate on the "currency" field.
+func CurrencyContains(v string) predicate.Group {
+ return predicate.Group(sql.FieldContains(FieldCurrency, v))
+}
+
+// CurrencyHasPrefix applies the HasPrefix predicate on the "currency" field.
+func CurrencyHasPrefix(v string) predicate.Group {
+ return predicate.Group(sql.FieldHasPrefix(FieldCurrency, v))
+}
+
+// CurrencyHasSuffix applies the HasSuffix predicate on the "currency" field.
+func CurrencyHasSuffix(v string) predicate.Group {
+ return predicate.Group(sql.FieldHasSuffix(FieldCurrency, v))
+}
+
+// CurrencyEqualFold applies the EqualFold predicate on the "currency" field.
+func CurrencyEqualFold(v string) predicate.Group {
+ return predicate.Group(sql.FieldEqualFold(FieldCurrency, v))
+}
+
+// CurrencyContainsFold applies the ContainsFold predicate on the "currency" field.
+func CurrencyContainsFold(v string) predicate.Group {
+ return predicate.Group(sql.FieldContainsFold(FieldCurrency, v))
}
// HasUsers applies the HasEdge predicate on the "users" edge.
@@ -371,7 +291,6 @@ func HasUsers() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(UsersTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -381,11 +300,7 @@ func HasUsers() predicate.Group {
// HasUsersWith applies the HasEdge predicate on the "users" edge with a given conditions (other predicates).
func HasUsersWith(preds ...predicate.User) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(UsersInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
- )
+ step := newUsersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -399,7 +314,6 @@ func HasLocations() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(LocationsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -409,11 +323,7 @@ func HasLocations() predicate.Group {
// HasLocationsWith applies the HasEdge predicate on the "locations" edge with a given conditions (other predicates).
func HasLocationsWith(preds ...predicate.Location) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(LocationsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
- )
+ step := newLocationsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -427,7 +337,6 @@ func HasItems() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -437,11 +346,7 @@ func HasItems() predicate.Group {
// HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates).
func HasItemsWith(preds ...predicate.Item) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
- )
+ step := newItemsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -455,7 +360,6 @@ func HasLabels() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(LabelsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -465,11 +369,7 @@ func HasLabels() predicate.Group {
// HasLabelsWith applies the HasEdge predicate on the "labels" edge with a given conditions (other predicates).
func HasLabelsWith(preds ...predicate.Label) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(LabelsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
- )
+ step := newLabelsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -483,7 +383,6 @@ func HasDocuments() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -493,11 +392,7 @@ func HasDocuments() predicate.Group {
// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates).
func HasDocumentsWith(preds ...predicate.Document) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(DocumentsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
- )
+ step := newDocumentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -511,7 +406,6 @@ func HasInvitationTokens() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(InvitationTokensTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -520,12 +414,31 @@ func HasInvitationTokens() predicate.Group {
// HasInvitationTokensWith applies the HasEdge predicate on the "invitation_tokens" edge with a given conditions (other predicates).
func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := newInvitationTokensStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasNotifiers applies the HasEdge predicate on the "notifiers" edge.
+func HasNotifiers() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(InvitationTokensInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
+ sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
)
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates).
+func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := newNotifiersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -536,32 +449,15 @@ func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Group(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Group) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Group(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Group) predicate.Group {
- return predicate.Group(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Group(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/group_create.go b/backend/internal/data/ent/group_create.go
index 37ca739..be56ba0 100644
--- a/backend/internal/data/ent/group_create.go
+++ b/backend/internal/data/ent/group_create.go
@@ -17,6 +17,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -62,15 +63,15 @@ func (gc *GroupCreate) SetName(s string) *GroupCreate {
}
// SetCurrency sets the "currency" field.
-func (gc *GroupCreate) SetCurrency(gr group.Currency) *GroupCreate {
- gc.mutation.SetCurrency(gr)
+func (gc *GroupCreate) SetCurrency(s string) *GroupCreate {
+ gc.mutation.SetCurrency(s)
return gc
}
// SetNillableCurrency sets the "currency" field if the given value is not nil.
-func (gc *GroupCreate) SetNillableCurrency(gr *group.Currency) *GroupCreate {
- if gr != nil {
- gc.SetCurrency(*gr)
+func (gc *GroupCreate) SetNillableCurrency(s *string) *GroupCreate {
+ if s != nil {
+ gc.SetCurrency(*s)
}
return gc
}
@@ -179,6 +180,21 @@ func (gc *GroupCreate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupCre
return gc.AddInvitationTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (gc *GroupCreate) AddNotifierIDs(ids ...uuid.UUID) *GroupCreate {
+ gc.mutation.AddNotifierIDs(ids...)
+ return gc
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (gc *GroupCreate) AddNotifiers(n ...*Notifier) *GroupCreate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return gc.AddNotifierIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (gc *GroupCreate) Mutation() *GroupMutation {
return gc.mutation
@@ -186,50 +202,8 @@ func (gc *GroupCreate) Mutation() *GroupMutation {
// Save creates the Group in the database.
func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) {
- var (
- err error
- node *Group
- )
gc.defaults()
- if len(gc.hooks) == 0 {
- if err = gc.check(); err != nil {
- return nil, err
- }
- node, err = gc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = gc.check(); err != nil {
- return nil, err
- }
- gc.mutation = mutation
- if node, err = gc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(gc.hooks) - 1; i >= 0; i-- {
- if gc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, gc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Group)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from GroupMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -293,15 +267,13 @@ func (gc *GroupCreate) check() error {
if _, ok := gc.mutation.Currency(); !ok {
return &ValidationError{Name: "currency", err: errors.New(`ent: missing required field "Group.currency"`)}
}
- if v, ok := gc.mutation.Currency(); ok {
- if err := group.CurrencyValidator(v); err != nil {
- return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)}
- }
- }
return nil
}
func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) {
+ if err := gc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := gc.createSpec()
if err := sqlgraph.CreateNode(ctx, gc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -316,54 +288,34 @@ func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) {
return nil, err
}
}
+ gc.mutation.id = &_node.ID
+ gc.mutation.done = true
return _node, nil
}
func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
var (
_node = &Group{config: gc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: group.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
)
if id, ok := gc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := gc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: group.FieldCreatedAt,
- })
+ _spec.SetField(group.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := gc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: group.FieldUpdatedAt,
- })
+ _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := gc.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: group.FieldName,
- })
+ _spec.SetField(group.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := gc.mutation.Currency(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: group.FieldCurrency,
- })
+ _spec.SetField(group.FieldCurrency, field.TypeString, value)
_node.Currency = value
}
if nodes := gc.mutation.UsersIDs(); len(nodes) > 0 {
@@ -374,10 +326,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -393,10 +342,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -412,10 +358,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -431,10 +374,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -450,10 +390,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -469,10 +406,23 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := gc.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -486,11 +436,15 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
// GroupCreateBulk is the builder for creating many Group entities in bulk.
type GroupCreateBulk struct {
config
+ err error
builders []*GroupCreate
}
// Save creates the Group entities in the database.
func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
+ if gcb.err != nil {
+ return nil, gcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(gcb.builders))
nodes := make([]*Group, len(gcb.builders))
mutators := make([]Mutator, len(gcb.builders))
@@ -507,8 +461,8 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/group_delete.go b/backend/internal/data/ent/group_delete.go
index 4bcefc8..b8c3e59 100644
--- a/backend/internal/data/ent/group_delete.go
+++ b/backend/internal/data/ent/group_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (gd *GroupDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(gd.hooks) == 0 {
- affected, err = gd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- gd.mutation = mutation
- affected, err = gd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(gd.hooks) - 1; i >= 0; i-- {
- if gd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, gd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (gd *GroupDelete) ExecX(ctx context.Context) int {
}
func (gd *GroupDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: group.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
if ps := gd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (gd *GroupDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ gd.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type GroupDeleteOne struct {
gd *GroupDelete
}
+// Where appends a list predicates to the GroupDelete builder.
+func (gdo *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
+ gdo.gd.mutation.Where(ps...)
+ return gdo
+}
+
// Exec executes the deletion query.
func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
n, err := gdo.gd.Exec(ctx)
@@ -111,5 +82,7 @@ func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (gdo *GroupDeleteOne) ExecX(ctx context.Context) {
- gdo.gd.ExecX(ctx)
+ if err := gdo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/group_query.go b/backend/internal/data/ent/group_query.go
index 796f81d..f17bd3b 100644
--- a/backend/internal/data/ent/group_query.go
+++ b/backend/internal/data/ent/group_query.go
@@ -18,6 +18,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -25,11 +26,9 @@ import (
// GroupQuery is the builder for querying Group entities.
type GroupQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []group.OrderOption
+ inters []Interceptor
predicates []predicate.Group
withUsers *UserQuery
withLocations *LocationQuery
@@ -37,6 +36,7 @@ type GroupQuery struct {
withLabels *LabelQuery
withDocuments *DocumentQuery
withInvitationTokens *GroupInvitationTokenQuery
+ withNotifiers *NotifierQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
@@ -48,34 +48,34 @@ func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
return gq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (gq *GroupQuery) Limit(limit int) *GroupQuery {
- gq.limit = &limit
+ gq.ctx.Limit = &limit
return gq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (gq *GroupQuery) Offset(offset int) *GroupQuery {
- gq.offset = &offset
+ gq.ctx.Offset = &offset
return gq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
- gq.unique = &unique
+ gq.ctx.Unique = &unique
return gq
}
-// Order adds an order step to the query.
-func (gq *GroupQuery) Order(o ...OrderFunc) *GroupQuery {
+// Order specifies how the records should be ordered.
+func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
gq.order = append(gq.order, o...)
return gq
}
// QueryUsers chains the current query on the "users" edge.
func (gq *GroupQuery) QueryUsers() *UserQuery {
- query := &UserQuery{config: gq.config}
+ query := (&UserClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -97,7 +97,7 @@ func (gq *GroupQuery) QueryUsers() *UserQuery {
// QueryLocations chains the current query on the "locations" edge.
func (gq *GroupQuery) QueryLocations() *LocationQuery {
- query := &LocationQuery{config: gq.config}
+ query := (&LocationClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -119,7 +119,7 @@ func (gq *GroupQuery) QueryLocations() *LocationQuery {
// QueryItems chains the current query on the "items" edge.
func (gq *GroupQuery) QueryItems() *ItemQuery {
- query := &ItemQuery{config: gq.config}
+ query := (&ItemClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -141,7 +141,7 @@ func (gq *GroupQuery) QueryItems() *ItemQuery {
// QueryLabels chains the current query on the "labels" edge.
func (gq *GroupQuery) QueryLabels() *LabelQuery {
- query := &LabelQuery{config: gq.config}
+ query := (&LabelClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -163,7 +163,7 @@ func (gq *GroupQuery) QueryLabels() *LabelQuery {
// QueryDocuments chains the current query on the "documents" edge.
func (gq *GroupQuery) QueryDocuments() *DocumentQuery {
- query := &DocumentQuery{config: gq.config}
+ query := (&DocumentClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -185,7 +185,7 @@ func (gq *GroupQuery) QueryDocuments() *DocumentQuery {
// QueryInvitationTokens chains the current query on the "invitation_tokens" edge.
func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery {
- query := &GroupInvitationTokenQuery{config: gq.config}
+ query := (&GroupInvitationTokenClient{config: gq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
@@ -205,10 +205,32 @@ func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery {
return query
}
+// QueryNotifiers chains the current query on the "notifiers" edge.
+func (gq *GroupQuery) QueryNotifiers() *NotifierQuery {
+ query := (&NotifierClient{config: gq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := gq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := gq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(group.Table, group.FieldID, selector),
+ sqlgraph.To(notifier.Table, notifier.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// First returns the first Group entity from the query.
// Returns a *NotFoundError when no Group was found.
func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
- nodes, err := gq.Limit(1).All(ctx)
+ nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -231,7 +253,7 @@ func (gq *GroupQuery) FirstX(ctx context.Context) *Group {
// Returns a *NotFoundError when no Group ID was found.
func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = gq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -254,7 +276,7 @@ func (gq *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Group entity is found.
// Returns a *NotFoundError when no Group entities are found.
func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) {
- nodes, err := gq.Limit(2).All(ctx)
+ nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -282,7 +304,7 @@ func (gq *GroupQuery) OnlyX(ctx context.Context) *Group {
// Returns a *NotFoundError when no entities are found.
func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = gq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -307,10 +329,12 @@ func (gq *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Groups.
func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) {
+ ctx = setContextOp(ctx, gq.ctx, "All")
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
}
- return gq.sqlAll(ctx)
+ qr := querierAll[[]*Group, *GroupQuery]()
+ return withInterceptors[[]*Group](ctx, gq, qr, gq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -323,9 +347,12 @@ func (gq *GroupQuery) AllX(ctx context.Context) []*Group {
}
// IDs executes the query and returns a list of Group IDs.
-func (gq *GroupQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := gq.Select(group.FieldID).Scan(ctx, &ids); err != nil {
+func (gq *GroupQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if gq.ctx.Unique == nil && gq.path != nil {
+ gq.Unique(true)
+ }
+ ctx = setContextOp(ctx, gq.ctx, "IDs")
+ if err = gq.Select(group.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -342,10 +369,11 @@ func (gq *GroupQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (gq *GroupQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, gq.ctx, "Count")
if err := gq.prepareQuery(ctx); err != nil {
return 0, err
}
- return gq.sqlCount(ctx)
+ return withInterceptors[int](ctx, gq, querierCount[*GroupQuery](), gq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -359,10 +387,15 @@ func (gq *GroupQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) {
- if err := gq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, gq.ctx, "Exist")
+ switch _, err := gq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return gq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -382,9 +415,9 @@ func (gq *GroupQuery) Clone() *GroupQuery {
}
return &GroupQuery{
config: gq.config,
- limit: gq.limit,
- offset: gq.offset,
- order: append([]OrderFunc{}, gq.order...),
+ ctx: gq.ctx.Clone(),
+ order: append([]group.OrderOption{}, gq.order...),
+ inters: append([]Interceptor{}, gq.inters...),
predicates: append([]predicate.Group{}, gq.predicates...),
withUsers: gq.withUsers.Clone(),
withLocations: gq.withLocations.Clone(),
@@ -392,17 +425,17 @@ func (gq *GroupQuery) Clone() *GroupQuery {
withLabels: gq.withLabels.Clone(),
withDocuments: gq.withDocuments.Clone(),
withInvitationTokens: gq.withInvitationTokens.Clone(),
+ withNotifiers: gq.withNotifiers.Clone(),
// clone intermediate query.
- sql: gq.sql.Clone(),
- path: gq.path,
- unique: gq.unique,
+ sql: gq.sql.Clone(),
+ path: gq.path,
}
}
// WithUsers tells the query-builder to eager-load the nodes that are connected to
// the "users" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithUsers(opts ...func(*UserQuery)) *GroupQuery {
- query := &UserQuery{config: gq.config}
+ query := (&UserClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -413,7 +446,7 @@ func (gq *GroupQuery) WithUsers(opts ...func(*UserQuery)) *GroupQuery {
// WithLocations tells the query-builder to eager-load the nodes that are connected to
// the "locations" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithLocations(opts ...func(*LocationQuery)) *GroupQuery {
- query := &LocationQuery{config: gq.config}
+ query := (&LocationClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -424,7 +457,7 @@ func (gq *GroupQuery) WithLocations(opts ...func(*LocationQuery)) *GroupQuery {
// WithItems tells the query-builder to eager-load the nodes that are connected to
// the "items" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithItems(opts ...func(*ItemQuery)) *GroupQuery {
- query := &ItemQuery{config: gq.config}
+ query := (&ItemClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -435,7 +468,7 @@ func (gq *GroupQuery) WithItems(opts ...func(*ItemQuery)) *GroupQuery {
// WithLabels tells the query-builder to eager-load the nodes that are connected to
// the "labels" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithLabels(opts ...func(*LabelQuery)) *GroupQuery {
- query := &LabelQuery{config: gq.config}
+ query := (&LabelClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -446,7 +479,7 @@ func (gq *GroupQuery) WithLabels(opts ...func(*LabelQuery)) *GroupQuery {
// WithDocuments tells the query-builder to eager-load the nodes that are connected to
// the "documents" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithDocuments(opts ...func(*DocumentQuery)) *GroupQuery {
- query := &DocumentQuery{config: gq.config}
+ query := (&DocumentClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -457,7 +490,7 @@ func (gq *GroupQuery) WithDocuments(opts ...func(*DocumentQuery)) *GroupQuery {
// WithInvitationTokens tells the query-builder to eager-load the nodes that are connected to
// the "invitation_tokens" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQuery)) *GroupQuery {
- query := &GroupInvitationTokenQuery{config: gq.config}
+ query := (&GroupInvitationTokenClient{config: gq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -465,6 +498,17 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQue
return gq
}
+// WithNotifiers tells the query-builder to eager-load the nodes that are connected to
+// the "notifiers" edge. The optional arguments are used to configure the query builder of the edge.
+func (gq *GroupQuery) WithNotifiers(opts ...func(*NotifierQuery)) *GroupQuery {
+ query := (&NotifierClient{config: gq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ gq.withNotifiers = query
+ return gq
+}
+
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
@@ -480,16 +524,11 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQue
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
- grbuild := &GroupGroupBy{config: gq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := gq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return gq.sqlQuery(ctx), nil
- }
+ gq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &GroupGroupBy{build: gq}
+ grbuild.flds = &gq.ctx.Fields
grbuild.label = group.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -506,15 +545,30 @@ func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
// Select(group.FieldCreatedAt).
// Scan(ctx, &v)
func (gq *GroupQuery) Select(fields ...string) *GroupSelect {
- gq.fields = append(gq.fields, fields...)
- selbuild := &GroupSelect{GroupQuery: gq}
- selbuild.label = group.Label
- selbuild.flds, selbuild.scan = &gq.fields, selbuild.Scan
- return selbuild
+ gq.ctx.Fields = append(gq.ctx.Fields, fields...)
+ sbuild := &GroupSelect{GroupQuery: gq}
+ sbuild.label = group.Label
+ sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a GroupSelect configured with the given aggregations.
+func (gq *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect {
+ return gq.Select().Aggregate(fns...)
}
func (gq *GroupQuery) prepareQuery(ctx context.Context) error {
- for _, f := range gq.fields {
+ for _, inter := range gq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, gq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range gq.ctx.Fields {
if !group.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -533,13 +587,14 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
var (
nodes = []*Group{}
_spec = gq.querySpec()
- loadedTypes = [6]bool{
+ loadedTypes = [7]bool{
gq.withUsers != nil,
gq.withLocations != nil,
gq.withItems != nil,
gq.withLabels != nil,
gq.withDocuments != nil,
gq.withInvitationTokens != nil,
+ gq.withNotifiers != nil,
}
)
_spec.ScanValues = func(columns []string) ([]any, error) {
@@ -604,6 +659,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
return nil, err
}
}
+ if query := gq.withNotifiers; query != nil {
+ if err := gq.loadNotifiers(ctx, query, nodes,
+ func(n *Group) { n.Edges.Notifiers = []*Notifier{} },
+ func(n *Group, e *Notifier) { n.Edges.Notifiers = append(n.Edges.Notifiers, e) }); err != nil {
+ return nil, err
+ }
+ }
return nodes, nil
}
@@ -619,7 +681,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.User(func(s *sql.Selector) {
- s.Where(sql.InValues(group.UsersColumn, fks...))
+ s.Where(sql.InValues(s.C(group.UsersColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -632,7 +694,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -650,7 +712,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
query.withFKs = true
query.Where(predicate.Location(func(s *sql.Selector) {
- s.Where(sql.InValues(group.LocationsColumn, fks...))
+ s.Where(sql.InValues(s.C(group.LocationsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -663,7 +725,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -681,7 +743,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.Item(func(s *sql.Selector) {
- s.Where(sql.InValues(group.ItemsColumn, fks...))
+ s.Where(sql.InValues(s.C(group.ItemsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -694,7 +756,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -712,7 +774,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
query.withFKs = true
query.Where(predicate.Label(func(s *sql.Selector) {
- s.Where(sql.InValues(group.LabelsColumn, fks...))
+ s.Where(sql.InValues(s.C(group.LabelsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -725,7 +787,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -743,7 +805,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
query.withFKs = true
query.Where(predicate.Document(func(s *sql.Selector) {
- s.Where(sql.InValues(group.DocumentsColumn, fks...))
+ s.Where(sql.InValues(s.C(group.DocumentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -756,7 +818,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -774,7 +836,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
query.withFKs = true
query.Where(predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.InValues(group.InvitationTokensColumn, fks...))
+ s.Where(sql.InValues(s.C(group.InvitationTokensColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -787,7 +849,37 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
+ }
+ assign(node, n)
+ }
+ return nil
+}
+func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, nodes []*Group, init func(*Group), assign func(*Group, *Notifier)) error {
+ fks := make([]driver.Value, 0, len(nodes))
+ nodeids := make(map[uuid.UUID]*Group)
+ for i := range nodes {
+ fks = append(fks, nodes[i].ID)
+ nodeids[nodes[i].ID] = nodes[i]
+ if init != nil {
+ init(nodes[i])
+ }
+ }
+ if len(query.ctx.Fields) > 0 {
+ query.ctx.AppendFieldOnce(notifier.FieldGroupID)
+ }
+ query.Where(predicate.Notifier(func(s *sql.Selector) {
+ s.Where(sql.InValues(s.C(group.NotifiersColumn), fks...))
+ }))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ fk := n.GroupID
+ node, ok := nodeids[fk]
+ if !ok {
+ return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@@ -796,41 +888,22 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
_spec := gq.querySpec()
- _spec.Node.Columns = gq.fields
- if len(gq.fields) > 0 {
- _spec.Unique = gq.unique != nil && *gq.unique
+ _spec.Node.Columns = gq.ctx.Fields
+ if len(gq.ctx.Fields) > 0 {
+ _spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, gq.driver, _spec)
}
-func (gq *GroupQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := gq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: group.Table,
- Columns: group.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- From: gq.sql,
- Unique: true,
- }
- if unique := gq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
+ _spec.From = gq.sql
+ if unique := gq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if gq.path != nil {
+ _spec.Unique = true
}
- if fields := gq.fields; len(fields) > 0 {
+ if fields := gq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
for i := range fields {
@@ -846,10 +919,10 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := gq.limit; limit != nil {
+ if limit := gq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := gq.offset; offset != nil {
+ if offset := gq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := gq.order; len(ps) > 0 {
@@ -865,7 +938,7 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(gq.driver.Dialect())
t1 := builder.Table(group.Table)
- columns := gq.fields
+ columns := gq.ctx.Fields
if len(columns) == 0 {
columns = group.Columns
}
@@ -874,7 +947,7 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = gq.sql
selector.Select(selector.Columns(columns...)...)
}
- if gq.unique != nil && *gq.unique {
+ if gq.ctx.Unique != nil && *gq.ctx.Unique {
selector.Distinct()
}
for _, p := range gq.predicates {
@@ -883,12 +956,12 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range gq.order {
p(selector)
}
- if offset := gq.offset; offset != nil {
+ if offset := gq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := gq.limit; limit != nil {
+ if limit := gq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -896,13 +969,8 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
// GroupGroupBy is the group-by builder for Group entities.
type GroupGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *GroupQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -911,74 +979,77 @@ func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy {
return ggb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error {
- query, err := ggb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, ggb.build.ctx, "GroupBy")
+ if err := ggb.build.prepareQuery(ctx); err != nil {
return err
}
- ggb.sql = query
- return ggb.sqlScan(ctx, v)
+ return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, ggb.build, ggb, ggb.build.inters, v)
}
-func (ggb *GroupGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range ggb.fields {
- if !group.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := ggb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := ggb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (ggb *GroupGroupBy) sqlQuery() *sql.Selector {
- selector := ggb.sql.Select()
+func (ggb *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(ggb.fns))
for _, fn := range ggb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(ggb.fields)+len(ggb.fns))
- for _, f := range ggb.fields {
+ columns := make([]string, 0, len(*ggb.flds)+len(ggb.fns))
+ for _, f := range *ggb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(ggb.fields...)...)
+ selector.GroupBy(selector.Columns(*ggb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ggb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// GroupSelect is the builder for selecting fields of Group entities.
type GroupSelect struct {
*GroupQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect {
+ gs.fns = append(gs.fns, fns...)
+ return gs
}
// Scan applies the selector query and scans the result into the given value.
func (gs *GroupSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, gs.ctx, "Select")
if err := gs.prepareQuery(ctx); err != nil {
return err
}
- gs.sql = gs.GroupQuery.sqlQuery(ctx)
- return gs.sqlScan(ctx, v)
+ return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, gs.GroupQuery, gs, gs.inters, v)
}
-func (gs *GroupSelect) sqlScan(ctx context.Context, v any) error {
+func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(gs.fns))
+ for _, fn := range gs.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*gs.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := gs.sql.Query()
+ query, args := selector.Query()
if err := gs.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/group_update.go b/backend/internal/data/ent/group_update.go
index bc4dbf9..fdb11a3 100644
--- a/backend/internal/data/ent/group_update.go
+++ b/backend/internal/data/ent/group_update.go
@@ -18,6 +18,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -47,16 +48,24 @@ func (gu *GroupUpdate) SetName(s string) *GroupUpdate {
return gu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate {
+ if s != nil {
+ gu.SetName(*s)
+ }
+ return gu
+}
+
// SetCurrency sets the "currency" field.
-func (gu *GroupUpdate) SetCurrency(gr group.Currency) *GroupUpdate {
- gu.mutation.SetCurrency(gr)
+func (gu *GroupUpdate) SetCurrency(s string) *GroupUpdate {
+ gu.mutation.SetCurrency(s)
return gu
}
// SetNillableCurrency sets the "currency" field if the given value is not nil.
-func (gu *GroupUpdate) SetNillableCurrency(gr *group.Currency) *GroupUpdate {
- if gr != nil {
- gu.SetCurrency(*gr)
+func (gu *GroupUpdate) SetNillableCurrency(s *string) *GroupUpdate {
+ if s != nil {
+ gu.SetCurrency(*s)
}
return gu
}
@@ -151,6 +160,21 @@ func (gu *GroupUpdate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupUpd
return gu.AddInvitationTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (gu *GroupUpdate) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdate {
+ gu.mutation.AddNotifierIDs(ids...)
+ return gu
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (gu *GroupUpdate) AddNotifiers(n ...*Notifier) *GroupUpdate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return gu.AddNotifierIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (gu *GroupUpdate) Mutation() *GroupMutation {
return gu.mutation
@@ -282,43 +306,31 @@ func (gu *GroupUpdate) RemoveInvitationTokens(g ...*GroupInvitationToken) *Group
return gu.RemoveInvitationTokenIDs(ids...)
}
+// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
+func (gu *GroupUpdate) ClearNotifiers() *GroupUpdate {
+ gu.mutation.ClearNotifiers()
+ return gu
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
+func (gu *GroupUpdate) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdate {
+ gu.mutation.RemoveNotifierIDs(ids...)
+ return gu
+}
+
+// RemoveNotifiers removes "notifiers" edges to Notifier entities.
+func (gu *GroupUpdate) RemoveNotifiers(n ...*Notifier) *GroupUpdate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return gu.RemoveNotifierIDs(ids...)
+}
+
// Save executes the query and returns the number of nodes affected by the update operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
gu.defaults()
- if len(gu.hooks) == 0 {
- if err = gu.check(); err != nil {
- return 0, err
- }
- affected, err = gu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = gu.check(); err != nil {
- return 0, err
- }
- gu.mutation = mutation
- affected, err = gu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(gu.hooks) - 1; i >= 0; i-- {
- if gu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, gu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -358,25 +370,14 @@ func (gu *GroupUpdate) check() error {
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)}
}
}
- if v, ok := gu.mutation.Currency(); ok {
- if err := group.CurrencyValidator(v); err != nil {
- return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)}
- }
- }
return nil
}
func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: group.Table,
- Columns: group.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
+ if err := gu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
if ps := gu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -385,25 +386,13 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := gu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: group.FieldUpdatedAt,
- })
+ _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := gu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: group.FieldName,
- })
+ _spec.SetField(group.FieldName, field.TypeString, value)
}
if value, ok := gu.mutation.Currency(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: group.FieldCurrency,
- })
+ _spec.SetField(group.FieldCurrency, field.TypeString, value)
}
if gu.mutation.UsersCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -413,10 +402,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -429,10 +415,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -448,10 +431,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -467,10 +447,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -483,10 +460,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -502,10 +476,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -521,10 +492,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -537,10 +505,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -556,10 +521,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -575,10 +537,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -591,10 +550,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -610,10 +566,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -629,10 +582,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -645,10 +595,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -664,10 +611,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -683,10 +627,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -699,10 +640,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -718,10 +656,52 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if gu.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := gu.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !gu.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := gu.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -737,6 +717,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ gu.mutation.done = true
return n, nil
}
@@ -760,16 +741,24 @@ func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne {
return guo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne {
+ if s != nil {
+ guo.SetName(*s)
+ }
+ return guo
+}
+
// SetCurrency sets the "currency" field.
-func (guo *GroupUpdateOne) SetCurrency(gr group.Currency) *GroupUpdateOne {
- guo.mutation.SetCurrency(gr)
+func (guo *GroupUpdateOne) SetCurrency(s string) *GroupUpdateOne {
+ guo.mutation.SetCurrency(s)
return guo
}
// SetNillableCurrency sets the "currency" field if the given value is not nil.
-func (guo *GroupUpdateOne) SetNillableCurrency(gr *group.Currency) *GroupUpdateOne {
- if gr != nil {
- guo.SetCurrency(*gr)
+func (guo *GroupUpdateOne) SetNillableCurrency(s *string) *GroupUpdateOne {
+ if s != nil {
+ guo.SetCurrency(*s)
}
return guo
}
@@ -864,6 +853,21 @@ func (guo *GroupUpdateOne) AddInvitationTokens(g ...*GroupInvitationToken) *Grou
return guo.AddInvitationTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (guo *GroupUpdateOne) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne {
+ guo.mutation.AddNotifierIDs(ids...)
+ return guo
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (guo *GroupUpdateOne) AddNotifiers(n ...*Notifier) *GroupUpdateOne {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return guo.AddNotifierIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (guo *GroupUpdateOne) Mutation() *GroupMutation {
return guo.mutation
@@ -995,6 +999,33 @@ func (guo *GroupUpdateOne) RemoveInvitationTokens(g ...*GroupInvitationToken) *G
return guo.RemoveInvitationTokenIDs(ids...)
}
+// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
+func (guo *GroupUpdateOne) ClearNotifiers() *GroupUpdateOne {
+ guo.mutation.ClearNotifiers()
+ return guo
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
+func (guo *GroupUpdateOne) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne {
+ guo.mutation.RemoveNotifierIDs(ids...)
+ return guo
+}
+
+// RemoveNotifiers removes "notifiers" edges to Notifier entities.
+func (guo *GroupUpdateOne) RemoveNotifiers(n ...*Notifier) *GroupUpdateOne {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return guo.RemoveNotifierIDs(ids...)
+}
+
+// Where appends a list predicates to the GroupUpdate builder.
+func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
+ guo.mutation.Where(ps...)
+ return guo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne {
@@ -1004,47 +1035,8 @@ func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOn
// Save executes the query and returns the updated Group entity.
func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
- var (
- err error
- node *Group
- )
guo.defaults()
- if len(guo.hooks) == 0 {
- if err = guo.check(); err != nil {
- return nil, err
- }
- node, err = guo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = guo.check(); err != nil {
- return nil, err
- }
- guo.mutation = mutation
- node, err = guo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(guo.hooks) - 1; i >= 0; i-- {
- if guo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = guo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, guo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Group)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from GroupMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -1084,25 +1076,14 @@ func (guo *GroupUpdateOne) check() error {
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)}
}
}
- if v, ok := guo.mutation.Currency(); ok {
- if err := group.CurrencyValidator(v); err != nil {
- return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)}
- }
- }
return nil
}
func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: group.Table,
- Columns: group.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
+ if err := guo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
id, ok := guo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Group.id" for update`)}
@@ -1128,25 +1109,13 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
}
}
if value, ok := guo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: group.FieldUpdatedAt,
- })
+ _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := guo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: group.FieldName,
- })
+ _spec.SetField(group.FieldName, field.TypeString, value)
}
if value, ok := guo.mutation.Currency(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: group.FieldCurrency,
- })
+ _spec.SetField(group.FieldCurrency, field.TypeString, value)
}
if guo.mutation.UsersCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -1156,10 +1125,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1172,10 +1138,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1191,10 +1154,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1210,10 +1170,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1226,10 +1183,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1245,10 +1199,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1264,10 +1215,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1280,10 +1228,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1299,10 +1244,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1318,10 +1260,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1334,10 +1273,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1353,10 +1289,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1372,10 +1305,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1388,10 +1318,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1407,10 +1334,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: document.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1426,10 +1350,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1442,10 +1363,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1461,10 +1379,52 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if guo.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := guo.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !guo.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := guo.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.NotifiersTable,
+ Columns: []string{group.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1483,5 +1443,6 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
}
return nil, err
}
+ guo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/groupinvitationtoken.go b/backend/internal/data/ent/groupinvitationtoken.go
index 248e40f..d715cc6 100644
--- a/backend/internal/data/ent/groupinvitationtoken.go
+++ b/backend/internal/data/ent/groupinvitationtoken.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -32,6 +33,7 @@ type GroupInvitationToken struct {
// The values are being populated by the GroupInvitationTokenQuery when eager-loading is set.
Edges GroupInvitationTokenEdges `json:"edges"`
group_invitation_tokens *uuid.UUID
+ selectValues sql.SelectValues
}
// GroupInvitationTokenEdges holds the relations/edges for other nodes in the graph.
@@ -72,7 +74,7 @@ func (*GroupInvitationToken) scanValues(columns []string) ([]any, error) {
case groupinvitationtoken.ForeignKeys[0]: // group_invitation_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type GroupInvitationToken", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -129,21 +131,29 @@ func (git *GroupInvitationToken) assignValues(columns []string, values []any) er
git.group_invitation_tokens = new(uuid.UUID)
*git.group_invitation_tokens = *value.S.(*uuid.UUID)
}
+ default:
+ git.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the GroupInvitationToken.
+// This includes values selected through modifiers, order, etc.
+func (git *GroupInvitationToken) Value(name string) (ent.Value, error) {
+ return git.selectValues.Get(name)
+}
+
// QueryGroup queries the "group" edge of the GroupInvitationToken entity.
func (git *GroupInvitationToken) QueryGroup() *GroupQuery {
- return (&GroupInvitationTokenClient{config: git.config}).QueryGroup(git)
+ return NewGroupInvitationTokenClient(git.config).QueryGroup(git)
}
// Update returns a builder for updating this GroupInvitationToken.
// Note that you need to call GroupInvitationToken.Unwrap() before calling this method if this GroupInvitationToken
// was returned from a transaction, and the transaction was committed or rolled back.
func (git *GroupInvitationToken) Update() *GroupInvitationTokenUpdateOne {
- return (&GroupInvitationTokenClient{config: git.config}).UpdateOne(git)
+ return NewGroupInvitationTokenClient(git.config).UpdateOne(git)
}
// Unwrap unwraps the GroupInvitationToken entity that was returned from a transaction after it was closed,
@@ -182,9 +192,3 @@ func (git *GroupInvitationToken) String() string {
// GroupInvitationTokens is a parsable slice of GroupInvitationToken.
type GroupInvitationTokens []*GroupInvitationToken
-
-func (git GroupInvitationTokens) config(cfg config) {
- for _i := range git {
- git[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go b/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go
index 1daea17..748d739 100644
--- a/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go
+++ b/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go
@@ -5,6 +5,8 @@ package groupinvitationtoken
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -81,3 +83,45 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the GroupInvitationToken queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByExpiresAt orders the results by the expires_at field.
+func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
+}
+
+// ByUses orders the results by the uses field.
+func ByUses(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUses, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
diff --git a/backend/internal/data/ent/groupinvitationtoken/where.go b/backend/internal/data/ent/groupinvitationtoken/where.go
index bf4ccaa..d462df0 100644
--- a/backend/internal/data/ent/groupinvitationtoken/where.go
+++ b/backend/internal/data/ent/groupinvitationtoken/where.go
@@ -13,428 +13,272 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldUpdatedAt, v))
}
// Token applies equality check predicate on the "token" field. It's identical to TokenEQ.
func Token(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldToken, v))
}
// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
func ExpiresAt(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldExpiresAt, v))
}
// Uses applies equality check predicate on the "uses" field. It's identical to UsesEQ.
func Uses(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldUses, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldUpdatedAt, v))
}
// TokenEQ applies the EQ predicate on the "token" field.
func TokenEQ(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldToken, v))
}
// TokenNEQ applies the NEQ predicate on the "token" field.
func TokenNEQ(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldToken, v))
}
// TokenIn applies the In predicate on the "token" field.
func TokenIn(vs ...[]byte) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldToken), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldToken, vs...))
}
// TokenNotIn applies the NotIn predicate on the "token" field.
func TokenNotIn(vs ...[]byte) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldToken), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldToken, vs...))
}
// TokenGT applies the GT predicate on the "token" field.
func TokenGT(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldToken, v))
}
// TokenGTE applies the GTE predicate on the "token" field.
func TokenGTE(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldToken, v))
}
// TokenLT applies the LT predicate on the "token" field.
func TokenLT(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldToken, v))
}
// TokenLTE applies the LTE predicate on the "token" field.
func TokenLTE(v []byte) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldToken), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldToken, v))
}
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
func ExpiresAtEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldExpiresAt, v))
}
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
func ExpiresAtNEQ(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldExpiresAt, v))
}
// ExpiresAtIn applies the In predicate on the "expires_at" field.
func ExpiresAtIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldExpiresAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldExpiresAt, vs...))
}
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
func ExpiresAtNotIn(vs ...time.Time) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldExpiresAt), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldExpiresAt, vs...))
}
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
func ExpiresAtGT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldExpiresAt, v))
}
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
func ExpiresAtGTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldExpiresAt, v))
}
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
func ExpiresAtLT(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldExpiresAt, v))
}
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
func ExpiresAtLTE(v time.Time) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldExpiresAt), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldExpiresAt, v))
}
// UsesEQ applies the EQ predicate on the "uses" field.
func UsesEQ(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldEQ(FieldUses, v))
}
// UsesNEQ applies the NEQ predicate on the "uses" field.
func UsesNEQ(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldNEQ(FieldUses, v))
}
// UsesIn applies the In predicate on the "uses" field.
func UsesIn(vs ...int) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUses), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldIn(FieldUses, vs...))
}
// UsesNotIn applies the NotIn predicate on the "uses" field.
func UsesNotIn(vs ...int) predicate.GroupInvitationToken {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUses), v...))
- })
+ return predicate.GroupInvitationToken(sql.FieldNotIn(FieldUses, vs...))
}
// UsesGT applies the GT predicate on the "uses" field.
func UsesGT(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGT(FieldUses, v))
}
// UsesGTE applies the GTE predicate on the "uses" field.
func UsesGTE(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldGTE(FieldUses, v))
}
// UsesLT applies the LT predicate on the "uses" field.
func UsesLT(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLT(FieldUses, v))
}
// UsesLTE applies the LTE predicate on the "uses" field.
func UsesLTE(v int) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUses), v))
- })
+ return predicate.GroupInvitationToken(sql.FieldLTE(FieldUses, v))
}
// HasGroup applies the HasEdge predicate on the "group" edge.
@@ -442,7 +286,6 @@ func HasGroup() predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -452,11 +295,7 @@ func HasGroup() predicate.GroupInvitationToken {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
+ step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -467,32 +306,15 @@ func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.GroupInvitationToken(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.GroupInvitationToken(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.GroupInvitationToken) predicate.GroupInvitationToken {
- return predicate.GroupInvitationToken(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.GroupInvitationToken(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/groupinvitationtoken_create.go b/backend/internal/data/ent/groupinvitationtoken_create.go
index 34a3f48..1d5859f 100644
--- a/backend/internal/data/ent/groupinvitationtoken_create.go
+++ b/backend/internal/data/ent/groupinvitationtoken_create.go
@@ -124,50 +124,8 @@ func (gitc *GroupInvitationTokenCreate) Mutation() *GroupInvitationTokenMutation
// Save creates the GroupInvitationToken in the database.
func (gitc *GroupInvitationTokenCreate) Save(ctx context.Context) (*GroupInvitationToken, error) {
- var (
- err error
- node *GroupInvitationToken
- )
gitc.defaults()
- if len(gitc.hooks) == 0 {
- if err = gitc.check(); err != nil {
- return nil, err
- }
- node, err = gitc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupInvitationTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = gitc.check(); err != nil {
- return nil, err
- }
- gitc.mutation = mutation
- if node, err = gitc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(gitc.hooks) - 1; i >= 0; i-- {
- if gitc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gitc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, gitc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*GroupInvitationToken)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from GroupInvitationTokenMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, gitc.sqlSave, gitc.mutation, gitc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -237,6 +195,9 @@ func (gitc *GroupInvitationTokenCreate) check() error {
}
func (gitc *GroupInvitationTokenCreate) sqlSave(ctx context.Context) (*GroupInvitationToken, error) {
+ if err := gitc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := gitc.createSpec()
if err := sqlgraph.CreateNode(ctx, gitc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -251,62 +212,38 @@ func (gitc *GroupInvitationTokenCreate) sqlSave(ctx context.Context) (*GroupInvi
return nil, err
}
}
+ gitc.mutation.id = &_node.ID
+ gitc.mutation.done = true
return _node, nil
}
func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sqlgraph.CreateSpec) {
var (
_node = &GroupInvitationToken{config: gitc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: groupinvitationtoken.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(groupinvitationtoken.Table, sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID))
)
if id, ok := gitc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := gitc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldCreatedAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := gitc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldUpdatedAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := gitc.mutation.Token(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: groupinvitationtoken.FieldToken,
- })
+ _spec.SetField(groupinvitationtoken.FieldToken, field.TypeBytes, value)
_node.Token = value
}
if value, ok := gitc.mutation.ExpiresAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldExpiresAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldExpiresAt, field.TypeTime, value)
_node.ExpiresAt = value
}
if value, ok := gitc.mutation.Uses(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: groupinvitationtoken.FieldUses,
- })
+ _spec.SetField(groupinvitationtoken.FieldUses, field.TypeInt, value)
_node.Uses = value
}
if nodes := gitc.mutation.GroupIDs(); len(nodes) > 0 {
@@ -317,10 +254,7 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -335,11 +269,15 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
// GroupInvitationTokenCreateBulk is the builder for creating many GroupInvitationToken entities in bulk.
type GroupInvitationTokenCreateBulk struct {
config
+ err error
builders []*GroupInvitationTokenCreate
}
// Save creates the GroupInvitationToken entities in the database.
func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*GroupInvitationToken, error) {
+ if gitcb.err != nil {
+ return nil, gitcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(gitcb.builders))
nodes := make([]*GroupInvitationToken, len(gitcb.builders))
mutators := make([]Mutator, len(gitcb.builders))
@@ -356,8 +294,8 @@ func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*Group
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, gitcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/groupinvitationtoken_delete.go b/backend/internal/data/ent/groupinvitationtoken_delete.go
index 4fa2ceb..5878fdf 100644
--- a/backend/internal/data/ent/groupinvitationtoken_delete.go
+++ b/backend/internal/data/ent/groupinvitationtoken_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (gitd *GroupInvitationTokenDelete) Where(ps ...predicate.GroupInvitationTok
// Exec executes the deletion query and returns how many vertices were deleted.
func (gitd *GroupInvitationTokenDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(gitd.hooks) == 0 {
- affected, err = gitd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupInvitationTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- gitd.mutation = mutation
- affected, err = gitd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(gitd.hooks) - 1; i >= 0; i-- {
- if gitd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gitd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, gitd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, gitd.sqlExec, gitd.mutation, gitd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (gitd *GroupInvitationTokenDelete) ExecX(ctx context.Context) int {
}
func (gitd *GroupInvitationTokenDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: groupinvitationtoken.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(groupinvitationtoken.Table, sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID))
if ps := gitd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (gitd *GroupInvitationTokenDelete) sqlExec(ctx context.Context) (int, error
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ gitd.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type GroupInvitationTokenDeleteOne struct {
gitd *GroupInvitationTokenDelete
}
+// Where appends a list predicates to the GroupInvitationTokenDelete builder.
+func (gitdo *GroupInvitationTokenDeleteOne) Where(ps ...predicate.GroupInvitationToken) *GroupInvitationTokenDeleteOne {
+ gitdo.gitd.mutation.Where(ps...)
+ return gitdo
+}
+
// Exec executes the deletion query.
func (gitdo *GroupInvitationTokenDeleteOne) Exec(ctx context.Context) error {
n, err := gitdo.gitd.Exec(ctx)
@@ -111,5 +82,7 @@ func (gitdo *GroupInvitationTokenDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (gitdo *GroupInvitationTokenDeleteOne) ExecX(ctx context.Context) {
- gitdo.gitd.ExecX(ctx)
+ if err := gitdo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/groupinvitationtoken_query.go b/backend/internal/data/ent/groupinvitationtoken_query.go
index 4e8536b..89de054 100644
--- a/backend/internal/data/ent/groupinvitationtoken_query.go
+++ b/backend/internal/data/ent/groupinvitationtoken_query.go
@@ -19,11 +19,9 @@ import (
// GroupInvitationTokenQuery is the builder for querying GroupInvitationToken entities.
type GroupInvitationTokenQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []groupinvitationtoken.OrderOption
+ inters []Interceptor
predicates []predicate.GroupInvitationToken
withGroup *GroupQuery
withFKs bool
@@ -38,34 +36,34 @@ func (gitq *GroupInvitationTokenQuery) Where(ps ...predicate.GroupInvitationToke
return gitq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (gitq *GroupInvitationTokenQuery) Limit(limit int) *GroupInvitationTokenQuery {
- gitq.limit = &limit
+ gitq.ctx.Limit = &limit
return gitq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (gitq *GroupInvitationTokenQuery) Offset(offset int) *GroupInvitationTokenQuery {
- gitq.offset = &offset
+ gitq.ctx.Offset = &offset
return gitq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationTokenQuery {
- gitq.unique = &unique
+ gitq.ctx.Unique = &unique
return gitq
}
-// Order adds an order step to the query.
-func (gitq *GroupInvitationTokenQuery) Order(o ...OrderFunc) *GroupInvitationTokenQuery {
+// Order specifies how the records should be ordered.
+func (gitq *GroupInvitationTokenQuery) Order(o ...groupinvitationtoken.OrderOption) *GroupInvitationTokenQuery {
gitq.order = append(gitq.order, o...)
return gitq
}
// QueryGroup chains the current query on the "group" edge.
func (gitq *GroupInvitationTokenQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: gitq.config}
+ query := (&GroupClient{config: gitq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gitq.prepareQuery(ctx); err != nil {
return nil, err
@@ -88,7 +86,7 @@ func (gitq *GroupInvitationTokenQuery) QueryGroup() *GroupQuery {
// First returns the first GroupInvitationToken entity from the query.
// Returns a *NotFoundError when no GroupInvitationToken was found.
func (gitq *GroupInvitationTokenQuery) First(ctx context.Context) (*GroupInvitationToken, error) {
- nodes, err := gitq.Limit(1).All(ctx)
+ nodes, err := gitq.Limit(1).All(setContextOp(ctx, gitq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -111,7 +109,7 @@ func (gitq *GroupInvitationTokenQuery) FirstX(ctx context.Context) *GroupInvitat
// Returns a *NotFoundError when no GroupInvitationToken ID was found.
func (gitq *GroupInvitationTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = gitq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = gitq.Limit(1).IDs(setContextOp(ctx, gitq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -134,7 +132,7 @@ func (gitq *GroupInvitationTokenQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one GroupInvitationToken entity is found.
// Returns a *NotFoundError when no GroupInvitationToken entities are found.
func (gitq *GroupInvitationTokenQuery) Only(ctx context.Context) (*GroupInvitationToken, error) {
- nodes, err := gitq.Limit(2).All(ctx)
+ nodes, err := gitq.Limit(2).All(setContextOp(ctx, gitq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -162,7 +160,7 @@ func (gitq *GroupInvitationTokenQuery) OnlyX(ctx context.Context) *GroupInvitati
// Returns a *NotFoundError when no entities are found.
func (gitq *GroupInvitationTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = gitq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = gitq.Limit(2).IDs(setContextOp(ctx, gitq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -187,10 +185,12 @@ func (gitq *GroupInvitationTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of GroupInvitationTokens.
func (gitq *GroupInvitationTokenQuery) All(ctx context.Context) ([]*GroupInvitationToken, error) {
+ ctx = setContextOp(ctx, gitq.ctx, "All")
if err := gitq.prepareQuery(ctx); err != nil {
return nil, err
}
- return gitq.sqlAll(ctx)
+ qr := querierAll[[]*GroupInvitationToken, *GroupInvitationTokenQuery]()
+ return withInterceptors[[]*GroupInvitationToken](ctx, gitq, qr, gitq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -203,9 +203,12 @@ func (gitq *GroupInvitationTokenQuery) AllX(ctx context.Context) []*GroupInvitat
}
// IDs executes the query and returns a list of GroupInvitationToken IDs.
-func (gitq *GroupInvitationTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := gitq.Select(groupinvitationtoken.FieldID).Scan(ctx, &ids); err != nil {
+func (gitq *GroupInvitationTokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if gitq.ctx.Unique == nil && gitq.path != nil {
+ gitq.Unique(true)
+ }
+ ctx = setContextOp(ctx, gitq.ctx, "IDs")
+ if err = gitq.Select(groupinvitationtoken.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -222,10 +225,11 @@ func (gitq *GroupInvitationTokenQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (gitq *GroupInvitationTokenQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, gitq.ctx, "Count")
if err := gitq.prepareQuery(ctx); err != nil {
return 0, err
}
- return gitq.sqlCount(ctx)
+ return withInterceptors[int](ctx, gitq, querierCount[*GroupInvitationTokenQuery](), gitq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -239,10 +243,15 @@ func (gitq *GroupInvitationTokenQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (gitq *GroupInvitationTokenQuery) Exist(ctx context.Context) (bool, error) {
- if err := gitq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, gitq.ctx, "Exist")
+ switch _, err := gitq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return gitq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -262,22 +271,21 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery {
}
return &GroupInvitationTokenQuery{
config: gitq.config,
- limit: gitq.limit,
- offset: gitq.offset,
- order: append([]OrderFunc{}, gitq.order...),
+ ctx: gitq.ctx.Clone(),
+ order: append([]groupinvitationtoken.OrderOption{}, gitq.order...),
+ inters: append([]Interceptor{}, gitq.inters...),
predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...),
withGroup: gitq.withGroup.Clone(),
// clone intermediate query.
- sql: gitq.sql.Clone(),
- path: gitq.path,
- unique: gitq.unique,
+ sql: gitq.sql.Clone(),
+ path: gitq.path,
}
}
// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (gitq *GroupInvitationTokenQuery) WithGroup(opts ...func(*GroupQuery)) *GroupInvitationTokenQuery {
- query := &GroupQuery{config: gitq.config}
+ query := (&GroupClient{config: gitq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -300,16 +308,11 @@ func (gitq *GroupInvitationTokenQuery) WithGroup(opts ...func(*GroupQuery)) *Gro
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (gitq *GroupInvitationTokenQuery) GroupBy(field string, fields ...string) *GroupInvitationTokenGroupBy {
- grbuild := &GroupInvitationTokenGroupBy{config: gitq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := gitq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return gitq.sqlQuery(ctx), nil
- }
+ gitq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &GroupInvitationTokenGroupBy{build: gitq}
+ grbuild.flds = &gitq.ctx.Fields
grbuild.label = groupinvitationtoken.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -326,15 +329,30 @@ func (gitq *GroupInvitationTokenQuery) GroupBy(field string, fields ...string) *
// Select(groupinvitationtoken.FieldCreatedAt).
// Scan(ctx, &v)
func (gitq *GroupInvitationTokenQuery) Select(fields ...string) *GroupInvitationTokenSelect {
- gitq.fields = append(gitq.fields, fields...)
- selbuild := &GroupInvitationTokenSelect{GroupInvitationTokenQuery: gitq}
- selbuild.label = groupinvitationtoken.Label
- selbuild.flds, selbuild.scan = &gitq.fields, selbuild.Scan
- return selbuild
+ gitq.ctx.Fields = append(gitq.ctx.Fields, fields...)
+ sbuild := &GroupInvitationTokenSelect{GroupInvitationTokenQuery: gitq}
+ sbuild.label = groupinvitationtoken.Label
+ sbuild.flds, sbuild.scan = &gitq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a GroupInvitationTokenSelect configured with the given aggregations.
+func (gitq *GroupInvitationTokenQuery) Aggregate(fns ...AggregateFunc) *GroupInvitationTokenSelect {
+ return gitq.Select().Aggregate(fns...)
}
func (gitq *GroupInvitationTokenQuery) prepareQuery(ctx context.Context) error {
- for _, f := range gitq.fields {
+ for _, inter := range gitq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, gitq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range gitq.ctx.Fields {
if !groupinvitationtoken.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -404,6 +422,9 @@ func (gitq *GroupInvitationTokenQuery) loadGroup(ctx context.Context, query *Gro
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -423,41 +444,22 @@ func (gitq *GroupInvitationTokenQuery) loadGroup(ctx context.Context, query *Gro
func (gitq *GroupInvitationTokenQuery) sqlCount(ctx context.Context) (int, error) {
_spec := gitq.querySpec()
- _spec.Node.Columns = gitq.fields
- if len(gitq.fields) > 0 {
- _spec.Unique = gitq.unique != nil && *gitq.unique
+ _spec.Node.Columns = gitq.ctx.Fields
+ if len(gitq.ctx.Fields) > 0 {
+ _spec.Unique = gitq.ctx.Unique != nil && *gitq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, gitq.driver, _spec)
}
-func (gitq *GroupInvitationTokenQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := gitq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: groupinvitationtoken.Table,
- Columns: groupinvitationtoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
- },
- From: gitq.sql,
- Unique: true,
- }
- if unique := gitq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(groupinvitationtoken.Table, groupinvitationtoken.Columns, sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID))
+ _spec.From = gitq.sql
+ if unique := gitq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if gitq.path != nil {
+ _spec.Unique = true
}
- if fields := gitq.fields; len(fields) > 0 {
+ if fields := gitq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, groupinvitationtoken.FieldID)
for i := range fields {
@@ -473,10 +475,10 @@ func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := gitq.limit; limit != nil {
+ if limit := gitq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := gitq.offset; offset != nil {
+ if offset := gitq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := gitq.order; len(ps) > 0 {
@@ -492,7 +494,7 @@ func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(gitq.driver.Dialect())
t1 := builder.Table(groupinvitationtoken.Table)
- columns := gitq.fields
+ columns := gitq.ctx.Fields
if len(columns) == 0 {
columns = groupinvitationtoken.Columns
}
@@ -501,7 +503,7 @@ func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Select
selector = gitq.sql
selector.Select(selector.Columns(columns...)...)
}
- if gitq.unique != nil && *gitq.unique {
+ if gitq.ctx.Unique != nil && *gitq.ctx.Unique {
selector.Distinct()
}
for _, p := range gitq.predicates {
@@ -510,12 +512,12 @@ func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Select
for _, p := range gitq.order {
p(selector)
}
- if offset := gitq.offset; offset != nil {
+ if offset := gitq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := gitq.limit; limit != nil {
+ if limit := gitq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -523,13 +525,8 @@ func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Select
// GroupInvitationTokenGroupBy is the group-by builder for GroupInvitationToken entities.
type GroupInvitationTokenGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *GroupInvitationTokenQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -538,74 +535,77 @@ func (gitgb *GroupInvitationTokenGroupBy) Aggregate(fns ...AggregateFunc) *Group
return gitgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (gitgb *GroupInvitationTokenGroupBy) Scan(ctx context.Context, v any) error {
- query, err := gitgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, gitgb.build.ctx, "GroupBy")
+ if err := gitgb.build.prepareQuery(ctx); err != nil {
return err
}
- gitgb.sql = query
- return gitgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*GroupInvitationTokenQuery, *GroupInvitationTokenGroupBy](ctx, gitgb.build, gitgb, gitgb.build.inters, v)
}
-func (gitgb *GroupInvitationTokenGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range gitgb.fields {
- if !groupinvitationtoken.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := gitgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := gitgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (gitgb *GroupInvitationTokenGroupBy) sqlQuery() *sql.Selector {
- selector := gitgb.sql.Select()
+func (gitgb *GroupInvitationTokenGroupBy) sqlScan(ctx context.Context, root *GroupInvitationTokenQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(gitgb.fns))
for _, fn := range gitgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(gitgb.fields)+len(gitgb.fns))
- for _, f := range gitgb.fields {
+ columns := make([]string, 0, len(*gitgb.flds)+len(gitgb.fns))
+ for _, f := range *gitgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(gitgb.fields...)...)
+ selector.GroupBy(selector.Columns(*gitgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := gitgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// GroupInvitationTokenSelect is the builder for selecting fields of GroupInvitationToken entities.
type GroupInvitationTokenSelect struct {
*GroupInvitationTokenQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (gits *GroupInvitationTokenSelect) Aggregate(fns ...AggregateFunc) *GroupInvitationTokenSelect {
+ gits.fns = append(gits.fns, fns...)
+ return gits
}
// Scan applies the selector query and scans the result into the given value.
func (gits *GroupInvitationTokenSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, gits.ctx, "Select")
if err := gits.prepareQuery(ctx); err != nil {
return err
}
- gits.sql = gits.GroupInvitationTokenQuery.sqlQuery(ctx)
- return gits.sqlScan(ctx, v)
+ return scanWithInterceptors[*GroupInvitationTokenQuery, *GroupInvitationTokenSelect](ctx, gits.GroupInvitationTokenQuery, gits, gits.inters, v)
}
-func (gits *GroupInvitationTokenSelect) sqlScan(ctx context.Context, v any) error {
+func (gits *GroupInvitationTokenSelect) sqlScan(ctx context.Context, root *GroupInvitationTokenQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(gits.fns))
+ for _, fn := range gits.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*gits.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := gits.sql.Query()
+ query, args := selector.Query()
if err := gits.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/groupinvitationtoken_update.go b/backend/internal/data/ent/groupinvitationtoken_update.go
index 33cbef2..3e0db91 100644
--- a/backend/internal/data/ent/groupinvitationtoken_update.go
+++ b/backend/internal/data/ent/groupinvitationtoken_update.go
@@ -109,35 +109,8 @@ func (gitu *GroupInvitationTokenUpdate) ClearGroup() *GroupInvitationTokenUpdate
// Save executes the query and returns the number of nodes affected by the update operation.
func (gitu *GroupInvitationTokenUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
gitu.defaults()
- if len(gitu.hooks) == 0 {
- affected, err = gitu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupInvitationTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- gitu.mutation = mutation
- affected, err = gitu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(gitu.hooks) - 1; i >= 0; i-- {
- if gitu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gitu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, gitu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, gitu.sqlSave, gitu.mutation, gitu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -171,16 +144,7 @@ func (gitu *GroupInvitationTokenUpdate) defaults() {
}
func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: groupinvitationtoken.Table,
- Columns: groupinvitationtoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewUpdateSpec(groupinvitationtoken.Table, groupinvitationtoken.Columns, sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID))
if ps := gitu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -189,39 +153,19 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
}
}
if value, ok := gitu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldUpdatedAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := gitu.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: groupinvitationtoken.FieldToken,
- })
+ _spec.SetField(groupinvitationtoken.FieldToken, field.TypeBytes, value)
}
if value, ok := gitu.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldExpiresAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldExpiresAt, field.TypeTime, value)
}
if value, ok := gitu.mutation.Uses(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: groupinvitationtoken.FieldUses,
- })
+ _spec.SetField(groupinvitationtoken.FieldUses, field.TypeInt, value)
}
if value, ok := gitu.mutation.AddedUses(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: groupinvitationtoken.FieldUses,
- })
+ _spec.AddField(groupinvitationtoken.FieldUses, field.TypeInt, value)
}
if gitu.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -231,10 +175,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -247,10 +188,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -266,6 +204,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
}
return 0, err
}
+ gitu.mutation.done = true
return n, nil
}
@@ -354,6 +293,12 @@ func (gituo *GroupInvitationTokenUpdateOne) ClearGroup() *GroupInvitationTokenUp
return gituo
}
+// Where appends a list predicates to the GroupInvitationTokenUpdate builder.
+func (gituo *GroupInvitationTokenUpdateOne) Where(ps ...predicate.GroupInvitationToken) *GroupInvitationTokenUpdateOne {
+ gituo.mutation.Where(ps...)
+ return gituo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (gituo *GroupInvitationTokenUpdateOne) Select(field string, fields ...string) *GroupInvitationTokenUpdateOne {
@@ -363,41 +308,8 @@ func (gituo *GroupInvitationTokenUpdateOne) Select(field string, fields ...strin
// Save executes the query and returns the updated GroupInvitationToken entity.
func (gituo *GroupInvitationTokenUpdateOne) Save(ctx context.Context) (*GroupInvitationToken, error) {
- var (
- err error
- node *GroupInvitationToken
- )
gituo.defaults()
- if len(gituo.hooks) == 0 {
- node, err = gituo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*GroupInvitationTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- gituo.mutation = mutation
- node, err = gituo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(gituo.hooks) - 1; i >= 0; i-- {
- if gituo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = gituo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, gituo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*GroupInvitationToken)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from GroupInvitationTokenMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, gituo.sqlSave, gituo.mutation, gituo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -431,16 +343,7 @@ func (gituo *GroupInvitationTokenUpdateOne) defaults() {
}
func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node *GroupInvitationToken, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: groupinvitationtoken.Table,
- Columns: groupinvitationtoken.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: groupinvitationtoken.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewUpdateSpec(groupinvitationtoken.Table, groupinvitationtoken.Columns, sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID))
id, ok := gituo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "GroupInvitationToken.id" for update`)}
@@ -466,39 +369,19 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
}
}
if value, ok := gituo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldUpdatedAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := gituo.mutation.Token(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBytes,
- Value: value,
- Column: groupinvitationtoken.FieldToken,
- })
+ _spec.SetField(groupinvitationtoken.FieldToken, field.TypeBytes, value)
}
if value, ok := gituo.mutation.ExpiresAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: groupinvitationtoken.FieldExpiresAt,
- })
+ _spec.SetField(groupinvitationtoken.FieldExpiresAt, field.TypeTime, value)
}
if value, ok := gituo.mutation.Uses(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: groupinvitationtoken.FieldUses,
- })
+ _spec.SetField(groupinvitationtoken.FieldUses, field.TypeInt, value)
}
if value, ok := gituo.mutation.AddedUses(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: groupinvitationtoken.FieldUses,
- })
+ _spec.AddField(groupinvitationtoken.FieldUses, field.TypeInt, value)
}
if gituo.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -508,10 +391,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -524,10 +404,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -546,5 +423,6 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
}
return nil, err
}
+ gituo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/has_id.go b/backend/internal/data/ent/has_id.go
index a6afc6a..0877caa 100644
--- a/backend/internal/data/ent/has_id.go
+++ b/backend/internal/data/ent/has_id.go
@@ -8,6 +8,10 @@ func (a *Attachment) GetID() uuid.UUID {
return a.ID
}
+func (ar *AuthRoles) GetID() int {
+ return ar.ID
+}
+
func (at *AuthTokens) GetID() uuid.UUID {
return at.ID
}
@@ -16,14 +20,14 @@ func (d *Document) GetID() uuid.UUID {
return d.ID
}
-func (dt *DocumentToken) GetID() uuid.UUID {
- return dt.ID
-}
-
func (gr *Group) GetID() uuid.UUID {
return gr.ID
}
+func (git *GroupInvitationToken) GetID() uuid.UUID {
+ return git.ID
+}
+
func (i *Item) GetID() uuid.UUID {
return i.ID
}
@@ -40,6 +44,14 @@ func (l *Location) GetID() uuid.UUID {
return l.ID
}
+func (me *MaintenanceEntry) GetID() uuid.UUID {
+ return me.ID
+}
+
+func (n *Notifier) GetID() uuid.UUID {
+ return n.ID
+}
+
func (u *User) GetID() uuid.UUID {
return u.ID
}
diff --git a/backend/internal/data/ent/hook/hook.go b/backend/internal/data/ent/hook/hook.go
index c0a7378..4648b23 100644
--- a/backend/internal/data/ent/hook/hook.go
+++ b/backend/internal/data/ent/hook/hook.go
@@ -15,11 +15,22 @@ type AttachmentFunc func(context.Context, *ent.AttachmentMutation) (ent.Value, e
// Mutate calls f(ctx, m).
func (f AttachmentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.AttachmentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AttachmentMutation", m)
+ if mv, ok := m.(*ent.AttachmentMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AttachmentMutation", m)
+}
+
+// The AuthRolesFunc type is an adapter to allow the use of ordinary
+// function as AuthRoles mutator.
+type AuthRolesFunc func(context.Context, *ent.AuthRolesMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f AuthRolesFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+ if mv, ok := m.(*ent.AuthRolesMutation); ok {
+ return f(ctx, mv)
+ }
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthRolesMutation", m)
}
// The AuthTokensFunc type is an adapter to allow the use of ordinary
@@ -28,11 +39,10 @@ type AuthTokensFunc func(context.Context, *ent.AuthTokensMutation) (ent.Value, e
// Mutate calls f(ctx, m).
func (f AuthTokensFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.AuthTokensMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthTokensMutation", m)
+ if mv, ok := m.(*ent.AuthTokensMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthTokensMutation", m)
}
// The DocumentFunc type is an adapter to allow the use of ordinary
@@ -41,24 +51,10 @@ type DocumentFunc func(context.Context, *ent.DocumentMutation) (ent.Value, error
// Mutate calls f(ctx, m).
func (f DocumentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.DocumentMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentMutation", m)
+ if mv, ok := m.(*ent.DocumentMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
-}
-
-// The DocumentTokenFunc type is an adapter to allow the use of ordinary
-// function as DocumentToken mutator.
-type DocumentTokenFunc func(context.Context, *ent.DocumentTokenMutation) (ent.Value, error)
-
-// Mutate calls f(ctx, m).
-func (f DocumentTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.DocumentTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentTokenMutation", m)
- }
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentMutation", m)
}
// The GroupFunc type is an adapter to allow the use of ordinary
@@ -67,11 +63,10 @@ type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.GroupMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
+ if mv, ok := m.(*ent.GroupMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
}
// The GroupInvitationTokenFunc type is an adapter to allow the use of ordinary
@@ -80,11 +75,10 @@ type GroupInvitationTokenFunc func(context.Context, *ent.GroupInvitationTokenMut
// Mutate calls f(ctx, m).
func (f GroupInvitationTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.GroupInvitationTokenMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupInvitationTokenMutation", m)
+ if mv, ok := m.(*ent.GroupInvitationTokenMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupInvitationTokenMutation", m)
}
// The ItemFunc type is an adapter to allow the use of ordinary
@@ -93,11 +87,10 @@ type ItemFunc func(context.Context, *ent.ItemMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f ItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.ItemMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ItemMutation", m)
+ if mv, ok := m.(*ent.ItemMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ItemMutation", m)
}
// The ItemFieldFunc type is an adapter to allow the use of ordinary
@@ -106,11 +99,10 @@ type ItemFieldFunc func(context.Context, *ent.ItemFieldMutation) (ent.Value, err
// Mutate calls f(ctx, m).
func (f ItemFieldFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.ItemFieldMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ItemFieldMutation", m)
+ if mv, ok := m.(*ent.ItemFieldMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ItemFieldMutation", m)
}
// The LabelFunc type is an adapter to allow the use of ordinary
@@ -119,11 +111,10 @@ type LabelFunc func(context.Context, *ent.LabelMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f LabelFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.LabelMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LabelMutation", m)
+ if mv, ok := m.(*ent.LabelMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LabelMutation", m)
}
// The LocationFunc type is an adapter to allow the use of ordinary
@@ -132,11 +123,34 @@ type LocationFunc func(context.Context, *ent.LocationMutation) (ent.Value, error
// Mutate calls f(ctx, m).
func (f LocationFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.LocationMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LocationMutation", m)
+ if mv, ok := m.(*ent.LocationMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LocationMutation", m)
+}
+
+// The MaintenanceEntryFunc type is an adapter to allow the use of ordinary
+// function as MaintenanceEntry mutator.
+type MaintenanceEntryFunc func(context.Context, *ent.MaintenanceEntryMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f MaintenanceEntryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+ if mv, ok := m.(*ent.MaintenanceEntryMutation); ok {
+ return f(ctx, mv)
+ }
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MaintenanceEntryMutation", m)
+}
+
+// The NotifierFunc type is an adapter to allow the use of ordinary
+// function as Notifier mutator.
+type NotifierFunc func(context.Context, *ent.NotifierMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f NotifierFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+ if mv, ok := m.(*ent.NotifierMutation); ok {
+ return f(ctx, mv)
+ }
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NotifierMutation", m)
}
// The UserFunc type is an adapter to allow the use of ordinary
@@ -145,11 +159,10 @@ type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
- mv, ok := m.(*ent.UserMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
+ if mv, ok := m.(*ent.UserMutation); ok {
+ return f(ctx, mv)
}
- return f(ctx, mv)
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
}
// Condition is a hook condition function.
diff --git a/backend/internal/data/ent/item.go b/backend/internal/data/ent/item.go
index 802acff..7b2be8a 100644
--- a/backend/internal/data/ent/item.go
+++ b/backend/internal/data/ent/item.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -37,6 +38,8 @@ type Item struct {
Insured bool `json:"insured,omitempty"`
// Archived holds the value of the "archived" field.
Archived bool `json:"archived,omitempty"`
+ // AssetID holds the value of the "asset_id" field.
+ AssetID int `json:"asset_id,omitempty"`
// SerialNumber holds the value of the "serial_number" field.
SerialNumber string `json:"serial_number,omitempty"`
// ModelNumber holds the value of the "model_number" field.
@@ -69,33 +72,49 @@ type Item struct {
group_items *uuid.UUID
item_children *uuid.UUID
location_items *uuid.UUID
+ selectValues sql.SelectValues
}
// ItemEdges holds the relations/edges for other nodes in the graph.
type ItemEdges struct {
+ // Group holds the value of the group edge.
+ Group *Group `json:"group,omitempty"`
// Parent holds the value of the parent edge.
Parent *Item `json:"parent,omitempty"`
// Children holds the value of the children edge.
Children []*Item `json:"children,omitempty"`
- // Group holds the value of the group edge.
- Group *Group `json:"group,omitempty"`
// Label holds the value of the label edge.
Label []*Label `json:"label,omitempty"`
// Location holds the value of the location edge.
Location *Location `json:"location,omitempty"`
// Fields holds the value of the fields edge.
Fields []*ItemField `json:"fields,omitempty"`
+ // MaintenanceEntries holds the value of the maintenance_entries edge.
+ MaintenanceEntries []*MaintenanceEntry `json:"maintenance_entries,omitempty"`
// Attachments holds the value of the attachments edge.
Attachments []*Attachment `json:"attachments,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [7]bool
+ loadedTypes [8]bool
+}
+
+// GroupOrErr returns the Group value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e ItemEdges) GroupOrErr() (*Group, error) {
+ if e.loadedTypes[0] {
+ if e.Group == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: group.Label}
+ }
+ return e.Group, nil
+ }
+ return nil, &NotLoadedError{edge: "group"}
}
// ParentOrErr returns the Parent value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ItemEdges) ParentOrErr() (*Item, error) {
- if e.loadedTypes[0] {
+ if e.loadedTypes[1] {
if e.Parent == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: item.Label}
@@ -108,25 +127,12 @@ func (e ItemEdges) ParentOrErr() (*Item, error) {
// ChildrenOrErr returns the Children value or an error if the edge
// was not loaded in eager-loading.
func (e ItemEdges) ChildrenOrErr() ([]*Item, error) {
- if e.loadedTypes[1] {
+ if e.loadedTypes[2] {
return e.Children, nil
}
return nil, &NotLoadedError{edge: "children"}
}
-// GroupOrErr returns the Group value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e ItemEdges) GroupOrErr() (*Group, error) {
- if e.loadedTypes[2] {
- if e.Group == nil {
- // Edge was loaded but was not found.
- return nil, &NotFoundError{label: group.Label}
- }
- return e.Group, nil
- }
- return nil, &NotLoadedError{edge: "group"}
-}
-
// LabelOrErr returns the Label value or an error if the edge
// was not loaded in eager-loading.
func (e ItemEdges) LabelOrErr() ([]*Label, error) {
@@ -158,10 +164,19 @@ func (e ItemEdges) FieldsOrErr() ([]*ItemField, error) {
return nil, &NotLoadedError{edge: "fields"}
}
+// MaintenanceEntriesOrErr returns the MaintenanceEntries value or an error if the edge
+// was not loaded in eager-loading.
+func (e ItemEdges) MaintenanceEntriesOrErr() ([]*MaintenanceEntry, error) {
+ if e.loadedTypes[6] {
+ return e.MaintenanceEntries, nil
+ }
+ return nil, &NotLoadedError{edge: "maintenance_entries"}
+}
+
// AttachmentsOrErr returns the Attachments value or an error if the edge
// was not loaded in eager-loading.
func (e ItemEdges) AttachmentsOrErr() ([]*Attachment, error) {
- if e.loadedTypes[6] {
+ if e.loadedTypes[7] {
return e.Attachments, nil
}
return nil, &NotLoadedError{edge: "attachments"}
@@ -176,7 +191,7 @@ func (*Item) scanValues(columns []string) ([]any, error) {
values[i] = new(sql.NullBool)
case item.FieldPurchasePrice, item.FieldSoldPrice:
values[i] = new(sql.NullFloat64)
- case item.FieldQuantity:
+ case item.FieldQuantity, item.FieldAssetID:
values[i] = new(sql.NullInt64)
case item.FieldName, item.FieldDescription, item.FieldImportRef, item.FieldNotes, item.FieldSerialNumber, item.FieldModelNumber, item.FieldManufacturer, item.FieldWarrantyDetails, item.FieldPurchaseFrom, item.FieldSoldTo, item.FieldSoldNotes:
values[i] = new(sql.NullString)
@@ -191,7 +206,7 @@ func (*Item) scanValues(columns []string) ([]any, error) {
case item.ForeignKeys[2]: // location_items
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type Item", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -265,6 +280,12 @@ func (i *Item) assignValues(columns []string, values []any) error {
} else if value.Valid {
i.Archived = value.Bool
}
+ case item.FieldAssetID:
+ if value, ok := values[j].(*sql.NullInt64); !ok {
+ return fmt.Errorf("unexpected type %T for field asset_id", values[j])
+ } else if value.Valid {
+ i.AssetID = int(value.Int64)
+ }
case item.FieldSerialNumber:
if value, ok := values[j].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field serial_number", values[j])
@@ -364,51 +385,64 @@ func (i *Item) assignValues(columns []string, values []any) error {
i.location_items = new(uuid.UUID)
*i.location_items = *value.S.(*uuid.UUID)
}
+ default:
+ i.selectValues.Set(columns[j], values[j])
}
}
return nil
}
-// QueryParent queries the "parent" edge of the Item entity.
-func (i *Item) QueryParent() *ItemQuery {
- return (&ItemClient{config: i.config}).QueryParent(i)
-}
-
-// QueryChildren queries the "children" edge of the Item entity.
-func (i *Item) QueryChildren() *ItemQuery {
- return (&ItemClient{config: i.config}).QueryChildren(i)
+// Value returns the ent.Value that was dynamically selected and assigned to the Item.
+// This includes values selected through modifiers, order, etc.
+func (i *Item) Value(name string) (ent.Value, error) {
+ return i.selectValues.Get(name)
}
// QueryGroup queries the "group" edge of the Item entity.
func (i *Item) QueryGroup() *GroupQuery {
- return (&ItemClient{config: i.config}).QueryGroup(i)
+ return NewItemClient(i.config).QueryGroup(i)
+}
+
+// QueryParent queries the "parent" edge of the Item entity.
+func (i *Item) QueryParent() *ItemQuery {
+ return NewItemClient(i.config).QueryParent(i)
+}
+
+// QueryChildren queries the "children" edge of the Item entity.
+func (i *Item) QueryChildren() *ItemQuery {
+ return NewItemClient(i.config).QueryChildren(i)
}
// QueryLabel queries the "label" edge of the Item entity.
func (i *Item) QueryLabel() *LabelQuery {
- return (&ItemClient{config: i.config}).QueryLabel(i)
+ return NewItemClient(i.config).QueryLabel(i)
}
// QueryLocation queries the "location" edge of the Item entity.
func (i *Item) QueryLocation() *LocationQuery {
- return (&ItemClient{config: i.config}).QueryLocation(i)
+ return NewItemClient(i.config).QueryLocation(i)
}
// QueryFields queries the "fields" edge of the Item entity.
func (i *Item) QueryFields() *ItemFieldQuery {
- return (&ItemClient{config: i.config}).QueryFields(i)
+ return NewItemClient(i.config).QueryFields(i)
+}
+
+// QueryMaintenanceEntries queries the "maintenance_entries" edge of the Item entity.
+func (i *Item) QueryMaintenanceEntries() *MaintenanceEntryQuery {
+ return NewItemClient(i.config).QueryMaintenanceEntries(i)
}
// QueryAttachments queries the "attachments" edge of the Item entity.
func (i *Item) QueryAttachments() *AttachmentQuery {
- return (&ItemClient{config: i.config}).QueryAttachments(i)
+ return NewItemClient(i.config).QueryAttachments(i)
}
// Update returns a builder for updating this Item.
// Note that you need to call Item.Unwrap() before calling this method if this Item
// was returned from a transaction, and the transaction was committed or rolled back.
func (i *Item) Update() *ItemUpdateOne {
- return (&ItemClient{config: i.config}).UpdateOne(i)
+ return NewItemClient(i.config).UpdateOne(i)
}
// Unwrap unwraps the Item entity that was returned from a transaction after it was closed,
@@ -454,6 +488,9 @@ func (i *Item) String() string {
builder.WriteString("archived=")
builder.WriteString(fmt.Sprintf("%v", i.Archived))
builder.WriteString(", ")
+ builder.WriteString("asset_id=")
+ builder.WriteString(fmt.Sprintf("%v", i.AssetID))
+ builder.WriteString(", ")
builder.WriteString("serial_number=")
builder.WriteString(i.SerialNumber)
builder.WriteString(", ")
@@ -498,9 +535,3 @@ func (i *Item) String() string {
// Items is a parsable slice of Item.
type Items []*Item
-
-func (i Items) config(cfg config) {
- for _i := range i {
- i[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/item/item.go b/backend/internal/data/ent/item/item.go
index c2991da..bd04679 100644
--- a/backend/internal/data/ent/item/item.go
+++ b/backend/internal/data/ent/item/item.go
@@ -5,6 +5,8 @@ package item
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -31,6 +33,8 @@ const (
FieldInsured = "insured"
// FieldArchived holds the string denoting the archived field in the database.
FieldArchived = "archived"
+ // FieldAssetID holds the string denoting the asset_id field in the database.
+ FieldAssetID = "asset_id"
// FieldSerialNumber holds the string denoting the serial_number field in the database.
FieldSerialNumber = "serial_number"
// FieldModelNumber holds the string denoting the model_number field in the database.
@@ -57,22 +61,31 @@ const (
FieldSoldPrice = "sold_price"
// FieldSoldNotes holds the string denoting the sold_notes field in the database.
FieldSoldNotes = "sold_notes"
+ // EdgeGroup holds the string denoting the group edge name in mutations.
+ EdgeGroup = "group"
// EdgeParent holds the string denoting the parent edge name in mutations.
EdgeParent = "parent"
// EdgeChildren holds the string denoting the children edge name in mutations.
EdgeChildren = "children"
- // EdgeGroup holds the string denoting the group edge name in mutations.
- EdgeGroup = "group"
// EdgeLabel holds the string denoting the label edge name in mutations.
EdgeLabel = "label"
// EdgeLocation holds the string denoting the location edge name in mutations.
EdgeLocation = "location"
// EdgeFields holds the string denoting the fields edge name in mutations.
EdgeFields = "fields"
+ // EdgeMaintenanceEntries holds the string denoting the maintenance_entries edge name in mutations.
+ EdgeMaintenanceEntries = "maintenance_entries"
// EdgeAttachments holds the string denoting the attachments edge name in mutations.
EdgeAttachments = "attachments"
// Table holds the table name of the item in the database.
Table = "items"
+ // GroupTable is the table that holds the group relation/edge.
+ GroupTable = "items"
+ // GroupInverseTable is the table name for the Group entity.
+ // It exists in this package in order to avoid circular dependency with the "group" package.
+ GroupInverseTable = "groups"
+ // GroupColumn is the table column denoting the group relation/edge.
+ GroupColumn = "group_items"
// ParentTable is the table that holds the parent relation/edge.
ParentTable = "items"
// ParentColumn is the table column denoting the parent relation/edge.
@@ -81,13 +94,6 @@ const (
ChildrenTable = "items"
// ChildrenColumn is the table column denoting the children relation/edge.
ChildrenColumn = "item_children"
- // GroupTable is the table that holds the group relation/edge.
- GroupTable = "items"
- // GroupInverseTable is the table name for the Group entity.
- // It exists in this package in order to avoid circular dependency with the "group" package.
- GroupInverseTable = "groups"
- // GroupColumn is the table column denoting the group relation/edge.
- GroupColumn = "group_items"
// LabelTable is the table that holds the label relation/edge. The primary key declared below.
LabelTable = "label_items"
// LabelInverseTable is the table name for the Label entity.
@@ -107,6 +113,13 @@ const (
FieldsInverseTable = "item_fields"
// FieldsColumn is the table column denoting the fields relation/edge.
FieldsColumn = "item_fields"
+ // MaintenanceEntriesTable is the table that holds the maintenance_entries relation/edge.
+ MaintenanceEntriesTable = "maintenance_entries"
+ // MaintenanceEntriesInverseTable is the table name for the MaintenanceEntry entity.
+ // It exists in this package in order to avoid circular dependency with the "maintenanceentry" package.
+ MaintenanceEntriesInverseTable = "maintenance_entries"
+ // MaintenanceEntriesColumn is the table column denoting the maintenance_entries relation/edge.
+ MaintenanceEntriesColumn = "item_id"
// AttachmentsTable is the table that holds the attachments relation/edge.
AttachmentsTable = "attachments"
// AttachmentsInverseTable is the table name for the Attachment entity.
@@ -128,6 +141,7 @@ var Columns = []string{
FieldQuantity,
FieldInsured,
FieldArchived,
+ FieldAssetID,
FieldSerialNumber,
FieldModelNumber,
FieldManufacturer,
@@ -193,6 +207,8 @@ var (
DefaultInsured bool
// DefaultArchived holds the default value on creation for the "archived" field.
DefaultArchived bool
+ // DefaultAssetID holds the default value on creation for the "asset_id" field.
+ DefaultAssetID int
// SerialNumberValidator is a validator for the "serial_number" field. It is called by the builders before save.
SerialNumberValidator func(string) error
// ModelNumberValidator is a validator for the "model_number" field. It is called by the builders before save.
@@ -212,3 +228,273 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the Item queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByDescription orders the results by the description field.
+func ByDescription(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDescription, opts...).ToFunc()
+}
+
+// ByImportRef orders the results by the import_ref field.
+func ByImportRef(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldImportRef, opts...).ToFunc()
+}
+
+// ByNotes orders the results by the notes field.
+func ByNotes(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldNotes, opts...).ToFunc()
+}
+
+// ByQuantity orders the results by the quantity field.
+func ByQuantity(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldQuantity, opts...).ToFunc()
+}
+
+// ByInsured orders the results by the insured field.
+func ByInsured(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldInsured, opts...).ToFunc()
+}
+
+// ByArchived orders the results by the archived field.
+func ByArchived(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldArchived, opts...).ToFunc()
+}
+
+// ByAssetID orders the results by the asset_id field.
+func ByAssetID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldAssetID, opts...).ToFunc()
+}
+
+// BySerialNumber orders the results by the serial_number field.
+func BySerialNumber(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSerialNumber, opts...).ToFunc()
+}
+
+// ByModelNumber orders the results by the model_number field.
+func ByModelNumber(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldModelNumber, opts...).ToFunc()
+}
+
+// ByManufacturer orders the results by the manufacturer field.
+func ByManufacturer(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldManufacturer, opts...).ToFunc()
+}
+
+// ByLifetimeWarranty orders the results by the lifetime_warranty field.
+func ByLifetimeWarranty(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldLifetimeWarranty, opts...).ToFunc()
+}
+
+// ByWarrantyExpires orders the results by the warranty_expires field.
+func ByWarrantyExpires(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldWarrantyExpires, opts...).ToFunc()
+}
+
+// ByWarrantyDetails orders the results by the warranty_details field.
+func ByWarrantyDetails(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldWarrantyDetails, opts...).ToFunc()
+}
+
+// ByPurchaseTime orders the results by the purchase_time field.
+func ByPurchaseTime(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPurchaseTime, opts...).ToFunc()
+}
+
+// ByPurchaseFrom orders the results by the purchase_from field.
+func ByPurchaseFrom(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPurchaseFrom, opts...).ToFunc()
+}
+
+// ByPurchasePrice orders the results by the purchase_price field.
+func ByPurchasePrice(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPurchasePrice, opts...).ToFunc()
+}
+
+// BySoldTime orders the results by the sold_time field.
+func BySoldTime(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSoldTime, opts...).ToFunc()
+}
+
+// BySoldTo orders the results by the sold_to field.
+func BySoldTo(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSoldTo, opts...).ToFunc()
+}
+
+// BySoldPrice orders the results by the sold_price field.
+func BySoldPrice(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSoldPrice, opts...).ToFunc()
+}
+
+// BySoldNotes orders the results by the sold_notes field.
+func BySoldNotes(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSoldNotes, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByParentField orders the results by parent field.
+func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByChildrenCount orders the results by children count.
+func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
+ }
+}
+
+// ByChildren orders the results by children terms.
+func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByLabelCount orders the results by label count.
+func ByLabelCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newLabelStep(), opts...)
+ }
+}
+
+// ByLabel orders the results by label terms.
+func ByLabel(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newLabelStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByLocationField orders the results by location field.
+func ByLocationField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newLocationStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByFieldsCount orders the results by fields count.
+func ByFieldsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newFieldsStep(), opts...)
+ }
+}
+
+// ByFields orders the results by fields terms.
+func ByFields(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newFieldsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByMaintenanceEntriesCount orders the results by maintenance_entries count.
+func ByMaintenanceEntriesCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newMaintenanceEntriesStep(), opts...)
+ }
+}
+
+// ByMaintenanceEntries orders the results by maintenance_entries terms.
+func ByMaintenanceEntries(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newMaintenanceEntriesStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByAttachmentsCount orders the results by attachments count.
+func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...)
+ }
+}
+
+// ByAttachments orders the results by attachments terms.
+func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newParentStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
+ )
+}
+func newChildrenStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
+ )
+}
+func newLabelStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(LabelInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...),
+ )
+}
+func newLocationStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(LocationInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn),
+ )
+}
+func newFieldsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(FieldsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn),
+ )
+}
+func newMaintenanceEntriesStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(MaintenanceEntriesInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn),
+ )
+}
+func newAttachmentsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(AttachmentsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
+ )
+}
diff --git a/backend/internal/data/ent/item/where.go b/backend/internal/data/ent/item/where.go
index 2897e35..7504e6a 100644
--- a/backend/internal/data/ent/item/where.go
+++ b/backend/internal/data/ent/item/where.go
@@ -13,2051 +13,1419 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Item(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldName, v))
}
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldDescription, v))
}
// ImportRef applies equality check predicate on the "import_ref" field. It's identical to ImportRefEQ.
func ImportRef(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldImportRef, v))
}
// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ.
func Notes(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldNotes, v))
}
// Quantity applies equality check predicate on the "quantity" field. It's identical to QuantityEQ.
func Quantity(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldQuantity, v))
}
// Insured applies equality check predicate on the "insured" field. It's identical to InsuredEQ.
func Insured(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldInsured), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldInsured, v))
}
// Archived applies equality check predicate on the "archived" field. It's identical to ArchivedEQ.
func Archived(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldArchived), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldArchived, v))
+}
+
+// AssetID applies equality check predicate on the "asset_id" field. It's identical to AssetIDEQ.
+func AssetID(v int) predicate.Item {
+ return predicate.Item(sql.FieldEQ(FieldAssetID, v))
}
// SerialNumber applies equality check predicate on the "serial_number" field. It's identical to SerialNumberEQ.
func SerialNumber(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSerialNumber, v))
}
// ModelNumber applies equality check predicate on the "model_number" field. It's identical to ModelNumberEQ.
func ModelNumber(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldModelNumber, v))
}
// Manufacturer applies equality check predicate on the "manufacturer" field. It's identical to ManufacturerEQ.
func Manufacturer(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldManufacturer, v))
}
// LifetimeWarranty applies equality check predicate on the "lifetime_warranty" field. It's identical to LifetimeWarrantyEQ.
func LifetimeWarranty(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldLifetimeWarranty), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldLifetimeWarranty, v))
}
// WarrantyExpires applies equality check predicate on the "warranty_expires" field. It's identical to WarrantyExpiresEQ.
func WarrantyExpires(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldWarrantyExpires, v))
}
// WarrantyDetails applies equality check predicate on the "warranty_details" field. It's identical to WarrantyDetailsEQ.
func WarrantyDetails(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldWarrantyDetails, v))
}
// PurchaseTime applies equality check predicate on the "purchase_time" field. It's identical to PurchaseTimeEQ.
func PurchaseTime(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchaseTime, v))
}
// PurchaseFrom applies equality check predicate on the "purchase_from" field. It's identical to PurchaseFromEQ.
func PurchaseFrom(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchaseFrom, v))
}
// PurchasePrice applies equality check predicate on the "purchase_price" field. It's identical to PurchasePriceEQ.
func PurchasePrice(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchasePrice, v))
}
// SoldTime applies equality check predicate on the "sold_time" field. It's identical to SoldTimeEQ.
func SoldTime(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldTime, v))
}
// SoldTo applies equality check predicate on the "sold_to" field. It's identical to SoldToEQ.
func SoldTo(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldTo, v))
}
// SoldPrice applies equality check predicate on the "sold_price" field. It's identical to SoldPriceEQ.
func SoldPrice(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldPrice, v))
}
// SoldNotes applies equality check predicate on the "sold_notes" field. It's identical to SoldNotesEQ.
func SoldNotes(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldNotes, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldName, v))
}
// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldDescription, v))
}
// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldDescription, v))
}
// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldDescription), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldDescription, vs...))
}
// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldDescription), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldDescription, vs...))
}
// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldGT(FieldDescription, v))
}
// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldDescription, v))
}
// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldLT(FieldDescription, v))
}
// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldDescription, v))
}
// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldContains(FieldDescription, v))
}
// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldDescription, v))
}
// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldDescription, v))
}
// DescriptionIsNil applies the IsNil predicate on the "description" field.
func DescriptionIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldDescription)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldDescription))
}
// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldDescription)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldDescription))
}
// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldDescription, v))
}
// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldDescription), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldDescription, v))
}
// ImportRefEQ applies the EQ predicate on the "import_ref" field.
func ImportRefEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldImportRef, v))
}
// ImportRefNEQ applies the NEQ predicate on the "import_ref" field.
func ImportRefNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldImportRef, v))
}
// ImportRefIn applies the In predicate on the "import_ref" field.
func ImportRefIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldImportRef), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldImportRef, vs...))
}
// ImportRefNotIn applies the NotIn predicate on the "import_ref" field.
func ImportRefNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldImportRef), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldImportRef, vs...))
}
// ImportRefGT applies the GT predicate on the "import_ref" field.
func ImportRefGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldGT(FieldImportRef, v))
}
// ImportRefGTE applies the GTE predicate on the "import_ref" field.
func ImportRefGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldImportRef, v))
}
// ImportRefLT applies the LT predicate on the "import_ref" field.
func ImportRefLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldLT(FieldImportRef, v))
}
// ImportRefLTE applies the LTE predicate on the "import_ref" field.
func ImportRefLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldImportRef, v))
}
// ImportRefContains applies the Contains predicate on the "import_ref" field.
func ImportRefContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldContains(FieldImportRef, v))
}
// ImportRefHasPrefix applies the HasPrefix predicate on the "import_ref" field.
func ImportRefHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldImportRef, v))
}
// ImportRefHasSuffix applies the HasSuffix predicate on the "import_ref" field.
func ImportRefHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldImportRef, v))
}
// ImportRefIsNil applies the IsNil predicate on the "import_ref" field.
func ImportRefIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldImportRef)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldImportRef))
}
// ImportRefNotNil applies the NotNil predicate on the "import_ref" field.
func ImportRefNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldImportRef)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldImportRef))
}
// ImportRefEqualFold applies the EqualFold predicate on the "import_ref" field.
func ImportRefEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldImportRef, v))
}
// ImportRefContainsFold applies the ContainsFold predicate on the "import_ref" field.
func ImportRefContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldImportRef), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldImportRef, v))
}
// NotesEQ applies the EQ predicate on the "notes" field.
func NotesEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldNotes, v))
}
// NotesNEQ applies the NEQ predicate on the "notes" field.
func NotesNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldNotes, v))
}
// NotesIn applies the In predicate on the "notes" field.
func NotesIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldNotes), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldNotes, vs...))
}
// NotesNotIn applies the NotIn predicate on the "notes" field.
func NotesNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldNotes), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldNotes, vs...))
}
// NotesGT applies the GT predicate on the "notes" field.
func NotesGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldGT(FieldNotes, v))
}
// NotesGTE applies the GTE predicate on the "notes" field.
func NotesGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldNotes, v))
}
// NotesLT applies the LT predicate on the "notes" field.
func NotesLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldLT(FieldNotes, v))
}
// NotesLTE applies the LTE predicate on the "notes" field.
func NotesLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldNotes, v))
}
// NotesContains applies the Contains predicate on the "notes" field.
func NotesContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldContains(FieldNotes, v))
}
// NotesHasPrefix applies the HasPrefix predicate on the "notes" field.
func NotesHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldNotes, v))
}
// NotesHasSuffix applies the HasSuffix predicate on the "notes" field.
func NotesHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldNotes, v))
}
// NotesIsNil applies the IsNil predicate on the "notes" field.
func NotesIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldNotes)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldNotes))
}
// NotesNotNil applies the NotNil predicate on the "notes" field.
func NotesNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldNotes)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldNotes))
}
// NotesEqualFold applies the EqualFold predicate on the "notes" field.
func NotesEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldNotes, v))
}
// NotesContainsFold applies the ContainsFold predicate on the "notes" field.
func NotesContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldNotes), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldNotes, v))
}
// QuantityEQ applies the EQ predicate on the "quantity" field.
func QuantityEQ(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldQuantity, v))
}
// QuantityNEQ applies the NEQ predicate on the "quantity" field.
func QuantityNEQ(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldQuantity, v))
}
// QuantityIn applies the In predicate on the "quantity" field.
func QuantityIn(vs ...int) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldQuantity), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldQuantity, vs...))
}
// QuantityNotIn applies the NotIn predicate on the "quantity" field.
func QuantityNotIn(vs ...int) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldQuantity), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldQuantity, vs...))
}
// QuantityGT applies the GT predicate on the "quantity" field.
func QuantityGT(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldGT(FieldQuantity, v))
}
// QuantityGTE applies the GTE predicate on the "quantity" field.
func QuantityGTE(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldQuantity, v))
}
// QuantityLT applies the LT predicate on the "quantity" field.
func QuantityLT(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldLT(FieldQuantity, v))
}
// QuantityLTE applies the LTE predicate on the "quantity" field.
func QuantityLTE(v int) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldQuantity), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldQuantity, v))
}
// InsuredEQ applies the EQ predicate on the "insured" field.
func InsuredEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldInsured), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldInsured, v))
}
// InsuredNEQ applies the NEQ predicate on the "insured" field.
func InsuredNEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldInsured), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldInsured, v))
}
// ArchivedEQ applies the EQ predicate on the "archived" field.
func ArchivedEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldArchived), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldArchived, v))
}
// ArchivedNEQ applies the NEQ predicate on the "archived" field.
func ArchivedNEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldArchived), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldArchived, v))
+}
+
+// AssetIDEQ applies the EQ predicate on the "asset_id" field.
+func AssetIDEQ(v int) predicate.Item {
+ return predicate.Item(sql.FieldEQ(FieldAssetID, v))
+}
+
+// AssetIDNEQ applies the NEQ predicate on the "asset_id" field.
+func AssetIDNEQ(v int) predicate.Item {
+ return predicate.Item(sql.FieldNEQ(FieldAssetID, v))
+}
+
+// AssetIDIn applies the In predicate on the "asset_id" field.
+func AssetIDIn(vs ...int) predicate.Item {
+ return predicate.Item(sql.FieldIn(FieldAssetID, vs...))
+}
+
+// AssetIDNotIn applies the NotIn predicate on the "asset_id" field.
+func AssetIDNotIn(vs ...int) predicate.Item {
+ return predicate.Item(sql.FieldNotIn(FieldAssetID, vs...))
+}
+
+// AssetIDGT applies the GT predicate on the "asset_id" field.
+func AssetIDGT(v int) predicate.Item {
+ return predicate.Item(sql.FieldGT(FieldAssetID, v))
+}
+
+// AssetIDGTE applies the GTE predicate on the "asset_id" field.
+func AssetIDGTE(v int) predicate.Item {
+ return predicate.Item(sql.FieldGTE(FieldAssetID, v))
+}
+
+// AssetIDLT applies the LT predicate on the "asset_id" field.
+func AssetIDLT(v int) predicate.Item {
+ return predicate.Item(sql.FieldLT(FieldAssetID, v))
+}
+
+// AssetIDLTE applies the LTE predicate on the "asset_id" field.
+func AssetIDLTE(v int) predicate.Item {
+ return predicate.Item(sql.FieldLTE(FieldAssetID, v))
}
// SerialNumberEQ applies the EQ predicate on the "serial_number" field.
func SerialNumberEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSerialNumber, v))
}
// SerialNumberNEQ applies the NEQ predicate on the "serial_number" field.
func SerialNumberNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldSerialNumber, v))
}
// SerialNumberIn applies the In predicate on the "serial_number" field.
func SerialNumberIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldSerialNumber), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldSerialNumber, vs...))
}
// SerialNumberNotIn applies the NotIn predicate on the "serial_number" field.
func SerialNumberNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldSerialNumber), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldSerialNumber, vs...))
}
// SerialNumberGT applies the GT predicate on the "serial_number" field.
func SerialNumberGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldGT(FieldSerialNumber, v))
}
// SerialNumberGTE applies the GTE predicate on the "serial_number" field.
func SerialNumberGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldSerialNumber, v))
}
// SerialNumberLT applies the LT predicate on the "serial_number" field.
func SerialNumberLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldLT(FieldSerialNumber, v))
}
// SerialNumberLTE applies the LTE predicate on the "serial_number" field.
func SerialNumberLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldSerialNumber, v))
}
// SerialNumberContains applies the Contains predicate on the "serial_number" field.
func SerialNumberContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldContains(FieldSerialNumber, v))
}
// SerialNumberHasPrefix applies the HasPrefix predicate on the "serial_number" field.
func SerialNumberHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldSerialNumber, v))
}
// SerialNumberHasSuffix applies the HasSuffix predicate on the "serial_number" field.
func SerialNumberHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldSerialNumber, v))
}
// SerialNumberIsNil applies the IsNil predicate on the "serial_number" field.
func SerialNumberIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldSerialNumber)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldSerialNumber))
}
// SerialNumberNotNil applies the NotNil predicate on the "serial_number" field.
func SerialNumberNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldSerialNumber)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldSerialNumber))
}
// SerialNumberEqualFold applies the EqualFold predicate on the "serial_number" field.
func SerialNumberEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldSerialNumber, v))
}
// SerialNumberContainsFold applies the ContainsFold predicate on the "serial_number" field.
func SerialNumberContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldSerialNumber), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldSerialNumber, v))
}
// ModelNumberEQ applies the EQ predicate on the "model_number" field.
func ModelNumberEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldModelNumber, v))
}
// ModelNumberNEQ applies the NEQ predicate on the "model_number" field.
func ModelNumberNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldModelNumber, v))
}
// ModelNumberIn applies the In predicate on the "model_number" field.
func ModelNumberIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldModelNumber), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldModelNumber, vs...))
}
// ModelNumberNotIn applies the NotIn predicate on the "model_number" field.
func ModelNumberNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldModelNumber), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldModelNumber, vs...))
}
// ModelNumberGT applies the GT predicate on the "model_number" field.
func ModelNumberGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldGT(FieldModelNumber, v))
}
// ModelNumberGTE applies the GTE predicate on the "model_number" field.
func ModelNumberGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldModelNumber, v))
}
// ModelNumberLT applies the LT predicate on the "model_number" field.
func ModelNumberLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldLT(FieldModelNumber, v))
}
// ModelNumberLTE applies the LTE predicate on the "model_number" field.
func ModelNumberLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldModelNumber, v))
}
// ModelNumberContains applies the Contains predicate on the "model_number" field.
func ModelNumberContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldContains(FieldModelNumber, v))
}
// ModelNumberHasPrefix applies the HasPrefix predicate on the "model_number" field.
func ModelNumberHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldModelNumber, v))
}
// ModelNumberHasSuffix applies the HasSuffix predicate on the "model_number" field.
func ModelNumberHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldModelNumber, v))
}
// ModelNumberIsNil applies the IsNil predicate on the "model_number" field.
func ModelNumberIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldModelNumber)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldModelNumber))
}
// ModelNumberNotNil applies the NotNil predicate on the "model_number" field.
func ModelNumberNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldModelNumber)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldModelNumber))
}
// ModelNumberEqualFold applies the EqualFold predicate on the "model_number" field.
func ModelNumberEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldModelNumber, v))
}
// ModelNumberContainsFold applies the ContainsFold predicate on the "model_number" field.
func ModelNumberContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldModelNumber), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldModelNumber, v))
}
// ManufacturerEQ applies the EQ predicate on the "manufacturer" field.
func ManufacturerEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldManufacturer, v))
}
// ManufacturerNEQ applies the NEQ predicate on the "manufacturer" field.
func ManufacturerNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldManufacturer, v))
}
// ManufacturerIn applies the In predicate on the "manufacturer" field.
func ManufacturerIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldManufacturer), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldManufacturer, vs...))
}
// ManufacturerNotIn applies the NotIn predicate on the "manufacturer" field.
func ManufacturerNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldManufacturer), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldManufacturer, vs...))
}
// ManufacturerGT applies the GT predicate on the "manufacturer" field.
func ManufacturerGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldGT(FieldManufacturer, v))
}
// ManufacturerGTE applies the GTE predicate on the "manufacturer" field.
func ManufacturerGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldManufacturer, v))
}
// ManufacturerLT applies the LT predicate on the "manufacturer" field.
func ManufacturerLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldLT(FieldManufacturer, v))
}
// ManufacturerLTE applies the LTE predicate on the "manufacturer" field.
func ManufacturerLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldManufacturer, v))
}
// ManufacturerContains applies the Contains predicate on the "manufacturer" field.
func ManufacturerContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldContains(FieldManufacturer, v))
}
// ManufacturerHasPrefix applies the HasPrefix predicate on the "manufacturer" field.
func ManufacturerHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldManufacturer, v))
}
// ManufacturerHasSuffix applies the HasSuffix predicate on the "manufacturer" field.
func ManufacturerHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldManufacturer, v))
}
// ManufacturerIsNil applies the IsNil predicate on the "manufacturer" field.
func ManufacturerIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldManufacturer)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldManufacturer))
}
// ManufacturerNotNil applies the NotNil predicate on the "manufacturer" field.
func ManufacturerNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldManufacturer)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldManufacturer))
}
// ManufacturerEqualFold applies the EqualFold predicate on the "manufacturer" field.
func ManufacturerEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldManufacturer, v))
}
// ManufacturerContainsFold applies the ContainsFold predicate on the "manufacturer" field.
func ManufacturerContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldManufacturer), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldManufacturer, v))
}
// LifetimeWarrantyEQ applies the EQ predicate on the "lifetime_warranty" field.
func LifetimeWarrantyEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldLifetimeWarranty), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldLifetimeWarranty, v))
}
// LifetimeWarrantyNEQ applies the NEQ predicate on the "lifetime_warranty" field.
func LifetimeWarrantyNEQ(v bool) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldLifetimeWarranty), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldLifetimeWarranty, v))
}
// WarrantyExpiresEQ applies the EQ predicate on the "warranty_expires" field.
func WarrantyExpiresEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldWarrantyExpires, v))
}
// WarrantyExpiresNEQ applies the NEQ predicate on the "warranty_expires" field.
func WarrantyExpiresNEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldWarrantyExpires, v))
}
// WarrantyExpiresIn applies the In predicate on the "warranty_expires" field.
func WarrantyExpiresIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldWarrantyExpires), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldWarrantyExpires, vs...))
}
// WarrantyExpiresNotIn applies the NotIn predicate on the "warranty_expires" field.
func WarrantyExpiresNotIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldWarrantyExpires), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldWarrantyExpires, vs...))
}
// WarrantyExpiresGT applies the GT predicate on the "warranty_expires" field.
func WarrantyExpiresGT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldGT(FieldWarrantyExpires, v))
}
// WarrantyExpiresGTE applies the GTE predicate on the "warranty_expires" field.
func WarrantyExpiresGTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldWarrantyExpires, v))
}
// WarrantyExpiresLT applies the LT predicate on the "warranty_expires" field.
func WarrantyExpiresLT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldLT(FieldWarrantyExpires, v))
}
// WarrantyExpiresLTE applies the LTE predicate on the "warranty_expires" field.
func WarrantyExpiresLTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldWarrantyExpires), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldWarrantyExpires, v))
}
// WarrantyExpiresIsNil applies the IsNil predicate on the "warranty_expires" field.
func WarrantyExpiresIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldWarrantyExpires)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldWarrantyExpires))
}
// WarrantyExpiresNotNil applies the NotNil predicate on the "warranty_expires" field.
func WarrantyExpiresNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldWarrantyExpires)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldWarrantyExpires))
}
// WarrantyDetailsEQ applies the EQ predicate on the "warranty_details" field.
func WarrantyDetailsEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldWarrantyDetails, v))
}
// WarrantyDetailsNEQ applies the NEQ predicate on the "warranty_details" field.
func WarrantyDetailsNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldWarrantyDetails, v))
}
// WarrantyDetailsIn applies the In predicate on the "warranty_details" field.
func WarrantyDetailsIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldWarrantyDetails), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldWarrantyDetails, vs...))
}
// WarrantyDetailsNotIn applies the NotIn predicate on the "warranty_details" field.
func WarrantyDetailsNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldWarrantyDetails), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldWarrantyDetails, vs...))
}
// WarrantyDetailsGT applies the GT predicate on the "warranty_details" field.
func WarrantyDetailsGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldGT(FieldWarrantyDetails, v))
}
// WarrantyDetailsGTE applies the GTE predicate on the "warranty_details" field.
func WarrantyDetailsGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldWarrantyDetails, v))
}
// WarrantyDetailsLT applies the LT predicate on the "warranty_details" field.
func WarrantyDetailsLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldLT(FieldWarrantyDetails, v))
}
// WarrantyDetailsLTE applies the LTE predicate on the "warranty_details" field.
func WarrantyDetailsLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldWarrantyDetails, v))
}
// WarrantyDetailsContains applies the Contains predicate on the "warranty_details" field.
func WarrantyDetailsContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldContains(FieldWarrantyDetails, v))
}
// WarrantyDetailsHasPrefix applies the HasPrefix predicate on the "warranty_details" field.
func WarrantyDetailsHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldWarrantyDetails, v))
}
// WarrantyDetailsHasSuffix applies the HasSuffix predicate on the "warranty_details" field.
func WarrantyDetailsHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldWarrantyDetails, v))
}
// WarrantyDetailsIsNil applies the IsNil predicate on the "warranty_details" field.
func WarrantyDetailsIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldWarrantyDetails)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldWarrantyDetails))
}
// WarrantyDetailsNotNil applies the NotNil predicate on the "warranty_details" field.
func WarrantyDetailsNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldWarrantyDetails)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldWarrantyDetails))
}
// WarrantyDetailsEqualFold applies the EqualFold predicate on the "warranty_details" field.
func WarrantyDetailsEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldWarrantyDetails, v))
}
// WarrantyDetailsContainsFold applies the ContainsFold predicate on the "warranty_details" field.
func WarrantyDetailsContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldWarrantyDetails), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldWarrantyDetails, v))
}
// PurchaseTimeEQ applies the EQ predicate on the "purchase_time" field.
func PurchaseTimeEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchaseTime, v))
}
// PurchaseTimeNEQ applies the NEQ predicate on the "purchase_time" field.
func PurchaseTimeNEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldPurchaseTime, v))
}
// PurchaseTimeIn applies the In predicate on the "purchase_time" field.
func PurchaseTimeIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldPurchaseTime), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldPurchaseTime, vs...))
}
// PurchaseTimeNotIn applies the NotIn predicate on the "purchase_time" field.
func PurchaseTimeNotIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldPurchaseTime), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldPurchaseTime, vs...))
}
// PurchaseTimeGT applies the GT predicate on the "purchase_time" field.
func PurchaseTimeGT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldGT(FieldPurchaseTime, v))
}
// PurchaseTimeGTE applies the GTE predicate on the "purchase_time" field.
func PurchaseTimeGTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldPurchaseTime, v))
}
// PurchaseTimeLT applies the LT predicate on the "purchase_time" field.
func PurchaseTimeLT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldLT(FieldPurchaseTime, v))
}
// PurchaseTimeLTE applies the LTE predicate on the "purchase_time" field.
func PurchaseTimeLTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldPurchaseTime), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldPurchaseTime, v))
}
// PurchaseTimeIsNil applies the IsNil predicate on the "purchase_time" field.
func PurchaseTimeIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldPurchaseTime)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldPurchaseTime))
}
// PurchaseTimeNotNil applies the NotNil predicate on the "purchase_time" field.
func PurchaseTimeNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldPurchaseTime)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldPurchaseTime))
}
// PurchaseFromEQ applies the EQ predicate on the "purchase_from" field.
func PurchaseFromEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchaseFrom, v))
}
// PurchaseFromNEQ applies the NEQ predicate on the "purchase_from" field.
func PurchaseFromNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldPurchaseFrom, v))
}
// PurchaseFromIn applies the In predicate on the "purchase_from" field.
func PurchaseFromIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldPurchaseFrom), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldPurchaseFrom, vs...))
}
// PurchaseFromNotIn applies the NotIn predicate on the "purchase_from" field.
func PurchaseFromNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldPurchaseFrom), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldPurchaseFrom, vs...))
}
// PurchaseFromGT applies the GT predicate on the "purchase_from" field.
func PurchaseFromGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldGT(FieldPurchaseFrom, v))
}
// PurchaseFromGTE applies the GTE predicate on the "purchase_from" field.
func PurchaseFromGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldPurchaseFrom, v))
}
// PurchaseFromLT applies the LT predicate on the "purchase_from" field.
func PurchaseFromLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldLT(FieldPurchaseFrom, v))
}
// PurchaseFromLTE applies the LTE predicate on the "purchase_from" field.
func PurchaseFromLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldPurchaseFrom, v))
}
// PurchaseFromContains applies the Contains predicate on the "purchase_from" field.
func PurchaseFromContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldContains(FieldPurchaseFrom, v))
}
// PurchaseFromHasPrefix applies the HasPrefix predicate on the "purchase_from" field.
func PurchaseFromHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldPurchaseFrom, v))
}
// PurchaseFromHasSuffix applies the HasSuffix predicate on the "purchase_from" field.
func PurchaseFromHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldPurchaseFrom, v))
}
// PurchaseFromIsNil applies the IsNil predicate on the "purchase_from" field.
func PurchaseFromIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldPurchaseFrom)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldPurchaseFrom))
}
// PurchaseFromNotNil applies the NotNil predicate on the "purchase_from" field.
func PurchaseFromNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldPurchaseFrom)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldPurchaseFrom))
}
// PurchaseFromEqualFold applies the EqualFold predicate on the "purchase_from" field.
func PurchaseFromEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldPurchaseFrom, v))
}
// PurchaseFromContainsFold applies the ContainsFold predicate on the "purchase_from" field.
func PurchaseFromContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldPurchaseFrom), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldPurchaseFrom, v))
}
// PurchasePriceEQ applies the EQ predicate on the "purchase_price" field.
func PurchasePriceEQ(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldPurchasePrice, v))
}
// PurchasePriceNEQ applies the NEQ predicate on the "purchase_price" field.
func PurchasePriceNEQ(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldPurchasePrice, v))
}
// PurchasePriceIn applies the In predicate on the "purchase_price" field.
func PurchasePriceIn(vs ...float64) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldPurchasePrice), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldPurchasePrice, vs...))
}
// PurchasePriceNotIn applies the NotIn predicate on the "purchase_price" field.
func PurchasePriceNotIn(vs ...float64) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldPurchasePrice), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldPurchasePrice, vs...))
}
// PurchasePriceGT applies the GT predicate on the "purchase_price" field.
func PurchasePriceGT(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldGT(FieldPurchasePrice, v))
}
// PurchasePriceGTE applies the GTE predicate on the "purchase_price" field.
func PurchasePriceGTE(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldPurchasePrice, v))
}
// PurchasePriceLT applies the LT predicate on the "purchase_price" field.
func PurchasePriceLT(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldLT(FieldPurchasePrice, v))
}
// PurchasePriceLTE applies the LTE predicate on the "purchase_price" field.
func PurchasePriceLTE(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldPurchasePrice), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldPurchasePrice, v))
}
// SoldTimeEQ applies the EQ predicate on the "sold_time" field.
func SoldTimeEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldTime, v))
}
// SoldTimeNEQ applies the NEQ predicate on the "sold_time" field.
func SoldTimeNEQ(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldSoldTime, v))
}
// SoldTimeIn applies the In predicate on the "sold_time" field.
func SoldTimeIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldSoldTime), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldSoldTime, vs...))
}
// SoldTimeNotIn applies the NotIn predicate on the "sold_time" field.
func SoldTimeNotIn(vs ...time.Time) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldSoldTime), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldSoldTime, vs...))
}
// SoldTimeGT applies the GT predicate on the "sold_time" field.
func SoldTimeGT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldGT(FieldSoldTime, v))
}
// SoldTimeGTE applies the GTE predicate on the "sold_time" field.
func SoldTimeGTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldSoldTime, v))
}
// SoldTimeLT applies the LT predicate on the "sold_time" field.
func SoldTimeLT(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldLT(FieldSoldTime, v))
}
// SoldTimeLTE applies the LTE predicate on the "sold_time" field.
func SoldTimeLTE(v time.Time) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldSoldTime), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldSoldTime, v))
}
// SoldTimeIsNil applies the IsNil predicate on the "sold_time" field.
func SoldTimeIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldSoldTime)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldSoldTime))
}
// SoldTimeNotNil applies the NotNil predicate on the "sold_time" field.
func SoldTimeNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldSoldTime)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldSoldTime))
}
// SoldToEQ applies the EQ predicate on the "sold_to" field.
func SoldToEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldTo, v))
}
// SoldToNEQ applies the NEQ predicate on the "sold_to" field.
func SoldToNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldSoldTo, v))
}
// SoldToIn applies the In predicate on the "sold_to" field.
func SoldToIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldSoldTo), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldSoldTo, vs...))
}
// SoldToNotIn applies the NotIn predicate on the "sold_to" field.
func SoldToNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldSoldTo), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldSoldTo, vs...))
}
// SoldToGT applies the GT predicate on the "sold_to" field.
func SoldToGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldGT(FieldSoldTo, v))
}
// SoldToGTE applies the GTE predicate on the "sold_to" field.
func SoldToGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldSoldTo, v))
}
// SoldToLT applies the LT predicate on the "sold_to" field.
func SoldToLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldLT(FieldSoldTo, v))
}
// SoldToLTE applies the LTE predicate on the "sold_to" field.
func SoldToLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldSoldTo, v))
}
// SoldToContains applies the Contains predicate on the "sold_to" field.
func SoldToContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldContains(FieldSoldTo, v))
}
// SoldToHasPrefix applies the HasPrefix predicate on the "sold_to" field.
func SoldToHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldSoldTo, v))
}
// SoldToHasSuffix applies the HasSuffix predicate on the "sold_to" field.
func SoldToHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldSoldTo, v))
}
// SoldToIsNil applies the IsNil predicate on the "sold_to" field.
func SoldToIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldSoldTo)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldSoldTo))
}
// SoldToNotNil applies the NotNil predicate on the "sold_to" field.
func SoldToNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldSoldTo)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldSoldTo))
}
// SoldToEqualFold applies the EqualFold predicate on the "sold_to" field.
func SoldToEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldSoldTo, v))
}
// SoldToContainsFold applies the ContainsFold predicate on the "sold_to" field.
func SoldToContainsFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldSoldTo), v))
- })
+ return predicate.Item(sql.FieldContainsFold(FieldSoldTo, v))
}
// SoldPriceEQ applies the EQ predicate on the "sold_price" field.
func SoldPriceEQ(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldPrice, v))
}
// SoldPriceNEQ applies the NEQ predicate on the "sold_price" field.
func SoldPriceNEQ(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldSoldPrice, v))
}
// SoldPriceIn applies the In predicate on the "sold_price" field.
func SoldPriceIn(vs ...float64) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldSoldPrice), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldSoldPrice, vs...))
}
// SoldPriceNotIn applies the NotIn predicate on the "sold_price" field.
func SoldPriceNotIn(vs ...float64) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldSoldPrice), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldSoldPrice, vs...))
}
// SoldPriceGT applies the GT predicate on the "sold_price" field.
func SoldPriceGT(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldGT(FieldSoldPrice, v))
}
// SoldPriceGTE applies the GTE predicate on the "sold_price" field.
func SoldPriceGTE(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldSoldPrice, v))
}
// SoldPriceLT applies the LT predicate on the "sold_price" field.
func SoldPriceLT(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldLT(FieldSoldPrice, v))
}
// SoldPriceLTE applies the LTE predicate on the "sold_price" field.
func SoldPriceLTE(v float64) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldSoldPrice), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldSoldPrice, v))
}
// SoldNotesEQ applies the EQ predicate on the "sold_notes" field.
func SoldNotesEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldEQ(FieldSoldNotes, v))
}
// SoldNotesNEQ applies the NEQ predicate on the "sold_notes" field.
func SoldNotesNEQ(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldNEQ(FieldSoldNotes, v))
}
// SoldNotesIn applies the In predicate on the "sold_notes" field.
func SoldNotesIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldSoldNotes), v...))
- })
+ return predicate.Item(sql.FieldIn(FieldSoldNotes, vs...))
}
// SoldNotesNotIn applies the NotIn predicate on the "sold_notes" field.
func SoldNotesNotIn(vs ...string) predicate.Item {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldSoldNotes), v...))
- })
+ return predicate.Item(sql.FieldNotIn(FieldSoldNotes, vs...))
}
// SoldNotesGT applies the GT predicate on the "sold_notes" field.
func SoldNotesGT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldGT(FieldSoldNotes, v))
}
// SoldNotesGTE applies the GTE predicate on the "sold_notes" field.
func SoldNotesGTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldGTE(FieldSoldNotes, v))
}
// SoldNotesLT applies the LT predicate on the "sold_notes" field.
func SoldNotesLT(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldLT(FieldSoldNotes, v))
}
// SoldNotesLTE applies the LTE predicate on the "sold_notes" field.
func SoldNotesLTE(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldLTE(FieldSoldNotes, v))
}
// SoldNotesContains applies the Contains predicate on the "sold_notes" field.
func SoldNotesContains(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldContains(FieldSoldNotes, v))
}
// SoldNotesHasPrefix applies the HasPrefix predicate on the "sold_notes" field.
func SoldNotesHasPrefix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldHasPrefix(FieldSoldNotes, v))
}
// SoldNotesHasSuffix applies the HasSuffix predicate on the "sold_notes" field.
func SoldNotesHasSuffix(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldHasSuffix(FieldSoldNotes, v))
}
// SoldNotesIsNil applies the IsNil predicate on the "sold_notes" field.
func SoldNotesIsNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldSoldNotes)))
- })
+ return predicate.Item(sql.FieldIsNull(FieldSoldNotes))
}
// SoldNotesNotNil applies the NotNil predicate on the "sold_notes" field.
func SoldNotesNotNil() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldSoldNotes)))
- })
+ return predicate.Item(sql.FieldNotNull(FieldSoldNotes))
}
// SoldNotesEqualFold applies the EqualFold predicate on the "sold_notes" field.
func SoldNotesEqualFold(v string) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldSoldNotes), v))
- })
+ return predicate.Item(sql.FieldEqualFold(FieldSoldNotes, v))
}
// SoldNotesContainsFold applies the ContainsFold predicate on the "sold_notes" field.
func SoldNotesContainsFold(v string) predicate.Item {
+ return predicate.Item(sql.FieldContainsFold(FieldSoldNotes, v))
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldSoldNotes), v))
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.Item {
+ return predicate.Item(func(s *sql.Selector) {
+ step := newGroupStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
})
}
@@ -2066,7 +1434,6 @@ func HasParent() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ParentTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -2076,11 +1443,7 @@ func HasParent() predicate.Item {
// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
func HasParentWith(preds ...predicate.Item) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(Table, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
- )
+ step := newParentStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2094,7 +1457,6 @@ func HasChildren() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ChildrenTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -2104,39 +1466,7 @@ func HasChildren() predicate.Item {
// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
func HasChildrenWith(preds ...predicate.Item) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(Table, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
- )
- sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
- for _, p := range preds {
- p(s)
- }
- })
- })
-}
-
-// HasGroup applies the HasEdge predicate on the "group" edge.
-func HasGroup() predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
- sqlgraph.HasNeighbors(s, step)
- })
-}
-
-// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
-func HasGroupWith(preds ...predicate.Group) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
+ step := newChildrenStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2150,7 +1480,6 @@ func HasLabel() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(LabelTable, FieldID),
sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...),
)
sqlgraph.HasNeighbors(s, step)
@@ -2160,11 +1489,7 @@ func HasLabel() predicate.Item {
// HasLabelWith applies the HasEdge predicate on the "label" edge with a given conditions (other predicates).
func HasLabelWith(preds ...predicate.Label) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(LabelInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...),
- )
+ step := newLabelStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2178,7 +1503,6 @@ func HasLocation() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(LocationTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -2188,11 +1512,7 @@ func HasLocation() predicate.Item {
// HasLocationWith applies the HasEdge predicate on the "location" edge with a given conditions (other predicates).
func HasLocationWith(preds ...predicate.Location) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(LocationInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn),
- )
+ step := newLocationStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2206,7 +1526,6 @@ func HasFields() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(FieldsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -2215,12 +1534,31 @@ func HasFields() predicate.Item {
// HasFieldsWith applies the HasEdge predicate on the "fields" edge with a given conditions (other predicates).
func HasFieldsWith(preds ...predicate.ItemField) predicate.Item {
+ return predicate.Item(func(s *sql.Selector) {
+ step := newFieldsStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasMaintenanceEntries applies the HasEdge predicate on the "maintenance_entries" edge.
+func HasMaintenanceEntries() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(FieldsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn),
+ sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn),
)
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasMaintenanceEntriesWith applies the HasEdge predicate on the "maintenance_entries" edge with a given conditions (other predicates).
+func HasMaintenanceEntriesWith(preds ...predicate.MaintenanceEntry) predicate.Item {
+ return predicate.Item(func(s *sql.Selector) {
+ step := newMaintenanceEntriesStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2234,7 +1572,6 @@ func HasAttachments() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(AttachmentsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -2244,11 +1581,7 @@ func HasAttachments() predicate.Item {
// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates).
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(AttachmentsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
- )
+ step := newAttachmentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -2259,32 +1592,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Item) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Item(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Item) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Item(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Item) predicate.Item {
- return predicate.Item(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Item(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/item_create.go b/backend/internal/data/ent/item_create.go
index f4de18e..9eb1cb6 100644
--- a/backend/internal/data/ent/item_create.go
+++ b/backend/internal/data/ent/item_create.go
@@ -17,6 +17,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
)
// ItemCreate is the builder for creating a Item entity.
@@ -144,6 +145,20 @@ func (ic *ItemCreate) SetNillableArchived(b *bool) *ItemCreate {
return ic
}
+// SetAssetID sets the "asset_id" field.
+func (ic *ItemCreate) SetAssetID(i int) *ItemCreate {
+ ic.mutation.SetAssetID(i)
+ return ic
+}
+
+// SetNillableAssetID sets the "asset_id" field if the given value is not nil.
+func (ic *ItemCreate) SetNillableAssetID(i *int) *ItemCreate {
+ if i != nil {
+ ic.SetAssetID(*i)
+ }
+ return ic
+}
+
// SetSerialNumber sets the "serial_number" field.
func (ic *ItemCreate) SetSerialNumber(s string) *ItemCreate {
ic.mutation.SetSerialNumber(s)
@@ -340,6 +355,17 @@ func (ic *ItemCreate) SetNillableID(u *uuid.UUID) *ItemCreate {
return ic
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate {
+ ic.mutation.SetGroupID(id)
+ return ic
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate {
+ return ic.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Item entity by ID.
func (ic *ItemCreate) SetParentID(id uuid.UUID) *ItemCreate {
ic.mutation.SetParentID(id)
@@ -374,17 +400,6 @@ func (ic *ItemCreate) AddChildren(i ...*Item) *ItemCreate {
return ic.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate {
- ic.mutation.SetGroupID(id)
- return ic
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate {
- return ic.SetGroupID(g.ID)
-}
-
// AddLabelIDs adds the "label" edge to the Label entity by IDs.
func (ic *ItemCreate) AddLabelIDs(ids ...uuid.UUID) *ItemCreate {
ic.mutation.AddLabelIDs(ids...)
@@ -434,6 +449,21 @@ func (ic *ItemCreate) AddFields(i ...*ItemField) *ItemCreate {
return ic.AddFieldIDs(ids...)
}
+// AddMaintenanceEntryIDs adds the "maintenance_entries" edge to the MaintenanceEntry entity by IDs.
+func (ic *ItemCreate) AddMaintenanceEntryIDs(ids ...uuid.UUID) *ItemCreate {
+ ic.mutation.AddMaintenanceEntryIDs(ids...)
+ return ic
+}
+
+// AddMaintenanceEntries adds the "maintenance_entries" edges to the MaintenanceEntry entity.
+func (ic *ItemCreate) AddMaintenanceEntries(m ...*MaintenanceEntry) *ItemCreate {
+ ids := make([]uuid.UUID, len(m))
+ for i := range m {
+ ids[i] = m[i].ID
+ }
+ return ic.AddMaintenanceEntryIDs(ids...)
+}
+
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (ic *ItemCreate) AddAttachmentIDs(ids ...uuid.UUID) *ItemCreate {
ic.mutation.AddAttachmentIDs(ids...)
@@ -456,50 +486,8 @@ func (ic *ItemCreate) Mutation() *ItemMutation {
// Save creates the Item in the database.
func (ic *ItemCreate) Save(ctx context.Context) (*Item, error) {
- var (
- err error
- node *Item
- )
ic.defaults()
- if len(ic.hooks) == 0 {
- if err = ic.check(); err != nil {
- return nil, err
- }
- node, err = ic.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = ic.check(); err != nil {
- return nil, err
- }
- ic.mutation = mutation
- if node, err = ic.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(ic.hooks) - 1; i >= 0; i-- {
- if ic.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ic.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, ic.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Item)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from ItemMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -546,6 +534,10 @@ func (ic *ItemCreate) defaults() {
v := item.DefaultArchived
ic.mutation.SetArchived(v)
}
+ if _, ok := ic.mutation.AssetID(); !ok {
+ v := item.DefaultAssetID
+ ic.mutation.SetAssetID(v)
+ }
if _, ok := ic.mutation.LifetimeWarranty(); !ok {
v := item.DefaultLifetimeWarranty
ic.mutation.SetLifetimeWarranty(v)
@@ -604,6 +596,9 @@ func (ic *ItemCreate) check() error {
if _, ok := ic.mutation.Archived(); !ok {
return &ValidationError{Name: "archived", err: errors.New(`ent: missing required field "Item.archived"`)}
}
+ if _, ok := ic.mutation.AssetID(); !ok {
+ return &ValidationError{Name: "asset_id", err: errors.New(`ent: missing required field "Item.asset_id"`)}
+ }
if v, ok := ic.mutation.SerialNumber(); ok {
if err := item.SerialNumberValidator(v); err != nil {
return &ValidationError{Name: "serial_number", err: fmt.Errorf(`ent: validator failed for field "Item.serial_number": %w`, err)}
@@ -645,6 +640,9 @@ func (ic *ItemCreate) check() error {
}
func (ic *ItemCreate) sqlSave(ctx context.Context) (*Item, error) {
+ if err := ic.check(); err != nil {
+ return nil, err
+ }
_node, _spec := ic.createSpec()
if err := sqlgraph.CreateNode(ctx, ic.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -659,200 +657,129 @@ func (ic *ItemCreate) sqlSave(ctx context.Context) (*Item, error) {
return nil, err
}
}
+ ic.mutation.id = &_node.ID
+ ic.mutation.done = true
return _node, nil
}
func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
var (
_node = &Item{config: ic.config}
- _spec = &sqlgraph.CreateSpec{
- Table: item.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(item.Table, sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID))
)
if id, ok := ic.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := ic.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldCreatedAt,
- })
+ _spec.SetField(item.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := ic.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldUpdatedAt,
- })
+ _spec.SetField(item.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := ic.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldName,
- })
+ _spec.SetField(item.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := ic.mutation.Description(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldDescription,
- })
+ _spec.SetField(item.FieldDescription, field.TypeString, value)
_node.Description = value
}
if value, ok := ic.mutation.ImportRef(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldImportRef,
- })
+ _spec.SetField(item.FieldImportRef, field.TypeString, value)
_node.ImportRef = value
}
if value, ok := ic.mutation.Notes(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldNotes,
- })
+ _spec.SetField(item.FieldNotes, field.TypeString, value)
_node.Notes = value
}
if value, ok := ic.mutation.Quantity(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: item.FieldQuantity,
- })
+ _spec.SetField(item.FieldQuantity, field.TypeInt, value)
_node.Quantity = value
}
if value, ok := ic.mutation.Insured(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldInsured,
- })
+ _spec.SetField(item.FieldInsured, field.TypeBool, value)
_node.Insured = value
}
if value, ok := ic.mutation.Archived(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldArchived,
- })
+ _spec.SetField(item.FieldArchived, field.TypeBool, value)
_node.Archived = value
}
+ if value, ok := ic.mutation.AssetID(); ok {
+ _spec.SetField(item.FieldAssetID, field.TypeInt, value)
+ _node.AssetID = value
+ }
if value, ok := ic.mutation.SerialNumber(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSerialNumber,
- })
+ _spec.SetField(item.FieldSerialNumber, field.TypeString, value)
_node.SerialNumber = value
}
if value, ok := ic.mutation.ModelNumber(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldModelNumber,
- })
+ _spec.SetField(item.FieldModelNumber, field.TypeString, value)
_node.ModelNumber = value
}
if value, ok := ic.mutation.Manufacturer(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldManufacturer,
- })
+ _spec.SetField(item.FieldManufacturer, field.TypeString, value)
_node.Manufacturer = value
}
if value, ok := ic.mutation.LifetimeWarranty(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldLifetimeWarranty,
- })
+ _spec.SetField(item.FieldLifetimeWarranty, field.TypeBool, value)
_node.LifetimeWarranty = value
}
if value, ok := ic.mutation.WarrantyExpires(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldWarrantyExpires,
- })
+ _spec.SetField(item.FieldWarrantyExpires, field.TypeTime, value)
_node.WarrantyExpires = value
}
if value, ok := ic.mutation.WarrantyDetails(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldWarrantyDetails,
- })
+ _spec.SetField(item.FieldWarrantyDetails, field.TypeString, value)
_node.WarrantyDetails = value
}
if value, ok := ic.mutation.PurchaseTime(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldPurchaseTime,
- })
+ _spec.SetField(item.FieldPurchaseTime, field.TypeTime, value)
_node.PurchaseTime = value
}
if value, ok := ic.mutation.PurchaseFrom(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldPurchaseFrom,
- })
+ _spec.SetField(item.FieldPurchaseFrom, field.TypeString, value)
_node.PurchaseFrom = value
}
if value, ok := ic.mutation.PurchasePrice(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldPurchasePrice,
- })
+ _spec.SetField(item.FieldPurchasePrice, field.TypeFloat64, value)
_node.PurchasePrice = value
}
if value, ok := ic.mutation.SoldTime(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldSoldTime,
- })
+ _spec.SetField(item.FieldSoldTime, field.TypeTime, value)
_node.SoldTime = value
}
if value, ok := ic.mutation.SoldTo(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldTo,
- })
+ _spec.SetField(item.FieldSoldTo, field.TypeString, value)
_node.SoldTo = value
}
if value, ok := ic.mutation.SoldPrice(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldSoldPrice,
- })
+ _spec.SetField(item.FieldSoldPrice, field.TypeFloat64, value)
_node.SoldPrice = value
}
if value, ok := ic.mutation.SoldNotes(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldNotes,
- })
+ _spec.SetField(item.FieldSoldNotes, field.TypeString, value)
_node.SoldNotes = value
}
+ if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: item.GroupTable,
+ Columns: []string{item.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.group_items = &nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
if nodes := ic.mutation.ParentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -861,10 +788,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -881,10 +805,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -892,26 +813,6 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
- if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: item.GroupTable,
- Columns: []string{item.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _node.group_items = &nodes[0]
- _spec.Edges = append(_spec.Edges, edge)
- }
if nodes := ic.mutation.LabelIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2M,
@@ -920,10 +821,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -939,10 +837,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -959,10 +854,23 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := ic.mutation.MaintenanceEntriesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -978,10 +886,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -995,11 +900,15 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
// ItemCreateBulk is the builder for creating many Item entities in bulk.
type ItemCreateBulk struct {
config
+ err error
builders []*ItemCreate
}
// Save creates the Item entities in the database.
func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) {
+ if icb.err != nil {
+ return nil, icb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(icb.builders))
nodes := make([]*Item, len(icb.builders))
mutators := make([]Mutator, len(icb.builders))
@@ -1016,8 +925,8 @@ func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/item_delete.go b/backend/internal/data/ent/item_delete.go
index 56b7e03..d634d5d 100644
--- a/backend/internal/data/ent/item_delete.go
+++ b/backend/internal/data/ent/item_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (id *ItemDelete) Where(ps ...predicate.Item) *ItemDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (id *ItemDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(id.hooks) == 0 {
- affected, err = id.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- id.mutation = mutation
- affected, err = id.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(id.hooks) - 1; i >= 0; i-- {
- if id.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = id.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, id.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, id.sqlExec, id.mutation, id.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (id *ItemDelete) ExecX(ctx context.Context) int {
}
func (id *ItemDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: item.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(item.Table, sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID))
if ps := id.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (id *ItemDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ id.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type ItemDeleteOne struct {
id *ItemDelete
}
+// Where appends a list predicates to the ItemDelete builder.
+func (ido *ItemDeleteOne) Where(ps ...predicate.Item) *ItemDeleteOne {
+ ido.id.mutation.Where(ps...)
+ return ido
+}
+
// Exec executes the deletion query.
func (ido *ItemDeleteOne) Exec(ctx context.Context) error {
n, err := ido.id.Exec(ctx)
@@ -111,5 +82,7 @@ func (ido *ItemDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ido *ItemDeleteOne) ExecX(ctx context.Context) {
- ido.id.ExecX(ctx)
+ if err := ido.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/item_query.go b/backend/internal/data/ent/item_query.go
index fba0917..12fc331 100644
--- a/backend/internal/data/ent/item_query.go
+++ b/backend/internal/data/ent/item_query.go
@@ -18,26 +18,26 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
// ItemQuery is the builder for querying Item entities.
type ItemQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
- predicates []predicate.Item
- withParent *ItemQuery
- withChildren *ItemQuery
- withGroup *GroupQuery
- withLabel *LabelQuery
- withLocation *LocationQuery
- withFields *ItemFieldQuery
- withAttachments *AttachmentQuery
- withFKs bool
+ ctx *QueryContext
+ order []item.OrderOption
+ inters []Interceptor
+ predicates []predicate.Item
+ withGroup *GroupQuery
+ withParent *ItemQuery
+ withChildren *ItemQuery
+ withLabel *LabelQuery
+ withLocation *LocationQuery
+ withFields *ItemFieldQuery
+ withMaintenanceEntries *MaintenanceEntryQuery
+ withAttachments *AttachmentQuery
+ withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
@@ -49,34 +49,56 @@ func (iq *ItemQuery) Where(ps ...predicate.Item) *ItemQuery {
return iq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (iq *ItemQuery) Limit(limit int) *ItemQuery {
- iq.limit = &limit
+ iq.ctx.Limit = &limit
return iq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (iq *ItemQuery) Offset(offset int) *ItemQuery {
- iq.offset = &offset
+ iq.ctx.Offset = &offset
return iq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (iq *ItemQuery) Unique(unique bool) *ItemQuery {
- iq.unique = &unique
+ iq.ctx.Unique = &unique
return iq
}
-// Order adds an order step to the query.
-func (iq *ItemQuery) Order(o ...OrderFunc) *ItemQuery {
+// Order specifies how the records should be ordered.
+func (iq *ItemQuery) Order(o ...item.OrderOption) *ItemQuery {
iq.order = append(iq.order, o...)
return iq
}
+// QueryGroup chains the current query on the "group" edge.
+func (iq *ItemQuery) QueryGroup() *GroupQuery {
+ query := (&GroupClient{config: iq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := iq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := iq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(item.Table, item.FieldID, selector),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// QueryParent chains the current query on the "parent" edge.
func (iq *ItemQuery) QueryParent() *ItemQuery {
- query := &ItemQuery{config: iq.config}
+ query := (&ItemClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -98,7 +120,7 @@ func (iq *ItemQuery) QueryParent() *ItemQuery {
// QueryChildren chains the current query on the "children" edge.
func (iq *ItemQuery) QueryChildren() *ItemQuery {
- query := &ItemQuery{config: iq.config}
+ query := (&ItemClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -118,31 +140,9 @@ func (iq *ItemQuery) QueryChildren() *ItemQuery {
return query
}
-// QueryGroup chains the current query on the "group" edge.
-func (iq *ItemQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: iq.config}
- query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
- if err := iq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- selector := iq.sqlQuery(ctx)
- if err := selector.Err(); err != nil {
- return nil, err
- }
- step := sqlgraph.NewStep(
- sqlgraph.From(item.Table, item.FieldID, selector),
- sqlgraph.To(group.Table, group.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
- )
- fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step)
- return fromU, nil
- }
- return query
-}
-
// QueryLabel chains the current query on the "label" edge.
func (iq *ItemQuery) QueryLabel() *LabelQuery {
- query := &LabelQuery{config: iq.config}
+ query := (&LabelClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -164,7 +164,7 @@ func (iq *ItemQuery) QueryLabel() *LabelQuery {
// QueryLocation chains the current query on the "location" edge.
func (iq *ItemQuery) QueryLocation() *LocationQuery {
- query := &LocationQuery{config: iq.config}
+ query := (&LocationClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -186,7 +186,7 @@ func (iq *ItemQuery) QueryLocation() *LocationQuery {
// QueryFields chains the current query on the "fields" edge.
func (iq *ItemQuery) QueryFields() *ItemFieldQuery {
- query := &ItemFieldQuery{config: iq.config}
+ query := (&ItemFieldClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -206,9 +206,31 @@ func (iq *ItemQuery) QueryFields() *ItemFieldQuery {
return query
}
+// QueryMaintenanceEntries chains the current query on the "maintenance_entries" edge.
+func (iq *ItemQuery) QueryMaintenanceEntries() *MaintenanceEntryQuery {
+ query := (&MaintenanceEntryClient{config: iq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := iq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := iq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(item.Table, item.FieldID, selector),
+ sqlgraph.To(maintenanceentry.Table, maintenanceentry.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, item.MaintenanceEntriesTable, item.MaintenanceEntriesColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// QueryAttachments chains the current query on the "attachments" edge.
func (iq *ItemQuery) QueryAttachments() *AttachmentQuery {
- query := &AttachmentQuery{config: iq.config}
+ query := (&AttachmentClient{config: iq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
@@ -231,7 +253,7 @@ func (iq *ItemQuery) QueryAttachments() *AttachmentQuery {
// First returns the first Item entity from the query.
// Returns a *NotFoundError when no Item was found.
func (iq *ItemQuery) First(ctx context.Context) (*Item, error) {
- nodes, err := iq.Limit(1).All(ctx)
+ nodes, err := iq.Limit(1).All(setContextOp(ctx, iq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -254,7 +276,7 @@ func (iq *ItemQuery) FirstX(ctx context.Context) *Item {
// Returns a *NotFoundError when no Item ID was found.
func (iq *ItemQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = iq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = iq.Limit(1).IDs(setContextOp(ctx, iq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -277,7 +299,7 @@ func (iq *ItemQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Item entity is found.
// Returns a *NotFoundError when no Item entities are found.
func (iq *ItemQuery) Only(ctx context.Context) (*Item, error) {
- nodes, err := iq.Limit(2).All(ctx)
+ nodes, err := iq.Limit(2).All(setContextOp(ctx, iq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -305,7 +327,7 @@ func (iq *ItemQuery) OnlyX(ctx context.Context) *Item {
// Returns a *NotFoundError when no entities are found.
func (iq *ItemQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = iq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = iq.Limit(2).IDs(setContextOp(ctx, iq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -330,10 +352,12 @@ func (iq *ItemQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Items.
func (iq *ItemQuery) All(ctx context.Context) ([]*Item, error) {
+ ctx = setContextOp(ctx, iq.ctx, "All")
if err := iq.prepareQuery(ctx); err != nil {
return nil, err
}
- return iq.sqlAll(ctx)
+ qr := querierAll[[]*Item, *ItemQuery]()
+ return withInterceptors[[]*Item](ctx, iq, qr, iq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -346,9 +370,12 @@ func (iq *ItemQuery) AllX(ctx context.Context) []*Item {
}
// IDs executes the query and returns a list of Item IDs.
-func (iq *ItemQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := iq.Select(item.FieldID).Scan(ctx, &ids); err != nil {
+func (iq *ItemQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if iq.ctx.Unique == nil && iq.path != nil {
+ iq.Unique(true)
+ }
+ ctx = setContextOp(ctx, iq.ctx, "IDs")
+ if err = iq.Select(item.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -365,10 +392,11 @@ func (iq *ItemQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (iq *ItemQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, iq.ctx, "Count")
if err := iq.prepareQuery(ctx); err != nil {
return 0, err
}
- return iq.sqlCount(ctx)
+ return withInterceptors[int](ctx, iq, querierCount[*ItemQuery](), iq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -382,10 +410,15 @@ func (iq *ItemQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (iq *ItemQuery) Exist(ctx context.Context) (bool, error) {
- if err := iq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, iq.ctx, "Exist")
+ switch _, err := iq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return iq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -404,29 +437,40 @@ func (iq *ItemQuery) Clone() *ItemQuery {
return nil
}
return &ItemQuery{
- config: iq.config,
- limit: iq.limit,
- offset: iq.offset,
- order: append([]OrderFunc{}, iq.order...),
- predicates: append([]predicate.Item{}, iq.predicates...),
- withParent: iq.withParent.Clone(),
- withChildren: iq.withChildren.Clone(),
- withGroup: iq.withGroup.Clone(),
- withLabel: iq.withLabel.Clone(),
- withLocation: iq.withLocation.Clone(),
- withFields: iq.withFields.Clone(),
- withAttachments: iq.withAttachments.Clone(),
+ config: iq.config,
+ ctx: iq.ctx.Clone(),
+ order: append([]item.OrderOption{}, iq.order...),
+ inters: append([]Interceptor{}, iq.inters...),
+ predicates: append([]predicate.Item{}, iq.predicates...),
+ withGroup: iq.withGroup.Clone(),
+ withParent: iq.withParent.Clone(),
+ withChildren: iq.withChildren.Clone(),
+ withLabel: iq.withLabel.Clone(),
+ withLocation: iq.withLocation.Clone(),
+ withFields: iq.withFields.Clone(),
+ withMaintenanceEntries: iq.withMaintenanceEntries.Clone(),
+ withAttachments: iq.withAttachments.Clone(),
// clone intermediate query.
- sql: iq.sql.Clone(),
- path: iq.path,
- unique: iq.unique,
+ sql: iq.sql.Clone(),
+ path: iq.path,
}
}
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery {
+ query := (&GroupClient{config: iq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ iq.withGroup = query
+ return iq
+}
+
// WithParent tells the query-builder to eager-load the nodes that are connected to
// the "parent" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithParent(opts ...func(*ItemQuery)) *ItemQuery {
- query := &ItemQuery{config: iq.config}
+ query := (&ItemClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -437,7 +481,7 @@ func (iq *ItemQuery) WithParent(opts ...func(*ItemQuery)) *ItemQuery {
// WithChildren tells the query-builder to eager-load the nodes that are connected to
// the "children" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithChildren(opts ...func(*ItemQuery)) *ItemQuery {
- query := &ItemQuery{config: iq.config}
+ query := (&ItemClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -445,21 +489,10 @@ func (iq *ItemQuery) WithChildren(opts ...func(*ItemQuery)) *ItemQuery {
return iq
}
-// WithGroup tells the query-builder to eager-load the nodes that are connected to
-// the "group" edge. The optional arguments are used to configure the query builder of the edge.
-func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery {
- query := &GroupQuery{config: iq.config}
- for _, opt := range opts {
- opt(query)
- }
- iq.withGroup = query
- return iq
-}
-
// WithLabel tells the query-builder to eager-load the nodes that are connected to
// the "label" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithLabel(opts ...func(*LabelQuery)) *ItemQuery {
- query := &LabelQuery{config: iq.config}
+ query := (&LabelClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -470,7 +503,7 @@ func (iq *ItemQuery) WithLabel(opts ...func(*LabelQuery)) *ItemQuery {
// WithLocation tells the query-builder to eager-load the nodes that are connected to
// the "location" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithLocation(opts ...func(*LocationQuery)) *ItemQuery {
- query := &LocationQuery{config: iq.config}
+ query := (&LocationClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -481,7 +514,7 @@ func (iq *ItemQuery) WithLocation(opts ...func(*LocationQuery)) *ItemQuery {
// WithFields tells the query-builder to eager-load the nodes that are connected to
// the "fields" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithFields(opts ...func(*ItemFieldQuery)) *ItemQuery {
- query := &ItemFieldQuery{config: iq.config}
+ query := (&ItemFieldClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -489,10 +522,21 @@ func (iq *ItemQuery) WithFields(opts ...func(*ItemFieldQuery)) *ItemQuery {
return iq
}
+// WithMaintenanceEntries tells the query-builder to eager-load the nodes that are connected to
+// the "maintenance_entries" edge. The optional arguments are used to configure the query builder of the edge.
+func (iq *ItemQuery) WithMaintenanceEntries(opts ...func(*MaintenanceEntryQuery)) *ItemQuery {
+ query := (&MaintenanceEntryClient{config: iq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ iq.withMaintenanceEntries = query
+ return iq
+}
+
// WithAttachments tells the query-builder to eager-load the nodes that are connected to
// the "attachments" edge. The optional arguments are used to configure the query builder of the edge.
func (iq *ItemQuery) WithAttachments(opts ...func(*AttachmentQuery)) *ItemQuery {
- query := &AttachmentQuery{config: iq.config}
+ query := (&AttachmentClient{config: iq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -515,16 +559,11 @@ func (iq *ItemQuery) WithAttachments(opts ...func(*AttachmentQuery)) *ItemQuery
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (iq *ItemQuery) GroupBy(field string, fields ...string) *ItemGroupBy {
- grbuild := &ItemGroupBy{config: iq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := iq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return iq.sqlQuery(ctx), nil
- }
+ iq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &ItemGroupBy{build: iq}
+ grbuild.flds = &iq.ctx.Fields
grbuild.label = item.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -541,15 +580,30 @@ func (iq *ItemQuery) GroupBy(field string, fields ...string) *ItemGroupBy {
// Select(item.FieldCreatedAt).
// Scan(ctx, &v)
func (iq *ItemQuery) Select(fields ...string) *ItemSelect {
- iq.fields = append(iq.fields, fields...)
- selbuild := &ItemSelect{ItemQuery: iq}
- selbuild.label = item.Label
- selbuild.flds, selbuild.scan = &iq.fields, selbuild.Scan
- return selbuild
+ iq.ctx.Fields = append(iq.ctx.Fields, fields...)
+ sbuild := &ItemSelect{ItemQuery: iq}
+ sbuild.label = item.Label
+ sbuild.flds, sbuild.scan = &iq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a ItemSelect configured with the given aggregations.
+func (iq *ItemQuery) Aggregate(fns ...AggregateFunc) *ItemSelect {
+ return iq.Select().Aggregate(fns...)
}
func (iq *ItemQuery) prepareQuery(ctx context.Context) error {
- for _, f := range iq.fields {
+ for _, inter := range iq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, iq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range iq.ctx.Fields {
if !item.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -569,17 +623,18 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
nodes = []*Item{}
withFKs = iq.withFKs
_spec = iq.querySpec()
- loadedTypes = [7]bool{
+ loadedTypes = [8]bool{
+ iq.withGroup != nil,
iq.withParent != nil,
iq.withChildren != nil,
- iq.withGroup != nil,
iq.withLabel != nil,
iq.withLocation != nil,
iq.withFields != nil,
+ iq.withMaintenanceEntries != nil,
iq.withAttachments != nil,
}
)
- if iq.withParent != nil || iq.withGroup != nil || iq.withLocation != nil {
+ if iq.withGroup != nil || iq.withParent != nil || iq.withLocation != nil {
withFKs = true
}
if withFKs {
@@ -603,6 +658,12 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
if len(nodes) == 0 {
return nodes, nil
}
+ if query := iq.withGroup; query != nil {
+ if err := iq.loadGroup(ctx, query, nodes, nil,
+ func(n *Item, e *Group) { n.Edges.Group = e }); err != nil {
+ return nil, err
+ }
+ }
if query := iq.withParent; query != nil {
if err := iq.loadParent(ctx, query, nodes, nil,
func(n *Item, e *Item) { n.Edges.Parent = e }); err != nil {
@@ -616,12 +677,6 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
return nil, err
}
}
- if query := iq.withGroup; query != nil {
- if err := iq.loadGroup(ctx, query, nodes, nil,
- func(n *Item, e *Group) { n.Edges.Group = e }); err != nil {
- return nil, err
- }
- }
if query := iq.withLabel; query != nil {
if err := iq.loadLabel(ctx, query, nodes,
func(n *Item) { n.Edges.Label = []*Label{} },
@@ -642,6 +697,13 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
return nil, err
}
}
+ if query := iq.withMaintenanceEntries; query != nil {
+ if err := iq.loadMaintenanceEntries(ctx, query, nodes,
+ func(n *Item) { n.Edges.MaintenanceEntries = []*MaintenanceEntry{} },
+ func(n *Item, e *MaintenanceEntry) { n.Edges.MaintenanceEntries = append(n.Edges.MaintenanceEntries, e) }); err != nil {
+ return nil, err
+ }
+ }
if query := iq.withAttachments; query != nil {
if err := iq.loadAttachments(ctx, query, nodes,
func(n *Item) { n.Edges.Attachments = []*Attachment{} },
@@ -652,6 +714,38 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
return nodes, nil
}
+func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*Item)
+ for i := range nodes {
+ if nodes[i].group_items == nil {
+ continue
+ }
+ fk := *nodes[i].group_items
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(group.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
func (iq *ItemQuery) loadParent(ctx context.Context, query *ItemQuery, nodes []*Item, init func(*Item), assign func(*Item, *Item)) error {
ids := make([]uuid.UUID, 0, len(nodes))
nodeids := make(map[uuid.UUID][]*Item)
@@ -665,6 +759,9 @@ func (iq *ItemQuery) loadParent(ctx context.Context, query *ItemQuery, nodes []*
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -693,7 +790,7 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [
}
query.withFKs = true
query.Where(predicate.Item(func(s *sql.Selector) {
- s.Where(sql.InValues(item.ChildrenColumn, fks...))
+ s.Where(sql.InValues(s.C(item.ChildrenColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -706,41 +803,12 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "item_children" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "item_children" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
return nil
}
-func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error {
- ids := make([]uuid.UUID, 0, len(nodes))
- nodeids := make(map[uuid.UUID][]*Item)
- for i := range nodes {
- if nodes[i].group_items == nil {
- continue
- }
- fk := *nodes[i].group_items
- if _, ok := nodeids[fk]; !ok {
- ids = append(ids, fk)
- }
- nodeids[fk] = append(nodeids[fk], nodes[i])
- }
- query.Where(group.IDIn(ids...))
- neighbors, err := query.All(ctx)
- if err != nil {
- return err
- }
- for _, n := range neighbors {
- nodes, ok := nodeids[n.ID]
- if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID)
- }
- for i := range nodes {
- assign(nodes[i], n)
- }
- }
- return nil
-}
func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*Item, init func(*Item), assign func(*Item, *Label)) error {
edgeIDs := make([]driver.Value, len(nodes))
byID := make(map[uuid.UUID]*Item)
@@ -764,27 +832,30 @@ func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*
if err := query.prepareQuery(ctx); err != nil {
return err
}
- neighbors, err := query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
- assign := spec.Assign
- values := spec.ScanValues
- spec.ScanValues = func(columns []string) ([]any, error) {
- values, err := values(columns[1:])
- if err != nil {
- return nil, err
+ qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+ return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
+ assign := spec.Assign
+ values := spec.ScanValues
+ spec.ScanValues = func(columns []string) ([]any, error) {
+ values, err := values(columns[1:])
+ if err != nil {
+ return nil, err
+ }
+ return append([]any{new(uuid.UUID)}, values...), nil
}
- return append([]any{new(uuid.UUID)}, values...), nil
- }
- spec.Assign = func(columns []string, values []any) error {
- outValue := *values[0].(*uuid.UUID)
- inValue := *values[1].(*uuid.UUID)
- if nids[inValue] == nil {
- nids[inValue] = map[*Item]struct{}{byID[outValue]: struct{}{}}
- return assign(columns[1:], values[1:])
+ spec.Assign = func(columns []string, values []any) error {
+ outValue := *values[0].(*uuid.UUID)
+ inValue := *values[1].(*uuid.UUID)
+ if nids[inValue] == nil {
+ nids[inValue] = map[*Item]struct{}{byID[outValue]: {}}
+ return assign(columns[1:], values[1:])
+ }
+ nids[inValue][byID[outValue]] = struct{}{}
+ return nil
}
- nids[inValue][byID[outValue]] = struct{}{}
- return nil
- }
+ })
})
+ neighbors, err := withInterceptors[[]*Label](ctx, query, qr, query.inters)
if err != nil {
return err
}
@@ -812,6 +883,9 @@ func (iq *ItemQuery) loadLocation(ctx context.Context, query *LocationQuery, nod
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(location.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -840,7 +914,7 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node
}
query.withFKs = true
query.Where(predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.InValues(item.FieldsColumn, fks...))
+ s.Where(sql.InValues(s.C(item.FieldsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -853,7 +927,37 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "item_fields" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "item_fields" returned %v for node %v`, *fk, n.ID)
+ }
+ assign(node, n)
+ }
+ return nil
+}
+func (iq *ItemQuery) loadMaintenanceEntries(ctx context.Context, query *MaintenanceEntryQuery, nodes []*Item, init func(*Item), assign func(*Item, *MaintenanceEntry)) error {
+ fks := make([]driver.Value, 0, len(nodes))
+ nodeids := make(map[uuid.UUID]*Item)
+ for i := range nodes {
+ fks = append(fks, nodes[i].ID)
+ nodeids[nodes[i].ID] = nodes[i]
+ if init != nil {
+ init(nodes[i])
+ }
+ }
+ if len(query.ctx.Fields) > 0 {
+ query.ctx.AppendFieldOnce(maintenanceentry.FieldItemID)
+ }
+ query.Where(predicate.MaintenanceEntry(func(s *sql.Selector) {
+ s.Where(sql.InValues(s.C(item.MaintenanceEntriesColumn), fks...))
+ }))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ fk := n.ItemID
+ node, ok := nodeids[fk]
+ if !ok {
+ return fmt.Errorf(`unexpected referenced foreign-key "item_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@@ -871,7 +975,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
}
query.withFKs = true
query.Where(predicate.Attachment(func(s *sql.Selector) {
- s.Where(sql.InValues(item.AttachmentsColumn, fks...))
+ s.Where(sql.InValues(s.C(item.AttachmentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -884,7 +988,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -893,41 +997,22 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
func (iq *ItemQuery) sqlCount(ctx context.Context) (int, error) {
_spec := iq.querySpec()
- _spec.Node.Columns = iq.fields
- if len(iq.fields) > 0 {
- _spec.Unique = iq.unique != nil && *iq.unique
+ _spec.Node.Columns = iq.ctx.Fields
+ if len(iq.ctx.Fields) > 0 {
+ _spec.Unique = iq.ctx.Unique != nil && *iq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, iq.driver, _spec)
}
-func (iq *ItemQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := iq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: item.Table,
- Columns: item.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
- From: iq.sql,
- Unique: true,
- }
- if unique := iq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(item.Table, item.Columns, sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID))
+ _spec.From = iq.sql
+ if unique := iq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if iq.path != nil {
+ _spec.Unique = true
}
- if fields := iq.fields; len(fields) > 0 {
+ if fields := iq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, item.FieldID)
for i := range fields {
@@ -943,10 +1028,10 @@ func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := iq.limit; limit != nil {
+ if limit := iq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := iq.offset; offset != nil {
+ if offset := iq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := iq.order; len(ps) > 0 {
@@ -962,7 +1047,7 @@ func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(iq.driver.Dialect())
t1 := builder.Table(item.Table)
- columns := iq.fields
+ columns := iq.ctx.Fields
if len(columns) == 0 {
columns = item.Columns
}
@@ -971,7 +1056,7 @@ func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = iq.sql
selector.Select(selector.Columns(columns...)...)
}
- if iq.unique != nil && *iq.unique {
+ if iq.ctx.Unique != nil && *iq.ctx.Unique {
selector.Distinct()
}
for _, p := range iq.predicates {
@@ -980,12 +1065,12 @@ func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range iq.order {
p(selector)
}
- if offset := iq.offset; offset != nil {
+ if offset := iq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := iq.limit; limit != nil {
+ if limit := iq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -993,13 +1078,8 @@ func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
// ItemGroupBy is the group-by builder for Item entities.
type ItemGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *ItemQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -1008,74 +1088,77 @@ func (igb *ItemGroupBy) Aggregate(fns ...AggregateFunc) *ItemGroupBy {
return igb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (igb *ItemGroupBy) Scan(ctx context.Context, v any) error {
- query, err := igb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, igb.build.ctx, "GroupBy")
+ if err := igb.build.prepareQuery(ctx); err != nil {
return err
}
- igb.sql = query
- return igb.sqlScan(ctx, v)
+ return scanWithInterceptors[*ItemQuery, *ItemGroupBy](ctx, igb.build, igb, igb.build.inters, v)
}
-func (igb *ItemGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range igb.fields {
- if !item.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := igb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := igb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (igb *ItemGroupBy) sqlQuery() *sql.Selector {
- selector := igb.sql.Select()
+func (igb *ItemGroupBy) sqlScan(ctx context.Context, root *ItemQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(igb.fns))
for _, fn := range igb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(igb.fields)+len(igb.fns))
- for _, f := range igb.fields {
+ columns := make([]string, 0, len(*igb.flds)+len(igb.fns))
+ for _, f := range *igb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(igb.fields...)...)
+ selector.GroupBy(selector.Columns(*igb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := igb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// ItemSelect is the builder for selecting fields of Item entities.
type ItemSelect struct {
*ItemQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (is *ItemSelect) Aggregate(fns ...AggregateFunc) *ItemSelect {
+ is.fns = append(is.fns, fns...)
+ return is
}
// Scan applies the selector query and scans the result into the given value.
func (is *ItemSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, is.ctx, "Select")
if err := is.prepareQuery(ctx); err != nil {
return err
}
- is.sql = is.ItemQuery.sqlQuery(ctx)
- return is.sqlScan(ctx, v)
+ return scanWithInterceptors[*ItemQuery, *ItemSelect](ctx, is.ItemQuery, is, is.inters, v)
}
-func (is *ItemSelect) sqlScan(ctx context.Context, v any) error {
+func (is *ItemSelect) sqlScan(ctx context.Context, root *ItemQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(is.fns))
+ for _, fn := range is.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*is.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := is.sql.Query()
+ query, args := selector.Query()
if err := is.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/item_update.go b/backend/internal/data/ent/item_update.go
index 4307051..8cd4722 100644
--- a/backend/internal/data/ent/item_update.go
+++ b/backend/internal/data/ent/item_update.go
@@ -18,6 +18,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
@@ -46,6 +47,14 @@ func (iu *ItemUpdate) SetName(s string) *ItemUpdate {
return iu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (iu *ItemUpdate) SetNillableName(s *string) *ItemUpdate {
+ if s != nil {
+ iu.SetName(*s)
+ }
+ return iu
+}
+
// SetDescription sets the "description" field.
func (iu *ItemUpdate) SetDescription(s string) *ItemUpdate {
iu.mutation.SetDescription(s)
@@ -66,6 +75,26 @@ func (iu *ItemUpdate) ClearDescription() *ItemUpdate {
return iu
}
+// SetImportRef sets the "import_ref" field.
+func (iu *ItemUpdate) SetImportRef(s string) *ItemUpdate {
+ iu.mutation.SetImportRef(s)
+ return iu
+}
+
+// SetNillableImportRef sets the "import_ref" field if the given value is not nil.
+func (iu *ItemUpdate) SetNillableImportRef(s *string) *ItemUpdate {
+ if s != nil {
+ iu.SetImportRef(*s)
+ }
+ return iu
+}
+
+// ClearImportRef clears the value of the "import_ref" field.
+func (iu *ItemUpdate) ClearImportRef() *ItemUpdate {
+ iu.mutation.ClearImportRef()
+ return iu
+}
+
// SetNotes sets the "notes" field.
func (iu *ItemUpdate) SetNotes(s string) *ItemUpdate {
iu.mutation.SetNotes(s)
@@ -135,6 +164,27 @@ func (iu *ItemUpdate) SetNillableArchived(b *bool) *ItemUpdate {
return iu
}
+// SetAssetID sets the "asset_id" field.
+func (iu *ItemUpdate) SetAssetID(i int) *ItemUpdate {
+ iu.mutation.ResetAssetID()
+ iu.mutation.SetAssetID(i)
+ return iu
+}
+
+// SetNillableAssetID sets the "asset_id" field if the given value is not nil.
+func (iu *ItemUpdate) SetNillableAssetID(i *int) *ItemUpdate {
+ if i != nil {
+ iu.SetAssetID(*i)
+ }
+ return iu
+}
+
+// AddAssetID adds i to the "asset_id" field.
+func (iu *ItemUpdate) AddAssetID(i int) *ItemUpdate {
+ iu.mutation.AddAssetID(i)
+ return iu
+}
+
// SetSerialNumber sets the "serial_number" field.
func (iu *ItemUpdate) SetSerialNumber(s string) *ItemUpdate {
iu.mutation.SetSerialNumber(s)
@@ -391,6 +441,17 @@ func (iu *ItemUpdate) ClearSoldNotes() *ItemUpdate {
return iu
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate {
+ iu.mutation.SetGroupID(id)
+ return iu
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate {
+ return iu.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Item entity by ID.
func (iu *ItemUpdate) SetParentID(id uuid.UUID) *ItemUpdate {
iu.mutation.SetParentID(id)
@@ -425,17 +486,6 @@ func (iu *ItemUpdate) AddChildren(i ...*Item) *ItemUpdate {
return iu.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate {
- iu.mutation.SetGroupID(id)
- return iu
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate {
- return iu.SetGroupID(g.ID)
-}
-
// AddLabelIDs adds the "label" edge to the Label entity by IDs.
func (iu *ItemUpdate) AddLabelIDs(ids ...uuid.UUID) *ItemUpdate {
iu.mutation.AddLabelIDs(ids...)
@@ -485,6 +535,21 @@ func (iu *ItemUpdate) AddFields(i ...*ItemField) *ItemUpdate {
return iu.AddFieldIDs(ids...)
}
+// AddMaintenanceEntryIDs adds the "maintenance_entries" edge to the MaintenanceEntry entity by IDs.
+func (iu *ItemUpdate) AddMaintenanceEntryIDs(ids ...uuid.UUID) *ItemUpdate {
+ iu.mutation.AddMaintenanceEntryIDs(ids...)
+ return iu
+}
+
+// AddMaintenanceEntries adds the "maintenance_entries" edges to the MaintenanceEntry entity.
+func (iu *ItemUpdate) AddMaintenanceEntries(m ...*MaintenanceEntry) *ItemUpdate {
+ ids := make([]uuid.UUID, len(m))
+ for i := range m {
+ ids[i] = m[i].ID
+ }
+ return iu.AddMaintenanceEntryIDs(ids...)
+}
+
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (iu *ItemUpdate) AddAttachmentIDs(ids ...uuid.UUID) *ItemUpdate {
iu.mutation.AddAttachmentIDs(ids...)
@@ -505,6 +570,12 @@ func (iu *ItemUpdate) Mutation() *ItemMutation {
return iu.mutation
}
+// ClearGroup clears the "group" edge to the Group entity.
+func (iu *ItemUpdate) ClearGroup() *ItemUpdate {
+ iu.mutation.ClearGroup()
+ return iu
+}
+
// ClearParent clears the "parent" edge to the Item entity.
func (iu *ItemUpdate) ClearParent() *ItemUpdate {
iu.mutation.ClearParent()
@@ -532,12 +603,6 @@ func (iu *ItemUpdate) RemoveChildren(i ...*Item) *ItemUpdate {
return iu.RemoveChildIDs(ids...)
}
-// ClearGroup clears the "group" edge to the Group entity.
-func (iu *ItemUpdate) ClearGroup() *ItemUpdate {
- iu.mutation.ClearGroup()
- return iu
-}
-
// ClearLabel clears all "label" edges to the Label entity.
func (iu *ItemUpdate) ClearLabel() *ItemUpdate {
iu.mutation.ClearLabel()
@@ -586,6 +651,27 @@ func (iu *ItemUpdate) RemoveFields(i ...*ItemField) *ItemUpdate {
return iu.RemoveFieldIDs(ids...)
}
+// ClearMaintenanceEntries clears all "maintenance_entries" edges to the MaintenanceEntry entity.
+func (iu *ItemUpdate) ClearMaintenanceEntries() *ItemUpdate {
+ iu.mutation.ClearMaintenanceEntries()
+ return iu
+}
+
+// RemoveMaintenanceEntryIDs removes the "maintenance_entries" edge to MaintenanceEntry entities by IDs.
+func (iu *ItemUpdate) RemoveMaintenanceEntryIDs(ids ...uuid.UUID) *ItemUpdate {
+ iu.mutation.RemoveMaintenanceEntryIDs(ids...)
+ return iu
+}
+
+// RemoveMaintenanceEntries removes "maintenance_entries" edges to MaintenanceEntry entities.
+func (iu *ItemUpdate) RemoveMaintenanceEntries(m ...*MaintenanceEntry) *ItemUpdate {
+ ids := make([]uuid.UUID, len(m))
+ for i := range m {
+ ids[i] = m[i].ID
+ }
+ return iu.RemoveMaintenanceEntryIDs(ids...)
+}
+
// ClearAttachments clears all "attachments" edges to the Attachment entity.
func (iu *ItemUpdate) ClearAttachments() *ItemUpdate {
iu.mutation.ClearAttachments()
@@ -609,41 +695,8 @@ func (iu *ItemUpdate) RemoveAttachments(a ...*Attachment) *ItemUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (iu *ItemUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
iu.defaults()
- if len(iu.hooks) == 0 {
- if err = iu.check(); err != nil {
- return 0, err
- }
- affected, err = iu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = iu.check(); err != nil {
- return 0, err
- }
- iu.mutation = mutation
- affected, err = iu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(iu.hooks) - 1; i >= 0; i-- {
- if iu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = iu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, iu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -688,6 +741,11 @@ func (iu *ItemUpdate) check() error {
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Item.description": %w`, err)}
}
}
+ if v, ok := iu.mutation.ImportRef(); ok {
+ if err := item.ImportRefValidator(v); err != nil {
+ return &ValidationError{Name: "import_ref", err: fmt.Errorf(`ent: validator failed for field "Item.import_ref": %w`, err)}
+ }
+ }
if v, ok := iu.mutation.Notes(); ok {
if err := item.NotesValidator(v); err != nil {
return &ValidationError{Name: "notes", err: fmt.Errorf(`ent: validator failed for field "Item.notes": %w`, err)}
@@ -725,16 +783,10 @@ func (iu *ItemUpdate) check() error {
}
func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: item.Table,
- Columns: item.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
+ if err := iu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(item.Table, item.Columns, sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID))
if ps := iu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -743,243 +795,150 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := iu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldUpdatedAt,
- })
+ _spec.SetField(item.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := iu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldName,
- })
+ _spec.SetField(item.FieldName, field.TypeString, value)
}
if value, ok := iu.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldDescription,
- })
+ _spec.SetField(item.FieldDescription, field.TypeString, value)
}
if iu.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldDescription,
- })
+ _spec.ClearField(item.FieldDescription, field.TypeString)
+ }
+ if value, ok := iu.mutation.ImportRef(); ok {
+ _spec.SetField(item.FieldImportRef, field.TypeString, value)
}
if iu.mutation.ImportRefCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldImportRef,
- })
+ _spec.ClearField(item.FieldImportRef, field.TypeString)
}
if value, ok := iu.mutation.Notes(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldNotes,
- })
+ _spec.SetField(item.FieldNotes, field.TypeString, value)
}
if iu.mutation.NotesCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldNotes,
- })
+ _spec.ClearField(item.FieldNotes, field.TypeString)
}
if value, ok := iu.mutation.Quantity(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: item.FieldQuantity,
- })
+ _spec.SetField(item.FieldQuantity, field.TypeInt, value)
}
if value, ok := iu.mutation.AddedQuantity(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: item.FieldQuantity,
- })
+ _spec.AddField(item.FieldQuantity, field.TypeInt, value)
}
if value, ok := iu.mutation.Insured(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldInsured,
- })
+ _spec.SetField(item.FieldInsured, field.TypeBool, value)
}
if value, ok := iu.mutation.Archived(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldArchived,
- })
+ _spec.SetField(item.FieldArchived, field.TypeBool, value)
+ }
+ if value, ok := iu.mutation.AssetID(); ok {
+ _spec.SetField(item.FieldAssetID, field.TypeInt, value)
+ }
+ if value, ok := iu.mutation.AddedAssetID(); ok {
+ _spec.AddField(item.FieldAssetID, field.TypeInt, value)
}
if value, ok := iu.mutation.SerialNumber(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSerialNumber,
- })
+ _spec.SetField(item.FieldSerialNumber, field.TypeString, value)
}
if iu.mutation.SerialNumberCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSerialNumber,
- })
+ _spec.ClearField(item.FieldSerialNumber, field.TypeString)
}
if value, ok := iu.mutation.ModelNumber(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldModelNumber,
- })
+ _spec.SetField(item.FieldModelNumber, field.TypeString, value)
}
if iu.mutation.ModelNumberCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldModelNumber,
- })
+ _spec.ClearField(item.FieldModelNumber, field.TypeString)
}
if value, ok := iu.mutation.Manufacturer(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldManufacturer,
- })
+ _spec.SetField(item.FieldManufacturer, field.TypeString, value)
}
if iu.mutation.ManufacturerCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldManufacturer,
- })
+ _spec.ClearField(item.FieldManufacturer, field.TypeString)
}
if value, ok := iu.mutation.LifetimeWarranty(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldLifetimeWarranty,
- })
+ _spec.SetField(item.FieldLifetimeWarranty, field.TypeBool, value)
}
if value, ok := iu.mutation.WarrantyExpires(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldWarrantyExpires,
- })
+ _spec.SetField(item.FieldWarrantyExpires, field.TypeTime, value)
}
if iu.mutation.WarrantyExpiresCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldWarrantyExpires,
- })
+ _spec.ClearField(item.FieldWarrantyExpires, field.TypeTime)
}
if value, ok := iu.mutation.WarrantyDetails(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldWarrantyDetails,
- })
+ _spec.SetField(item.FieldWarrantyDetails, field.TypeString, value)
}
if iu.mutation.WarrantyDetailsCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldWarrantyDetails,
- })
+ _spec.ClearField(item.FieldWarrantyDetails, field.TypeString)
}
if value, ok := iu.mutation.PurchaseTime(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldPurchaseTime,
- })
+ _spec.SetField(item.FieldPurchaseTime, field.TypeTime, value)
}
if iu.mutation.PurchaseTimeCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldPurchaseTime,
- })
+ _spec.ClearField(item.FieldPurchaseTime, field.TypeTime)
}
if value, ok := iu.mutation.PurchaseFrom(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldPurchaseFrom,
- })
+ _spec.SetField(item.FieldPurchaseFrom, field.TypeString, value)
}
if iu.mutation.PurchaseFromCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldPurchaseFrom,
- })
+ _spec.ClearField(item.FieldPurchaseFrom, field.TypeString)
}
if value, ok := iu.mutation.PurchasePrice(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldPurchasePrice,
- })
+ _spec.SetField(item.FieldPurchasePrice, field.TypeFloat64, value)
}
if value, ok := iu.mutation.AddedPurchasePrice(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldPurchasePrice,
- })
+ _spec.AddField(item.FieldPurchasePrice, field.TypeFloat64, value)
}
if value, ok := iu.mutation.SoldTime(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldSoldTime,
- })
+ _spec.SetField(item.FieldSoldTime, field.TypeTime, value)
}
if iu.mutation.SoldTimeCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldSoldTime,
- })
+ _spec.ClearField(item.FieldSoldTime, field.TypeTime)
}
if value, ok := iu.mutation.SoldTo(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldTo,
- })
+ _spec.SetField(item.FieldSoldTo, field.TypeString, value)
}
if iu.mutation.SoldToCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSoldTo,
- })
+ _spec.ClearField(item.FieldSoldTo, field.TypeString)
}
if value, ok := iu.mutation.SoldPrice(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldSoldPrice,
- })
+ _spec.SetField(item.FieldSoldPrice, field.TypeFloat64, value)
}
if value, ok := iu.mutation.AddedSoldPrice(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldSoldPrice,
- })
+ _spec.AddField(item.FieldSoldPrice, field.TypeFloat64, value)
}
if value, ok := iu.mutation.SoldNotes(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldNotes,
- })
+ _spec.SetField(item.FieldSoldNotes, field.TypeString, value)
}
if iu.mutation.SoldNotesCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSoldNotes,
- })
+ _spec.ClearField(item.FieldSoldNotes, field.TypeString)
+ }
+ if iu.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: item.GroupTable,
+ Columns: []string{item.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: item.GroupTable,
+ Columns: []string{item.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if iu.mutation.ParentCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -989,10 +948,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1005,10 +961,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1024,10 +977,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1040,10 +990,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1059,45 +1006,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if iu.mutation.GroupCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: item.GroupTable,
- Columns: []string{item.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: item.GroupTable,
- Columns: []string{item.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1113,10 +1022,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1129,10 +1035,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1148,10 +1051,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1167,10 +1067,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1183,10 +1080,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1202,10 +1096,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1218,10 +1109,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1237,10 +1125,52 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if iu.mutation.MaintenanceEntriesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iu.mutation.RemovedMaintenanceEntriesIDs(); len(nodes) > 0 && !iu.mutation.MaintenanceEntriesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iu.mutation.MaintenanceEntriesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1256,10 +1186,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1272,10 +1199,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1291,10 +1215,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1310,6 +1231,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ iu.mutation.done = true
return n, nil
}
@@ -1333,6 +1255,14 @@ func (iuo *ItemUpdateOne) SetName(s string) *ItemUpdateOne {
return iuo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (iuo *ItemUpdateOne) SetNillableName(s *string) *ItemUpdateOne {
+ if s != nil {
+ iuo.SetName(*s)
+ }
+ return iuo
+}
+
// SetDescription sets the "description" field.
func (iuo *ItemUpdateOne) SetDescription(s string) *ItemUpdateOne {
iuo.mutation.SetDescription(s)
@@ -1353,6 +1283,26 @@ func (iuo *ItemUpdateOne) ClearDescription() *ItemUpdateOne {
return iuo
}
+// SetImportRef sets the "import_ref" field.
+func (iuo *ItemUpdateOne) SetImportRef(s string) *ItemUpdateOne {
+ iuo.mutation.SetImportRef(s)
+ return iuo
+}
+
+// SetNillableImportRef sets the "import_ref" field if the given value is not nil.
+func (iuo *ItemUpdateOne) SetNillableImportRef(s *string) *ItemUpdateOne {
+ if s != nil {
+ iuo.SetImportRef(*s)
+ }
+ return iuo
+}
+
+// ClearImportRef clears the value of the "import_ref" field.
+func (iuo *ItemUpdateOne) ClearImportRef() *ItemUpdateOne {
+ iuo.mutation.ClearImportRef()
+ return iuo
+}
+
// SetNotes sets the "notes" field.
func (iuo *ItemUpdateOne) SetNotes(s string) *ItemUpdateOne {
iuo.mutation.SetNotes(s)
@@ -1422,6 +1372,27 @@ func (iuo *ItemUpdateOne) SetNillableArchived(b *bool) *ItemUpdateOne {
return iuo
}
+// SetAssetID sets the "asset_id" field.
+func (iuo *ItemUpdateOne) SetAssetID(i int) *ItemUpdateOne {
+ iuo.mutation.ResetAssetID()
+ iuo.mutation.SetAssetID(i)
+ return iuo
+}
+
+// SetNillableAssetID sets the "asset_id" field if the given value is not nil.
+func (iuo *ItemUpdateOne) SetNillableAssetID(i *int) *ItemUpdateOne {
+ if i != nil {
+ iuo.SetAssetID(*i)
+ }
+ return iuo
+}
+
+// AddAssetID adds i to the "asset_id" field.
+func (iuo *ItemUpdateOne) AddAssetID(i int) *ItemUpdateOne {
+ iuo.mutation.AddAssetID(i)
+ return iuo
+}
+
// SetSerialNumber sets the "serial_number" field.
func (iuo *ItemUpdateOne) SetSerialNumber(s string) *ItemUpdateOne {
iuo.mutation.SetSerialNumber(s)
@@ -1678,6 +1649,17 @@ func (iuo *ItemUpdateOne) ClearSoldNotes() *ItemUpdateOne {
return iuo
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne {
+ iuo.mutation.SetGroupID(id)
+ return iuo
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne {
+ return iuo.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Item entity by ID.
func (iuo *ItemUpdateOne) SetParentID(id uuid.UUID) *ItemUpdateOne {
iuo.mutation.SetParentID(id)
@@ -1712,17 +1694,6 @@ func (iuo *ItemUpdateOne) AddChildren(i ...*Item) *ItemUpdateOne {
return iuo.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne {
- iuo.mutation.SetGroupID(id)
- return iuo
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne {
- return iuo.SetGroupID(g.ID)
-}
-
// AddLabelIDs adds the "label" edge to the Label entity by IDs.
func (iuo *ItemUpdateOne) AddLabelIDs(ids ...uuid.UUID) *ItemUpdateOne {
iuo.mutation.AddLabelIDs(ids...)
@@ -1772,6 +1743,21 @@ func (iuo *ItemUpdateOne) AddFields(i ...*ItemField) *ItemUpdateOne {
return iuo.AddFieldIDs(ids...)
}
+// AddMaintenanceEntryIDs adds the "maintenance_entries" edge to the MaintenanceEntry entity by IDs.
+func (iuo *ItemUpdateOne) AddMaintenanceEntryIDs(ids ...uuid.UUID) *ItemUpdateOne {
+ iuo.mutation.AddMaintenanceEntryIDs(ids...)
+ return iuo
+}
+
+// AddMaintenanceEntries adds the "maintenance_entries" edges to the MaintenanceEntry entity.
+func (iuo *ItemUpdateOne) AddMaintenanceEntries(m ...*MaintenanceEntry) *ItemUpdateOne {
+ ids := make([]uuid.UUID, len(m))
+ for i := range m {
+ ids[i] = m[i].ID
+ }
+ return iuo.AddMaintenanceEntryIDs(ids...)
+}
+
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs.
func (iuo *ItemUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *ItemUpdateOne {
iuo.mutation.AddAttachmentIDs(ids...)
@@ -1792,6 +1778,12 @@ func (iuo *ItemUpdateOne) Mutation() *ItemMutation {
return iuo.mutation
}
+// ClearGroup clears the "group" edge to the Group entity.
+func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne {
+ iuo.mutation.ClearGroup()
+ return iuo
+}
+
// ClearParent clears the "parent" edge to the Item entity.
func (iuo *ItemUpdateOne) ClearParent() *ItemUpdateOne {
iuo.mutation.ClearParent()
@@ -1819,12 +1811,6 @@ func (iuo *ItemUpdateOne) RemoveChildren(i ...*Item) *ItemUpdateOne {
return iuo.RemoveChildIDs(ids...)
}
-// ClearGroup clears the "group" edge to the Group entity.
-func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne {
- iuo.mutation.ClearGroup()
- return iuo
-}
-
// ClearLabel clears all "label" edges to the Label entity.
func (iuo *ItemUpdateOne) ClearLabel() *ItemUpdateOne {
iuo.mutation.ClearLabel()
@@ -1873,6 +1859,27 @@ func (iuo *ItemUpdateOne) RemoveFields(i ...*ItemField) *ItemUpdateOne {
return iuo.RemoveFieldIDs(ids...)
}
+// ClearMaintenanceEntries clears all "maintenance_entries" edges to the MaintenanceEntry entity.
+func (iuo *ItemUpdateOne) ClearMaintenanceEntries() *ItemUpdateOne {
+ iuo.mutation.ClearMaintenanceEntries()
+ return iuo
+}
+
+// RemoveMaintenanceEntryIDs removes the "maintenance_entries" edge to MaintenanceEntry entities by IDs.
+func (iuo *ItemUpdateOne) RemoveMaintenanceEntryIDs(ids ...uuid.UUID) *ItemUpdateOne {
+ iuo.mutation.RemoveMaintenanceEntryIDs(ids...)
+ return iuo
+}
+
+// RemoveMaintenanceEntries removes "maintenance_entries" edges to MaintenanceEntry entities.
+func (iuo *ItemUpdateOne) RemoveMaintenanceEntries(m ...*MaintenanceEntry) *ItemUpdateOne {
+ ids := make([]uuid.UUID, len(m))
+ for i := range m {
+ ids[i] = m[i].ID
+ }
+ return iuo.RemoveMaintenanceEntryIDs(ids...)
+}
+
// ClearAttachments clears all "attachments" edges to the Attachment entity.
func (iuo *ItemUpdateOne) ClearAttachments() *ItemUpdateOne {
iuo.mutation.ClearAttachments()
@@ -1894,6 +1901,12 @@ func (iuo *ItemUpdateOne) RemoveAttachments(a ...*Attachment) *ItemUpdateOne {
return iuo.RemoveAttachmentIDs(ids...)
}
+// Where appends a list predicates to the ItemUpdate builder.
+func (iuo *ItemUpdateOne) Where(ps ...predicate.Item) *ItemUpdateOne {
+ iuo.mutation.Where(ps...)
+ return iuo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (iuo *ItemUpdateOne) Select(field string, fields ...string) *ItemUpdateOne {
@@ -1903,47 +1916,8 @@ func (iuo *ItemUpdateOne) Select(field string, fields ...string) *ItemUpdateOne
// Save executes the query and returns the updated Item entity.
func (iuo *ItemUpdateOne) Save(ctx context.Context) (*Item, error) {
- var (
- err error
- node *Item
- )
iuo.defaults()
- if len(iuo.hooks) == 0 {
- if err = iuo.check(); err != nil {
- return nil, err
- }
- node, err = iuo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = iuo.check(); err != nil {
- return nil, err
- }
- iuo.mutation = mutation
- node, err = iuo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(iuo.hooks) - 1; i >= 0; i-- {
- if iuo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = iuo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, iuo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Item)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from ItemMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -1988,6 +1962,11 @@ func (iuo *ItemUpdateOne) check() error {
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Item.description": %w`, err)}
}
}
+ if v, ok := iuo.mutation.ImportRef(); ok {
+ if err := item.ImportRefValidator(v); err != nil {
+ return &ValidationError{Name: "import_ref", err: fmt.Errorf(`ent: validator failed for field "Item.import_ref": %w`, err)}
+ }
+ }
if v, ok := iuo.mutation.Notes(); ok {
if err := item.NotesValidator(v); err != nil {
return &ValidationError{Name: "notes", err: fmt.Errorf(`ent: validator failed for field "Item.notes": %w`, err)}
@@ -2025,16 +2004,10 @@ func (iuo *ItemUpdateOne) check() error {
}
func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: item.Table,
- Columns: item.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
+ if err := iuo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(item.Table, item.Columns, sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID))
id, ok := iuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Item.id" for update`)}
@@ -2060,243 +2033,150 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
}
}
if value, ok := iuo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldUpdatedAt,
- })
+ _spec.SetField(item.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := iuo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldName,
- })
+ _spec.SetField(item.FieldName, field.TypeString, value)
}
if value, ok := iuo.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldDescription,
- })
+ _spec.SetField(item.FieldDescription, field.TypeString, value)
}
if iuo.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldDescription,
- })
+ _spec.ClearField(item.FieldDescription, field.TypeString)
+ }
+ if value, ok := iuo.mutation.ImportRef(); ok {
+ _spec.SetField(item.FieldImportRef, field.TypeString, value)
}
if iuo.mutation.ImportRefCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldImportRef,
- })
+ _spec.ClearField(item.FieldImportRef, field.TypeString)
}
if value, ok := iuo.mutation.Notes(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldNotes,
- })
+ _spec.SetField(item.FieldNotes, field.TypeString, value)
}
if iuo.mutation.NotesCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldNotes,
- })
+ _spec.ClearField(item.FieldNotes, field.TypeString)
}
if value, ok := iuo.mutation.Quantity(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: item.FieldQuantity,
- })
+ _spec.SetField(item.FieldQuantity, field.TypeInt, value)
}
if value, ok := iuo.mutation.AddedQuantity(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: item.FieldQuantity,
- })
+ _spec.AddField(item.FieldQuantity, field.TypeInt, value)
}
if value, ok := iuo.mutation.Insured(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldInsured,
- })
+ _spec.SetField(item.FieldInsured, field.TypeBool, value)
}
if value, ok := iuo.mutation.Archived(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldArchived,
- })
+ _spec.SetField(item.FieldArchived, field.TypeBool, value)
+ }
+ if value, ok := iuo.mutation.AssetID(); ok {
+ _spec.SetField(item.FieldAssetID, field.TypeInt, value)
+ }
+ if value, ok := iuo.mutation.AddedAssetID(); ok {
+ _spec.AddField(item.FieldAssetID, field.TypeInt, value)
}
if value, ok := iuo.mutation.SerialNumber(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSerialNumber,
- })
+ _spec.SetField(item.FieldSerialNumber, field.TypeString, value)
}
if iuo.mutation.SerialNumberCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSerialNumber,
- })
+ _spec.ClearField(item.FieldSerialNumber, field.TypeString)
}
if value, ok := iuo.mutation.ModelNumber(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldModelNumber,
- })
+ _spec.SetField(item.FieldModelNumber, field.TypeString, value)
}
if iuo.mutation.ModelNumberCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldModelNumber,
- })
+ _spec.ClearField(item.FieldModelNumber, field.TypeString)
}
if value, ok := iuo.mutation.Manufacturer(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldManufacturer,
- })
+ _spec.SetField(item.FieldManufacturer, field.TypeString, value)
}
if iuo.mutation.ManufacturerCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldManufacturer,
- })
+ _spec.ClearField(item.FieldManufacturer, field.TypeString)
}
if value, ok := iuo.mutation.LifetimeWarranty(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: item.FieldLifetimeWarranty,
- })
+ _spec.SetField(item.FieldLifetimeWarranty, field.TypeBool, value)
}
if value, ok := iuo.mutation.WarrantyExpires(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldWarrantyExpires,
- })
+ _spec.SetField(item.FieldWarrantyExpires, field.TypeTime, value)
}
if iuo.mutation.WarrantyExpiresCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldWarrantyExpires,
- })
+ _spec.ClearField(item.FieldWarrantyExpires, field.TypeTime)
}
if value, ok := iuo.mutation.WarrantyDetails(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldWarrantyDetails,
- })
+ _spec.SetField(item.FieldWarrantyDetails, field.TypeString, value)
}
if iuo.mutation.WarrantyDetailsCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldWarrantyDetails,
- })
+ _spec.ClearField(item.FieldWarrantyDetails, field.TypeString)
}
if value, ok := iuo.mutation.PurchaseTime(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldPurchaseTime,
- })
+ _spec.SetField(item.FieldPurchaseTime, field.TypeTime, value)
}
if iuo.mutation.PurchaseTimeCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldPurchaseTime,
- })
+ _spec.ClearField(item.FieldPurchaseTime, field.TypeTime)
}
if value, ok := iuo.mutation.PurchaseFrom(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldPurchaseFrom,
- })
+ _spec.SetField(item.FieldPurchaseFrom, field.TypeString, value)
}
if iuo.mutation.PurchaseFromCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldPurchaseFrom,
- })
+ _spec.ClearField(item.FieldPurchaseFrom, field.TypeString)
}
if value, ok := iuo.mutation.PurchasePrice(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldPurchasePrice,
- })
+ _spec.SetField(item.FieldPurchasePrice, field.TypeFloat64, value)
}
if value, ok := iuo.mutation.AddedPurchasePrice(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldPurchasePrice,
- })
+ _spec.AddField(item.FieldPurchasePrice, field.TypeFloat64, value)
}
if value, ok := iuo.mutation.SoldTime(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: item.FieldSoldTime,
- })
+ _spec.SetField(item.FieldSoldTime, field.TypeTime, value)
}
if iuo.mutation.SoldTimeCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: item.FieldSoldTime,
- })
+ _spec.ClearField(item.FieldSoldTime, field.TypeTime)
}
if value, ok := iuo.mutation.SoldTo(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldTo,
- })
+ _spec.SetField(item.FieldSoldTo, field.TypeString, value)
}
if iuo.mutation.SoldToCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSoldTo,
- })
+ _spec.ClearField(item.FieldSoldTo, field.TypeString)
}
if value, ok := iuo.mutation.SoldPrice(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldSoldPrice,
- })
+ _spec.SetField(item.FieldSoldPrice, field.TypeFloat64, value)
}
if value, ok := iuo.mutation.AddedSoldPrice(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeFloat64,
- Value: value,
- Column: item.FieldSoldPrice,
- })
+ _spec.AddField(item.FieldSoldPrice, field.TypeFloat64, value)
}
if value, ok := iuo.mutation.SoldNotes(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: item.FieldSoldNotes,
- })
+ _spec.SetField(item.FieldSoldNotes, field.TypeString, value)
}
if iuo.mutation.SoldNotesCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: item.FieldSoldNotes,
- })
+ _spec.ClearField(item.FieldSoldNotes, field.TypeString)
+ }
+ if iuo.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: item.GroupTable,
+ Columns: []string{item.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: item.GroupTable,
+ Columns: []string{item.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if iuo.mutation.ParentCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -2306,10 +2186,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2322,10 +2199,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2341,10 +2215,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2357,10 +2228,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2376,45 +2244,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if iuo.mutation.GroupCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: item.GroupTable,
- Columns: []string{item.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: item.GroupTable,
- Columns: []string{item.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2430,10 +2260,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2446,10 +2273,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2465,10 +2289,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2484,10 +2305,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2500,10 +2318,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2519,10 +2334,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2535,10 +2347,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2554,10 +2363,52 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if iuo.mutation.MaintenanceEntriesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iuo.mutation.RemovedMaintenanceEntriesIDs(); len(nodes) > 0 && !iuo.mutation.MaintenanceEntriesCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := iuo.mutation.MaintenanceEntriesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: item.MaintenanceEntriesTable,
+ Columns: []string{item.MaintenanceEntriesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2573,10 +2424,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -2589,10 +2437,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2608,10 +2453,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: attachment.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -2630,5 +2472,6 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
}
return nil, err
}
+ iuo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/itemfield.go b/backend/internal/data/ent/itemfield.go
index be68023..b2b8b8d 100644
--- a/backend/internal/data/ent/itemfield.go
+++ b/backend/internal/data/ent/itemfield.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
@@ -38,8 +39,9 @@ type ItemField struct {
TimeValue time.Time `json:"time_value,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the ItemFieldQuery when eager-loading is set.
- Edges ItemFieldEdges `json:"edges"`
- item_fields *uuid.UUID
+ Edges ItemFieldEdges `json:"edges"`
+ item_fields *uuid.UUID
+ selectValues sql.SelectValues
}
// ItemFieldEdges holds the relations/edges for other nodes in the graph.
@@ -82,7 +84,7 @@ func (*ItemField) scanValues(columns []string) ([]any, error) {
case itemfield.ForeignKeys[0]: // item_fields
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type ItemField", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -163,21 +165,29 @@ func (_if *ItemField) assignValues(columns []string, values []any) error {
_if.item_fields = new(uuid.UUID)
*_if.item_fields = *value.S.(*uuid.UUID)
}
+ default:
+ _if.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the ItemField.
+// This includes values selected through modifiers, order, etc.
+func (_if *ItemField) Value(name string) (ent.Value, error) {
+ return _if.selectValues.Get(name)
+}
+
// QueryItem queries the "item" edge of the ItemField entity.
func (_if *ItemField) QueryItem() *ItemQuery {
- return (&ItemFieldClient{config: _if.config}).QueryItem(_if)
+ return NewItemFieldClient(_if.config).QueryItem(_if)
}
// Update returns a builder for updating this ItemField.
// Note that you need to call ItemField.Unwrap() before calling this method if this ItemField
// was returned from a transaction, and the transaction was committed or rolled back.
func (_if *ItemField) Update() *ItemFieldUpdateOne {
- return (&ItemFieldClient{config: _if.config}).UpdateOne(_if)
+ return NewItemFieldClient(_if.config).UpdateOne(_if)
}
// Unwrap unwraps the ItemField entity that was returned from a transaction after it was closed,
@@ -228,9 +238,3 @@ func (_if *ItemField) String() string {
// ItemFields is a parsable slice of ItemField.
type ItemFields []*ItemField
-
-func (_if ItemFields) config(cfg config) {
- for _i := range _if {
- _if[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/itemfield/itemfield.go b/backend/internal/data/ent/itemfield/itemfield.go
index ccad0fe..dfbf378 100644
--- a/backend/internal/data/ent/itemfield/itemfield.go
+++ b/backend/internal/data/ent/itemfield/itemfield.go
@@ -6,6 +6,8 @@ import (
"fmt"
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -125,3 +127,70 @@ func TypeValidator(_type Type) error {
return fmt.Errorf("itemfield: invalid enum value for type field: %q", _type)
}
}
+
+// OrderOption defines the ordering options for the ItemField queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByDescription orders the results by the description field.
+func ByDescription(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDescription, opts...).ToFunc()
+}
+
+// ByType orders the results by the type field.
+func ByType(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldType, opts...).ToFunc()
+}
+
+// ByTextValue orders the results by the text_value field.
+func ByTextValue(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldTextValue, opts...).ToFunc()
+}
+
+// ByNumberValue orders the results by the number_value field.
+func ByNumberValue(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldNumberValue, opts...).ToFunc()
+}
+
+// ByBooleanValue orders the results by the boolean_value field.
+func ByBooleanValue(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldBooleanValue, opts...).ToFunc()
+}
+
+// ByTimeValue orders the results by the time_value field.
+func ByTimeValue(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldTimeValue, opts...).ToFunc()
+}
+
+// ByItemField orders the results by item field.
+func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newItemStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
+ )
+}
diff --git a/backend/internal/data/ent/itemfield/where.go b/backend/internal/data/ent/itemfield/where.go
index 2af2d7a..8a2d4aa 100644
--- a/backend/internal/data/ent/itemfield/where.go
+++ b/backend/internal/data/ent/itemfield/where.go
@@ -13,774 +13,502 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldName, v))
}
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldDescription, v))
}
// TextValue applies equality check predicate on the "text_value" field. It's identical to TextValueEQ.
func TextValue(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldTextValue, v))
}
// NumberValue applies equality check predicate on the "number_value" field. It's identical to NumberValueEQ.
func NumberValue(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldNumberValue, v))
}
// BooleanValue applies equality check predicate on the "boolean_value" field. It's identical to BooleanValueEQ.
func BooleanValue(v bool) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldBooleanValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldBooleanValue, v))
}
// TimeValue applies equality check predicate on the "time_value" field. It's identical to TimeValueEQ.
func TimeValue(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldTimeValue, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.ItemField(sql.FieldContainsFold(FieldName, v))
}
// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldDescription, v))
}
// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldDescription, v))
}
// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldDescription), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldDescription, vs...))
}
// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldDescription), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldDescription, vs...))
}
// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldDescription, v))
}
// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldDescription, v))
}
// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldDescription, v))
}
// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldDescription, v))
}
// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldContains(FieldDescription, v))
}
// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldHasPrefix(FieldDescription, v))
}
// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldHasSuffix(FieldDescription, v))
}
// DescriptionIsNil applies the IsNil predicate on the "description" field.
func DescriptionIsNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldDescription)))
- })
+ return predicate.ItemField(sql.FieldIsNull(FieldDescription))
}
// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldDescription)))
- })
+ return predicate.ItemField(sql.FieldNotNull(FieldDescription))
}
// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldEqualFold(FieldDescription, v))
}
// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldDescription), v))
- })
+ return predicate.ItemField(sql.FieldContainsFold(FieldDescription, v))
}
// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v Type) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldType), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldType, v))
}
// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v Type) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldType), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldType, v))
}
// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...Type) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldType), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldType, vs...))
}
// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...Type) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldType), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldType, vs...))
}
// TextValueEQ applies the EQ predicate on the "text_value" field.
func TextValueEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldTextValue, v))
}
// TextValueNEQ applies the NEQ predicate on the "text_value" field.
func TextValueNEQ(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldTextValue, v))
}
// TextValueIn applies the In predicate on the "text_value" field.
func TextValueIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldTextValue), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldTextValue, vs...))
}
// TextValueNotIn applies the NotIn predicate on the "text_value" field.
func TextValueNotIn(vs ...string) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldTextValue), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldTextValue, vs...))
}
// TextValueGT applies the GT predicate on the "text_value" field.
func TextValueGT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldTextValue, v))
}
// TextValueGTE applies the GTE predicate on the "text_value" field.
func TextValueGTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldTextValue, v))
}
// TextValueLT applies the LT predicate on the "text_value" field.
func TextValueLT(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldTextValue, v))
}
// TextValueLTE applies the LTE predicate on the "text_value" field.
func TextValueLTE(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldTextValue, v))
}
// TextValueContains applies the Contains predicate on the "text_value" field.
func TextValueContains(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldContains(FieldTextValue, v))
}
// TextValueHasPrefix applies the HasPrefix predicate on the "text_value" field.
func TextValueHasPrefix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldHasPrefix(FieldTextValue, v))
}
// TextValueHasSuffix applies the HasSuffix predicate on the "text_value" field.
func TextValueHasSuffix(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldHasSuffix(FieldTextValue, v))
}
// TextValueIsNil applies the IsNil predicate on the "text_value" field.
func TextValueIsNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldTextValue)))
- })
+ return predicate.ItemField(sql.FieldIsNull(FieldTextValue))
}
// TextValueNotNil applies the NotNil predicate on the "text_value" field.
func TextValueNotNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldTextValue)))
- })
+ return predicate.ItemField(sql.FieldNotNull(FieldTextValue))
}
// TextValueEqualFold applies the EqualFold predicate on the "text_value" field.
func TextValueEqualFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldEqualFold(FieldTextValue, v))
}
// TextValueContainsFold applies the ContainsFold predicate on the "text_value" field.
func TextValueContainsFold(v string) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldTextValue), v))
- })
+ return predicate.ItemField(sql.FieldContainsFold(FieldTextValue, v))
}
// NumberValueEQ applies the EQ predicate on the "number_value" field.
func NumberValueEQ(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldNumberValue, v))
}
// NumberValueNEQ applies the NEQ predicate on the "number_value" field.
func NumberValueNEQ(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldNumberValue, v))
}
// NumberValueIn applies the In predicate on the "number_value" field.
func NumberValueIn(vs ...int) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldNumberValue), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldNumberValue, vs...))
}
// NumberValueNotIn applies the NotIn predicate on the "number_value" field.
func NumberValueNotIn(vs ...int) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldNumberValue), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldNumberValue, vs...))
}
// NumberValueGT applies the GT predicate on the "number_value" field.
func NumberValueGT(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldNumberValue, v))
}
// NumberValueGTE applies the GTE predicate on the "number_value" field.
func NumberValueGTE(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldNumberValue, v))
}
// NumberValueLT applies the LT predicate on the "number_value" field.
func NumberValueLT(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldNumberValue, v))
}
// NumberValueLTE applies the LTE predicate on the "number_value" field.
func NumberValueLTE(v int) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldNumberValue), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldNumberValue, v))
}
// NumberValueIsNil applies the IsNil predicate on the "number_value" field.
func NumberValueIsNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldNumberValue)))
- })
+ return predicate.ItemField(sql.FieldIsNull(FieldNumberValue))
}
// NumberValueNotNil applies the NotNil predicate on the "number_value" field.
func NumberValueNotNil() predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldNumberValue)))
- })
+ return predicate.ItemField(sql.FieldNotNull(FieldNumberValue))
}
// BooleanValueEQ applies the EQ predicate on the "boolean_value" field.
func BooleanValueEQ(v bool) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldBooleanValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldBooleanValue, v))
}
// BooleanValueNEQ applies the NEQ predicate on the "boolean_value" field.
func BooleanValueNEQ(v bool) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldBooleanValue), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldBooleanValue, v))
}
// TimeValueEQ applies the EQ predicate on the "time_value" field.
func TimeValueEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldEQ(FieldTimeValue, v))
}
// TimeValueNEQ applies the NEQ predicate on the "time_value" field.
func TimeValueNEQ(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldNEQ(FieldTimeValue, v))
}
// TimeValueIn applies the In predicate on the "time_value" field.
func TimeValueIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldTimeValue), v...))
- })
+ return predicate.ItemField(sql.FieldIn(FieldTimeValue, vs...))
}
// TimeValueNotIn applies the NotIn predicate on the "time_value" field.
func TimeValueNotIn(vs ...time.Time) predicate.ItemField {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldTimeValue), v...))
- })
+ return predicate.ItemField(sql.FieldNotIn(FieldTimeValue, vs...))
}
// TimeValueGT applies the GT predicate on the "time_value" field.
func TimeValueGT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldGT(FieldTimeValue, v))
}
// TimeValueGTE applies the GTE predicate on the "time_value" field.
func TimeValueGTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldGTE(FieldTimeValue, v))
}
// TimeValueLT applies the LT predicate on the "time_value" field.
func TimeValueLT(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldLT(FieldTimeValue, v))
}
// TimeValueLTE applies the LTE predicate on the "time_value" field.
func TimeValueLTE(v time.Time) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldTimeValue), v))
- })
+ return predicate.ItemField(sql.FieldLTE(FieldTimeValue, v))
}
// HasItem applies the HasEdge predicate on the "item" edge.
@@ -788,7 +516,6 @@ func HasItem() predicate.ItemField {
return predicate.ItemField(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -798,11 +525,7 @@ func HasItem() predicate.ItemField {
// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates).
func HasItemWith(preds ...predicate.Item) predicate.ItemField {
return predicate.ItemField(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
- )
+ step := newItemStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -813,32 +536,15 @@ func HasItemWith(preds ...predicate.Item) predicate.ItemField {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.ItemField) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.ItemField(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.ItemField) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.ItemField(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.ItemField) predicate.ItemField {
- return predicate.ItemField(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.ItemField(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/itemfield_create.go b/backend/internal/data/ent/itemfield_create.go
index 2a00749..65a22fb 100644
--- a/backend/internal/data/ent/itemfield_create.go
+++ b/backend/internal/data/ent/itemfield_create.go
@@ -172,50 +172,8 @@ func (ifc *ItemFieldCreate) Mutation() *ItemFieldMutation {
// Save creates the ItemField in the database.
func (ifc *ItemFieldCreate) Save(ctx context.Context) (*ItemField, error) {
- var (
- err error
- node *ItemField
- )
ifc.defaults()
- if len(ifc.hooks) == 0 {
- if err = ifc.check(); err != nil {
- return nil, err
- }
- node, err = ifc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemFieldMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = ifc.check(); err != nil {
- return nil, err
- }
- ifc.mutation = mutation
- if node, err = ifc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(ifc.hooks) - 1; i >= 0; i-- {
- if ifc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ifc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, ifc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*ItemField)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from ItemFieldMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, ifc.sqlSave, ifc.mutation, ifc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -308,6 +266,9 @@ func (ifc *ItemFieldCreate) check() error {
}
func (ifc *ItemFieldCreate) sqlSave(ctx context.Context) (*ItemField, error) {
+ if err := ifc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := ifc.createSpec()
if err := sqlgraph.CreateNode(ctx, ifc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -322,94 +283,54 @@ func (ifc *ItemFieldCreate) sqlSave(ctx context.Context) (*ItemField, error) {
return nil, err
}
}
+ ifc.mutation.id = &_node.ID
+ ifc.mutation.done = true
return _node, nil
}
func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) {
var (
_node = &ItemField{config: ifc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: itemfield.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(itemfield.Table, sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID))
)
if id, ok := ifc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := ifc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldCreatedAt,
- })
+ _spec.SetField(itemfield.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := ifc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldUpdatedAt,
- })
+ _spec.SetField(itemfield.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := ifc.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldName,
- })
+ _spec.SetField(itemfield.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := ifc.mutation.Description(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldDescription,
- })
+ _spec.SetField(itemfield.FieldDescription, field.TypeString, value)
_node.Description = value
}
if value, ok := ifc.mutation.GetType(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: itemfield.FieldType,
- })
+ _spec.SetField(itemfield.FieldType, field.TypeEnum, value)
_node.Type = value
}
if value, ok := ifc.mutation.TextValue(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldTextValue,
- })
+ _spec.SetField(itemfield.FieldTextValue, field.TypeString, value)
_node.TextValue = value
}
if value, ok := ifc.mutation.NumberValue(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.SetField(itemfield.FieldNumberValue, field.TypeInt, value)
_node.NumberValue = value
}
if value, ok := ifc.mutation.BooleanValue(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: itemfield.FieldBooleanValue,
- })
+ _spec.SetField(itemfield.FieldBooleanValue, field.TypeBool, value)
_node.BooleanValue = value
}
if value, ok := ifc.mutation.TimeValue(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldTimeValue,
- })
+ _spec.SetField(itemfield.FieldTimeValue, field.TypeTime, value)
_node.TimeValue = value
}
if nodes := ifc.mutation.ItemIDs(); len(nodes) > 0 {
@@ -420,10 +341,7 @@ func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -438,11 +356,15 @@ func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) {
// ItemFieldCreateBulk is the builder for creating many ItemField entities in bulk.
type ItemFieldCreateBulk struct {
config
+ err error
builders []*ItemFieldCreate
}
// Save creates the ItemField entities in the database.
func (ifcb *ItemFieldCreateBulk) Save(ctx context.Context) ([]*ItemField, error) {
+ if ifcb.err != nil {
+ return nil, ifcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(ifcb.builders))
nodes := make([]*ItemField, len(ifcb.builders))
mutators := make([]Mutator, len(ifcb.builders))
@@ -459,8 +381,8 @@ func (ifcb *ItemFieldCreateBulk) Save(ctx context.Context) ([]*ItemField, error)
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, ifcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/itemfield_delete.go b/backend/internal/data/ent/itemfield_delete.go
index e1933d6..ba85cbc 100644
--- a/backend/internal/data/ent/itemfield_delete.go
+++ b/backend/internal/data/ent/itemfield_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (ifd *ItemFieldDelete) Where(ps ...predicate.ItemField) *ItemFieldDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ifd *ItemFieldDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(ifd.hooks) == 0 {
- affected, err = ifd.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemFieldMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- ifd.mutation = mutation
- affected, err = ifd.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ifd.hooks) - 1; i >= 0; i-- {
- if ifd.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ifd.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ifd.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ifd.sqlExec, ifd.mutation, ifd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (ifd *ItemFieldDelete) ExecX(ctx context.Context) int {
}
func (ifd *ItemFieldDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: itemfield.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(itemfield.Table, sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID))
if ps := ifd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (ifd *ItemFieldDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ ifd.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type ItemFieldDeleteOne struct {
ifd *ItemFieldDelete
}
+// Where appends a list predicates to the ItemFieldDelete builder.
+func (ifdo *ItemFieldDeleteOne) Where(ps ...predicate.ItemField) *ItemFieldDeleteOne {
+ ifdo.ifd.mutation.Where(ps...)
+ return ifdo
+}
+
// Exec executes the deletion query.
func (ifdo *ItemFieldDeleteOne) Exec(ctx context.Context) error {
n, err := ifdo.ifd.Exec(ctx)
@@ -111,5 +82,7 @@ func (ifdo *ItemFieldDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ifdo *ItemFieldDeleteOne) ExecX(ctx context.Context) {
- ifdo.ifd.ExecX(ctx)
+ if err := ifdo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/itemfield_query.go b/backend/internal/data/ent/itemfield_query.go
index fb9a5ea..21bffb8 100644
--- a/backend/internal/data/ent/itemfield_query.go
+++ b/backend/internal/data/ent/itemfield_query.go
@@ -19,11 +19,9 @@ import (
// ItemFieldQuery is the builder for querying ItemField entities.
type ItemFieldQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []itemfield.OrderOption
+ inters []Interceptor
predicates []predicate.ItemField
withItem *ItemQuery
withFKs bool
@@ -38,34 +36,34 @@ func (ifq *ItemFieldQuery) Where(ps ...predicate.ItemField) *ItemFieldQuery {
return ifq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (ifq *ItemFieldQuery) Limit(limit int) *ItemFieldQuery {
- ifq.limit = &limit
+ ifq.ctx.Limit = &limit
return ifq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (ifq *ItemFieldQuery) Offset(offset int) *ItemFieldQuery {
- ifq.offset = &offset
+ ifq.ctx.Offset = &offset
return ifq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (ifq *ItemFieldQuery) Unique(unique bool) *ItemFieldQuery {
- ifq.unique = &unique
+ ifq.ctx.Unique = &unique
return ifq
}
-// Order adds an order step to the query.
-func (ifq *ItemFieldQuery) Order(o ...OrderFunc) *ItemFieldQuery {
+// Order specifies how the records should be ordered.
+func (ifq *ItemFieldQuery) Order(o ...itemfield.OrderOption) *ItemFieldQuery {
ifq.order = append(ifq.order, o...)
return ifq
}
// QueryItem chains the current query on the "item" edge.
func (ifq *ItemFieldQuery) QueryItem() *ItemQuery {
- query := &ItemQuery{config: ifq.config}
+ query := (&ItemClient{config: ifq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := ifq.prepareQuery(ctx); err != nil {
return nil, err
@@ -88,7 +86,7 @@ func (ifq *ItemFieldQuery) QueryItem() *ItemQuery {
// First returns the first ItemField entity from the query.
// Returns a *NotFoundError when no ItemField was found.
func (ifq *ItemFieldQuery) First(ctx context.Context) (*ItemField, error) {
- nodes, err := ifq.Limit(1).All(ctx)
+ nodes, err := ifq.Limit(1).All(setContextOp(ctx, ifq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -111,7 +109,7 @@ func (ifq *ItemFieldQuery) FirstX(ctx context.Context) *ItemField {
// Returns a *NotFoundError when no ItemField ID was found.
func (ifq *ItemFieldQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = ifq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = ifq.Limit(1).IDs(setContextOp(ctx, ifq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -134,7 +132,7 @@ func (ifq *ItemFieldQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one ItemField entity is found.
// Returns a *NotFoundError when no ItemField entities are found.
func (ifq *ItemFieldQuery) Only(ctx context.Context) (*ItemField, error) {
- nodes, err := ifq.Limit(2).All(ctx)
+ nodes, err := ifq.Limit(2).All(setContextOp(ctx, ifq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -162,7 +160,7 @@ func (ifq *ItemFieldQuery) OnlyX(ctx context.Context) *ItemField {
// Returns a *NotFoundError when no entities are found.
func (ifq *ItemFieldQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = ifq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = ifq.Limit(2).IDs(setContextOp(ctx, ifq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -187,10 +185,12 @@ func (ifq *ItemFieldQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of ItemFields.
func (ifq *ItemFieldQuery) All(ctx context.Context) ([]*ItemField, error) {
+ ctx = setContextOp(ctx, ifq.ctx, "All")
if err := ifq.prepareQuery(ctx); err != nil {
return nil, err
}
- return ifq.sqlAll(ctx)
+ qr := querierAll[[]*ItemField, *ItemFieldQuery]()
+ return withInterceptors[[]*ItemField](ctx, ifq, qr, ifq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -203,9 +203,12 @@ func (ifq *ItemFieldQuery) AllX(ctx context.Context) []*ItemField {
}
// IDs executes the query and returns a list of ItemField IDs.
-func (ifq *ItemFieldQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := ifq.Select(itemfield.FieldID).Scan(ctx, &ids); err != nil {
+func (ifq *ItemFieldQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if ifq.ctx.Unique == nil && ifq.path != nil {
+ ifq.Unique(true)
+ }
+ ctx = setContextOp(ctx, ifq.ctx, "IDs")
+ if err = ifq.Select(itemfield.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -222,10 +225,11 @@ func (ifq *ItemFieldQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (ifq *ItemFieldQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, ifq.ctx, "Count")
if err := ifq.prepareQuery(ctx); err != nil {
return 0, err
}
- return ifq.sqlCount(ctx)
+ return withInterceptors[int](ctx, ifq, querierCount[*ItemFieldQuery](), ifq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -239,10 +243,15 @@ func (ifq *ItemFieldQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (ifq *ItemFieldQuery) Exist(ctx context.Context) (bool, error) {
- if err := ifq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, ifq.ctx, "Exist")
+ switch _, err := ifq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return ifq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -262,22 +271,21 @@ func (ifq *ItemFieldQuery) Clone() *ItemFieldQuery {
}
return &ItemFieldQuery{
config: ifq.config,
- limit: ifq.limit,
- offset: ifq.offset,
- order: append([]OrderFunc{}, ifq.order...),
+ ctx: ifq.ctx.Clone(),
+ order: append([]itemfield.OrderOption{}, ifq.order...),
+ inters: append([]Interceptor{}, ifq.inters...),
predicates: append([]predicate.ItemField{}, ifq.predicates...),
withItem: ifq.withItem.Clone(),
// clone intermediate query.
- sql: ifq.sql.Clone(),
- path: ifq.path,
- unique: ifq.unique,
+ sql: ifq.sql.Clone(),
+ path: ifq.path,
}
}
// WithItem tells the query-builder to eager-load the nodes that are connected to
// the "item" edge. The optional arguments are used to configure the query builder of the edge.
func (ifq *ItemFieldQuery) WithItem(opts ...func(*ItemQuery)) *ItemFieldQuery {
- query := &ItemQuery{config: ifq.config}
+ query := (&ItemClient{config: ifq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -300,16 +308,11 @@ func (ifq *ItemFieldQuery) WithItem(opts ...func(*ItemQuery)) *ItemFieldQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (ifq *ItemFieldQuery) GroupBy(field string, fields ...string) *ItemFieldGroupBy {
- grbuild := &ItemFieldGroupBy{config: ifq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := ifq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return ifq.sqlQuery(ctx), nil
- }
+ ifq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &ItemFieldGroupBy{build: ifq}
+ grbuild.flds = &ifq.ctx.Fields
grbuild.label = itemfield.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -326,15 +329,30 @@ func (ifq *ItemFieldQuery) GroupBy(field string, fields ...string) *ItemFieldGro
// Select(itemfield.FieldCreatedAt).
// Scan(ctx, &v)
func (ifq *ItemFieldQuery) Select(fields ...string) *ItemFieldSelect {
- ifq.fields = append(ifq.fields, fields...)
- selbuild := &ItemFieldSelect{ItemFieldQuery: ifq}
- selbuild.label = itemfield.Label
- selbuild.flds, selbuild.scan = &ifq.fields, selbuild.Scan
- return selbuild
+ ifq.ctx.Fields = append(ifq.ctx.Fields, fields...)
+ sbuild := &ItemFieldSelect{ItemFieldQuery: ifq}
+ sbuild.label = itemfield.Label
+ sbuild.flds, sbuild.scan = &ifq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a ItemFieldSelect configured with the given aggregations.
+func (ifq *ItemFieldQuery) Aggregate(fns ...AggregateFunc) *ItemFieldSelect {
+ return ifq.Select().Aggregate(fns...)
}
func (ifq *ItemFieldQuery) prepareQuery(ctx context.Context) error {
- for _, f := range ifq.fields {
+ for _, inter := range ifq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, ifq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range ifq.ctx.Fields {
if !itemfield.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -404,6 +422,9 @@ func (ifq *ItemFieldQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -423,41 +444,22 @@ func (ifq *ItemFieldQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
func (ifq *ItemFieldQuery) sqlCount(ctx context.Context) (int, error) {
_spec := ifq.querySpec()
- _spec.Node.Columns = ifq.fields
- if len(ifq.fields) > 0 {
- _spec.Unique = ifq.unique != nil && *ifq.unique
+ _spec.Node.Columns = ifq.ctx.Fields
+ if len(ifq.ctx.Fields) > 0 {
+ _spec.Unique = ifq.ctx.Unique != nil && *ifq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, ifq.driver, _spec)
}
-func (ifq *ItemFieldQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := ifq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: itemfield.Table,
- Columns: itemfield.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
- },
- From: ifq.sql,
- Unique: true,
- }
- if unique := ifq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(itemfield.Table, itemfield.Columns, sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID))
+ _spec.From = ifq.sql
+ if unique := ifq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if ifq.path != nil {
+ _spec.Unique = true
}
- if fields := ifq.fields; len(fields) > 0 {
+ if fields := ifq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, itemfield.FieldID)
for i := range fields {
@@ -473,10 +475,10 @@ func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := ifq.limit; limit != nil {
+ if limit := ifq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := ifq.offset; offset != nil {
+ if offset := ifq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := ifq.order; len(ps) > 0 {
@@ -492,7 +494,7 @@ func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(ifq.driver.Dialect())
t1 := builder.Table(itemfield.Table)
- columns := ifq.fields
+ columns := ifq.ctx.Fields
if len(columns) == 0 {
columns = itemfield.Columns
}
@@ -501,7 +503,7 @@ func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = ifq.sql
selector.Select(selector.Columns(columns...)...)
}
- if ifq.unique != nil && *ifq.unique {
+ if ifq.ctx.Unique != nil && *ifq.ctx.Unique {
selector.Distinct()
}
for _, p := range ifq.predicates {
@@ -510,12 +512,12 @@ func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range ifq.order {
p(selector)
}
- if offset := ifq.offset; offset != nil {
+ if offset := ifq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := ifq.limit; limit != nil {
+ if limit := ifq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -523,13 +525,8 @@ func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
// ItemFieldGroupBy is the group-by builder for ItemField entities.
type ItemFieldGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *ItemFieldQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -538,74 +535,77 @@ func (ifgb *ItemFieldGroupBy) Aggregate(fns ...AggregateFunc) *ItemFieldGroupBy
return ifgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (ifgb *ItemFieldGroupBy) Scan(ctx context.Context, v any) error {
- query, err := ifgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, ifgb.build.ctx, "GroupBy")
+ if err := ifgb.build.prepareQuery(ctx); err != nil {
return err
}
- ifgb.sql = query
- return ifgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*ItemFieldQuery, *ItemFieldGroupBy](ctx, ifgb.build, ifgb, ifgb.build.inters, v)
}
-func (ifgb *ItemFieldGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range ifgb.fields {
- if !itemfield.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := ifgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := ifgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (ifgb *ItemFieldGroupBy) sqlQuery() *sql.Selector {
- selector := ifgb.sql.Select()
+func (ifgb *ItemFieldGroupBy) sqlScan(ctx context.Context, root *ItemFieldQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(ifgb.fns))
for _, fn := range ifgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(ifgb.fields)+len(ifgb.fns))
- for _, f := range ifgb.fields {
+ columns := make([]string, 0, len(*ifgb.flds)+len(ifgb.fns))
+ for _, f := range *ifgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(ifgb.fields...)...)
+ selector.GroupBy(selector.Columns(*ifgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ifgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// ItemFieldSelect is the builder for selecting fields of ItemField entities.
type ItemFieldSelect struct {
*ItemFieldQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ifs *ItemFieldSelect) Aggregate(fns ...AggregateFunc) *ItemFieldSelect {
+ ifs.fns = append(ifs.fns, fns...)
+ return ifs
}
// Scan applies the selector query and scans the result into the given value.
func (ifs *ItemFieldSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ifs.ctx, "Select")
if err := ifs.prepareQuery(ctx); err != nil {
return err
}
- ifs.sql = ifs.ItemFieldQuery.sqlQuery(ctx)
- return ifs.sqlScan(ctx, v)
+ return scanWithInterceptors[*ItemFieldQuery, *ItemFieldSelect](ctx, ifs.ItemFieldQuery, ifs, ifs.inters, v)
}
-func (ifs *ItemFieldSelect) sqlScan(ctx context.Context, v any) error {
+func (ifs *ItemFieldSelect) sqlScan(ctx context.Context, root *ItemFieldQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ifs.fns))
+ for _, fn := range ifs.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ifs.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := ifs.sql.Query()
+ query, args := selector.Query()
if err := ifs.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/itemfield_update.go b/backend/internal/data/ent/itemfield_update.go
index 6b89324..3f44dc1 100644
--- a/backend/internal/data/ent/itemfield_update.go
+++ b/backend/internal/data/ent/itemfield_update.go
@@ -42,6 +42,14 @@ func (ifu *ItemFieldUpdate) SetName(s string) *ItemFieldUpdate {
return ifu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (ifu *ItemFieldUpdate) SetNillableName(s *string) *ItemFieldUpdate {
+ if s != nil {
+ ifu.SetName(*s)
+ }
+ return ifu
+}
+
// SetDescription sets the "description" field.
func (ifu *ItemFieldUpdate) SetDescription(s string) *ItemFieldUpdate {
ifu.mutation.SetDescription(s)
@@ -68,6 +76,14 @@ func (ifu *ItemFieldUpdate) SetType(i itemfield.Type) *ItemFieldUpdate {
return ifu
}
+// SetNillableType sets the "type" field if the given value is not nil.
+func (ifu *ItemFieldUpdate) SetNillableType(i *itemfield.Type) *ItemFieldUpdate {
+ if i != nil {
+ ifu.SetType(*i)
+ }
+ return ifu
+}
+
// SetTextValue sets the "text_value" field.
func (ifu *ItemFieldUpdate) SetTextValue(s string) *ItemFieldUpdate {
ifu.mutation.SetTextValue(s)
@@ -175,41 +191,8 @@ func (ifu *ItemFieldUpdate) ClearItem() *ItemFieldUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (ifu *ItemFieldUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
ifu.defaults()
- if len(ifu.hooks) == 0 {
- if err = ifu.check(); err != nil {
- return 0, err
- }
- affected, err = ifu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemFieldMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = ifu.check(); err != nil {
- return 0, err
- }
- ifu.mutation = mutation
- affected, err = ifu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ifu.hooks) - 1; i >= 0; i-- {
- if ifu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ifu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ifu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ifu.sqlSave, ifu.mutation, ifu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -268,16 +251,10 @@ func (ifu *ItemFieldUpdate) check() error {
}
func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: itemfield.Table,
- Columns: itemfield.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
- },
+ if err := ifu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(itemfield.Table, itemfield.Columns, sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID))
if ps := ifu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -286,85 +263,40 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := ifu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldUpdatedAt,
- })
+ _spec.SetField(itemfield.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := ifu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldName,
- })
+ _spec.SetField(itemfield.FieldName, field.TypeString, value)
}
if value, ok := ifu.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldDescription,
- })
+ _spec.SetField(itemfield.FieldDescription, field.TypeString, value)
}
if ifu.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: itemfield.FieldDescription,
- })
+ _spec.ClearField(itemfield.FieldDescription, field.TypeString)
}
if value, ok := ifu.mutation.GetType(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: itemfield.FieldType,
- })
+ _spec.SetField(itemfield.FieldType, field.TypeEnum, value)
}
if value, ok := ifu.mutation.TextValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldTextValue,
- })
+ _spec.SetField(itemfield.FieldTextValue, field.TypeString, value)
}
if ifu.mutation.TextValueCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: itemfield.FieldTextValue,
- })
+ _spec.ClearField(itemfield.FieldTextValue, field.TypeString)
}
if value, ok := ifu.mutation.NumberValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.SetField(itemfield.FieldNumberValue, field.TypeInt, value)
}
if value, ok := ifu.mutation.AddedNumberValue(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.AddField(itemfield.FieldNumberValue, field.TypeInt, value)
}
if ifu.mutation.NumberValueCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.ClearField(itemfield.FieldNumberValue, field.TypeInt)
}
if value, ok := ifu.mutation.BooleanValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: itemfield.FieldBooleanValue,
- })
+ _spec.SetField(itemfield.FieldBooleanValue, field.TypeBool, value)
}
if value, ok := ifu.mutation.TimeValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldTimeValue,
- })
+ _spec.SetField(itemfield.FieldTimeValue, field.TypeTime, value)
}
if ifu.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -374,10 +306,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -390,10 +319,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -409,6 +335,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ ifu.mutation.done = true
return n, nil
}
@@ -432,6 +359,14 @@ func (ifuo *ItemFieldUpdateOne) SetName(s string) *ItemFieldUpdateOne {
return ifuo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (ifuo *ItemFieldUpdateOne) SetNillableName(s *string) *ItemFieldUpdateOne {
+ if s != nil {
+ ifuo.SetName(*s)
+ }
+ return ifuo
+}
+
// SetDescription sets the "description" field.
func (ifuo *ItemFieldUpdateOne) SetDescription(s string) *ItemFieldUpdateOne {
ifuo.mutation.SetDescription(s)
@@ -458,6 +393,14 @@ func (ifuo *ItemFieldUpdateOne) SetType(i itemfield.Type) *ItemFieldUpdateOne {
return ifuo
}
+// SetNillableType sets the "type" field if the given value is not nil.
+func (ifuo *ItemFieldUpdateOne) SetNillableType(i *itemfield.Type) *ItemFieldUpdateOne {
+ if i != nil {
+ ifuo.SetType(*i)
+ }
+ return ifuo
+}
+
// SetTextValue sets the "text_value" field.
func (ifuo *ItemFieldUpdateOne) SetTextValue(s string) *ItemFieldUpdateOne {
ifuo.mutation.SetTextValue(s)
@@ -563,6 +506,12 @@ func (ifuo *ItemFieldUpdateOne) ClearItem() *ItemFieldUpdateOne {
return ifuo
}
+// Where appends a list predicates to the ItemFieldUpdate builder.
+func (ifuo *ItemFieldUpdateOne) Where(ps ...predicate.ItemField) *ItemFieldUpdateOne {
+ ifuo.mutation.Where(ps...)
+ return ifuo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (ifuo *ItemFieldUpdateOne) Select(field string, fields ...string) *ItemFieldUpdateOne {
@@ -572,47 +521,8 @@ func (ifuo *ItemFieldUpdateOne) Select(field string, fields ...string) *ItemFiel
// Save executes the query and returns the updated ItemField entity.
func (ifuo *ItemFieldUpdateOne) Save(ctx context.Context) (*ItemField, error) {
- var (
- err error
- node *ItemField
- )
ifuo.defaults()
- if len(ifuo.hooks) == 0 {
- if err = ifuo.check(); err != nil {
- return nil, err
- }
- node, err = ifuo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*ItemFieldMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = ifuo.check(); err != nil {
- return nil, err
- }
- ifuo.mutation = mutation
- node, err = ifuo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(ifuo.hooks) - 1; i >= 0; i-- {
- if ifuo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ifuo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, ifuo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*ItemField)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from ItemFieldMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, ifuo.sqlSave, ifuo.mutation, ifuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -671,16 +581,10 @@ func (ifuo *ItemFieldUpdateOne) check() error {
}
func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: itemfield.Table,
- Columns: itemfield.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: itemfield.FieldID,
- },
- },
+ if err := ifuo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(itemfield.Table, itemfield.Columns, sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID))
id, ok := ifuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ItemField.id" for update`)}
@@ -706,85 +610,40 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
}
}
if value, ok := ifuo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldUpdatedAt,
- })
+ _spec.SetField(itemfield.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := ifuo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldName,
- })
+ _spec.SetField(itemfield.FieldName, field.TypeString, value)
}
if value, ok := ifuo.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldDescription,
- })
+ _spec.SetField(itemfield.FieldDescription, field.TypeString, value)
}
if ifuo.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: itemfield.FieldDescription,
- })
+ _spec.ClearField(itemfield.FieldDescription, field.TypeString)
}
if value, ok := ifuo.mutation.GetType(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: itemfield.FieldType,
- })
+ _spec.SetField(itemfield.FieldType, field.TypeEnum, value)
}
if value, ok := ifuo.mutation.TextValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: itemfield.FieldTextValue,
- })
+ _spec.SetField(itemfield.FieldTextValue, field.TypeString, value)
}
if ifuo.mutation.TextValueCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: itemfield.FieldTextValue,
- })
+ _spec.ClearField(itemfield.FieldTextValue, field.TypeString)
}
if value, ok := ifuo.mutation.NumberValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.SetField(itemfield.FieldNumberValue, field.TypeInt, value)
}
if value, ok := ifuo.mutation.AddedNumberValue(); ok {
- _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Value: value,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.AddField(itemfield.FieldNumberValue, field.TypeInt, value)
}
if ifuo.mutation.NumberValueCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeInt,
- Column: itemfield.FieldNumberValue,
- })
+ _spec.ClearField(itemfield.FieldNumberValue, field.TypeInt)
}
if value, ok := ifuo.mutation.BooleanValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: itemfield.FieldBooleanValue,
- })
+ _spec.SetField(itemfield.FieldBooleanValue, field.TypeBool, value)
}
if value, ok := ifuo.mutation.TimeValue(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: itemfield.FieldTimeValue,
- })
+ _spec.SetField(itemfield.FieldTimeValue, field.TypeTime, value)
}
if ifuo.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -794,10 +653,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -810,10 +666,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -832,5 +685,6 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
}
return nil, err
}
+ ifuo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/label.go b/backend/internal/data/ent/label.go
index 9e65bfe..fdd6f8d 100644
--- a/backend/internal/data/ent/label.go
+++ b/backend/internal/data/ent/label.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -32,6 +33,7 @@ type Label struct {
// The values are being populated by the LabelQuery when eager-loading is set.
Edges LabelEdges `json:"edges"`
group_labels *uuid.UUID
+ selectValues sql.SelectValues
}
// LabelEdges holds the relations/edges for other nodes in the graph.
@@ -81,7 +83,7 @@ func (*Label) scanValues(columns []string) ([]any, error) {
case label.ForeignKeys[0]: // group_labels
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type Label", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -138,26 +140,34 @@ func (l *Label) assignValues(columns []string, values []any) error {
l.group_labels = new(uuid.UUID)
*l.group_labels = *value.S.(*uuid.UUID)
}
+ default:
+ l.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the Label.
+// This includes values selected through modifiers, order, etc.
+func (l *Label) Value(name string) (ent.Value, error) {
+ return l.selectValues.Get(name)
+}
+
// QueryGroup queries the "group" edge of the Label entity.
func (l *Label) QueryGroup() *GroupQuery {
- return (&LabelClient{config: l.config}).QueryGroup(l)
+ return NewLabelClient(l.config).QueryGroup(l)
}
// QueryItems queries the "items" edge of the Label entity.
func (l *Label) QueryItems() *ItemQuery {
- return (&LabelClient{config: l.config}).QueryItems(l)
+ return NewLabelClient(l.config).QueryItems(l)
}
// Update returns a builder for updating this Label.
// Note that you need to call Label.Unwrap() before calling this method if this Label
// was returned from a transaction, and the transaction was committed or rolled back.
func (l *Label) Update() *LabelUpdateOne {
- return (&LabelClient{config: l.config}).UpdateOne(l)
+ return NewLabelClient(l.config).UpdateOne(l)
}
// Unwrap unwraps the Label entity that was returned from a transaction after it was closed,
@@ -196,9 +206,3 @@ func (l *Label) String() string {
// Labels is a parsable slice of Label.
type Labels []*Label
-
-func (l Labels) config(cfg config) {
- for _i := range l {
- l[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/label/label.go b/backend/internal/data/ent/label/label.go
index 82bcdbd..df34c87 100644
--- a/backend/internal/data/ent/label/label.go
+++ b/backend/internal/data/ent/label/label.go
@@ -5,6 +5,8 @@ package label
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -96,3 +98,71 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the Label queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByDescription orders the results by the description field.
+func ByDescription(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDescription, opts...).ToFunc()
+}
+
+// ByColor orders the results by the color field.
+func ByColor(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldColor, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByItemsCount orders the results by items count.
+func ByItemsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...)
+ }
+}
+
+// ByItems orders the results by items terms.
+func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newItemsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2M, false, ItemsTable, ItemsPrimaryKey...),
+ )
+}
diff --git a/backend/internal/data/ent/label/where.go b/backend/internal/data/ent/label/where.go
index 9279ee7..3754ac7 100644
--- a/backend/internal/data/ent/label/where.go
+++ b/backend/internal/data/ent/label/where.go
@@ -13,561 +13,367 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Label(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldName, v))
}
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldDescription, v))
}
// Color applies equality check predicate on the "color" field. It's identical to ColorEQ.
func Color(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldColor, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Label(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Label(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.Label(sql.FieldContainsFold(FieldName, v))
}
// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldDescription, v))
}
// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldNEQ(FieldDescription, v))
}
// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldDescription), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldDescription, vs...))
}
// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldDescription), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldDescription, vs...))
}
// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldGT(FieldDescription, v))
}
// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldGTE(FieldDescription, v))
}
// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldLT(FieldDescription, v))
}
// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldLTE(FieldDescription, v))
}
// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldContains(FieldDescription, v))
}
// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldHasPrefix(FieldDescription, v))
}
// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldHasSuffix(FieldDescription, v))
}
// DescriptionIsNil applies the IsNil predicate on the "description" field.
func DescriptionIsNil() predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldDescription)))
- })
+ return predicate.Label(sql.FieldIsNull(FieldDescription))
}
// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldDescription)))
- })
+ return predicate.Label(sql.FieldNotNull(FieldDescription))
}
// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldEqualFold(FieldDescription, v))
}
// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldDescription), v))
- })
+ return predicate.Label(sql.FieldContainsFold(FieldDescription, v))
}
// ColorEQ applies the EQ predicate on the "color" field.
func ColorEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldEQ(FieldColor, v))
}
// ColorNEQ applies the NEQ predicate on the "color" field.
func ColorNEQ(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldNEQ(FieldColor, v))
}
// ColorIn applies the In predicate on the "color" field.
func ColorIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldColor), v...))
- })
+ return predicate.Label(sql.FieldIn(FieldColor, vs...))
}
// ColorNotIn applies the NotIn predicate on the "color" field.
func ColorNotIn(vs ...string) predicate.Label {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldColor), v...))
- })
+ return predicate.Label(sql.FieldNotIn(FieldColor, vs...))
}
// ColorGT applies the GT predicate on the "color" field.
func ColorGT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldGT(FieldColor, v))
}
// ColorGTE applies the GTE predicate on the "color" field.
func ColorGTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldGTE(FieldColor, v))
}
// ColorLT applies the LT predicate on the "color" field.
func ColorLT(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldLT(FieldColor, v))
}
// ColorLTE applies the LTE predicate on the "color" field.
func ColorLTE(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldLTE(FieldColor, v))
}
// ColorContains applies the Contains predicate on the "color" field.
func ColorContains(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldContains(FieldColor, v))
}
// ColorHasPrefix applies the HasPrefix predicate on the "color" field.
func ColorHasPrefix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldHasPrefix(FieldColor, v))
}
// ColorHasSuffix applies the HasSuffix predicate on the "color" field.
func ColorHasSuffix(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldHasSuffix(FieldColor, v))
}
// ColorIsNil applies the IsNil predicate on the "color" field.
func ColorIsNil() predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldColor)))
- })
+ return predicate.Label(sql.FieldIsNull(FieldColor))
}
// ColorNotNil applies the NotNil predicate on the "color" field.
func ColorNotNil() predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldColor)))
- })
+ return predicate.Label(sql.FieldNotNull(FieldColor))
}
// ColorEqualFold applies the EqualFold predicate on the "color" field.
func ColorEqualFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldEqualFold(FieldColor, v))
}
// ColorContainsFold applies the ContainsFold predicate on the "color" field.
func ColorContainsFold(v string) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldColor), v))
- })
+ return predicate.Label(sql.FieldContainsFold(FieldColor, v))
}
// HasGroup applies the HasEdge predicate on the "group" edge.
@@ -575,7 +381,6 @@ func HasGroup() predicate.Label {
return predicate.Label(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -585,11 +390,7 @@ func HasGroup() predicate.Label {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Label {
return predicate.Label(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
+ step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -603,7 +404,6 @@ func HasItems() predicate.Label {
return predicate.Label(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsTable, FieldID),
sqlgraph.Edge(sqlgraph.M2M, false, ItemsTable, ItemsPrimaryKey...),
)
sqlgraph.HasNeighbors(s, step)
@@ -613,11 +413,7 @@ func HasItems() predicate.Label {
// HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates).
func HasItemsWith(preds ...predicate.Item) predicate.Label {
return predicate.Label(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2M, false, ItemsTable, ItemsPrimaryKey...),
- )
+ step := newItemsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -628,32 +424,15 @@ func HasItemsWith(preds ...predicate.Item) predicate.Label {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Label) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Label(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Label) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Label(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Label) predicate.Label {
- return predicate.Label(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Label(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/label_create.go b/backend/internal/data/ent/label_create.go
index a000e35..0ad6469 100644
--- a/backend/internal/data/ent/label_create.go
+++ b/backend/internal/data/ent/label_create.go
@@ -132,50 +132,8 @@ func (lc *LabelCreate) Mutation() *LabelMutation {
// Save creates the Label in the database.
func (lc *LabelCreate) Save(ctx context.Context) (*Label, error) {
- var (
- err error
- node *Label
- )
lc.defaults()
- if len(lc.hooks) == 0 {
- if err = lc.check(); err != nil {
- return nil, err
- }
- node, err = lc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LabelMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = lc.check(); err != nil {
- return nil, err
- }
- lc.mutation = mutation
- if node, err = lc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(lc.hooks) - 1; i >= 0; i-- {
- if lc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = lc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, lc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Label)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from LabelMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -249,6 +207,9 @@ func (lc *LabelCreate) check() error {
}
func (lc *LabelCreate) sqlSave(ctx context.Context) (*Label, error) {
+ if err := lc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := lc.createSpec()
if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -263,62 +224,38 @@ func (lc *LabelCreate) sqlSave(ctx context.Context) (*Label, error) {
return nil, err
}
}
+ lc.mutation.id = &_node.ID
+ lc.mutation.done = true
return _node, nil
}
func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
var (
_node = &Label{config: lc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: label.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(label.Table, sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID))
)
if id, ok := lc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := lc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: label.FieldCreatedAt,
- })
+ _spec.SetField(label.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := lc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: label.FieldUpdatedAt,
- })
+ _spec.SetField(label.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := lc.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldName,
- })
+ _spec.SetField(label.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := lc.mutation.Description(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldDescription,
- })
+ _spec.SetField(label.FieldDescription, field.TypeString, value)
_node.Description = value
}
if value, ok := lc.mutation.Color(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldColor,
- })
+ _spec.SetField(label.FieldColor, field.TypeString, value)
_node.Color = value
}
if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 {
@@ -329,10 +266,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
Columns: []string{label.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -349,10 +283,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -366,11 +297,15 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
// LabelCreateBulk is the builder for creating many Label entities in bulk.
type LabelCreateBulk struct {
config
+ err error
builders []*LabelCreate
}
// Save creates the Label entities in the database.
func (lcb *LabelCreateBulk) Save(ctx context.Context) ([]*Label, error) {
+ if lcb.err != nil {
+ return nil, lcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(lcb.builders))
nodes := make([]*Label, len(lcb.builders))
mutators := make([]Mutator, len(lcb.builders))
@@ -387,8 +322,8 @@ func (lcb *LabelCreateBulk) Save(ctx context.Context) ([]*Label, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/label_delete.go b/backend/internal/data/ent/label_delete.go
index 28e103c..f3b514a 100644
--- a/backend/internal/data/ent/label_delete.go
+++ b/backend/internal/data/ent/label_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (ld *LabelDelete) Where(ps ...predicate.Label) *LabelDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ld *LabelDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(ld.hooks) == 0 {
- affected, err = ld.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LabelMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- ld.mutation = mutation
- affected, err = ld.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ld.hooks) - 1; i >= 0; i-- {
- if ld.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ld.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ld.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (ld *LabelDelete) ExecX(ctx context.Context) int {
}
func (ld *LabelDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: label.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(label.Table, sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID))
if ps := ld.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (ld *LabelDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ ld.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type LabelDeleteOne struct {
ld *LabelDelete
}
+// Where appends a list predicates to the LabelDelete builder.
+func (ldo *LabelDeleteOne) Where(ps ...predicate.Label) *LabelDeleteOne {
+ ldo.ld.mutation.Where(ps...)
+ return ldo
+}
+
// Exec executes the deletion query.
func (ldo *LabelDeleteOne) Exec(ctx context.Context) error {
n, err := ldo.ld.Exec(ctx)
@@ -111,5 +82,7 @@ func (ldo *LabelDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ldo *LabelDeleteOne) ExecX(ctx context.Context) {
- ldo.ld.ExecX(ctx)
+ if err := ldo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/label_query.go b/backend/internal/data/ent/label_query.go
index fc53ec4..e3bb6d1 100644
--- a/backend/internal/data/ent/label_query.go
+++ b/backend/internal/data/ent/label_query.go
@@ -21,11 +21,9 @@ import (
// LabelQuery is the builder for querying Label entities.
type LabelQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []label.OrderOption
+ inters []Interceptor
predicates []predicate.Label
withGroup *GroupQuery
withItems *ItemQuery
@@ -41,34 +39,34 @@ func (lq *LabelQuery) Where(ps ...predicate.Label) *LabelQuery {
return lq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (lq *LabelQuery) Limit(limit int) *LabelQuery {
- lq.limit = &limit
+ lq.ctx.Limit = &limit
return lq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (lq *LabelQuery) Offset(offset int) *LabelQuery {
- lq.offset = &offset
+ lq.ctx.Offset = &offset
return lq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (lq *LabelQuery) Unique(unique bool) *LabelQuery {
- lq.unique = &unique
+ lq.ctx.Unique = &unique
return lq
}
-// Order adds an order step to the query.
-func (lq *LabelQuery) Order(o ...OrderFunc) *LabelQuery {
+// Order specifies how the records should be ordered.
+func (lq *LabelQuery) Order(o ...label.OrderOption) *LabelQuery {
lq.order = append(lq.order, o...)
return lq
}
// QueryGroup chains the current query on the "group" edge.
func (lq *LabelQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: lq.config}
+ query := (&GroupClient{config: lq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
@@ -90,7 +88,7 @@ func (lq *LabelQuery) QueryGroup() *GroupQuery {
// QueryItems chains the current query on the "items" edge.
func (lq *LabelQuery) QueryItems() *ItemQuery {
- query := &ItemQuery{config: lq.config}
+ query := (&ItemClient{config: lq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
@@ -113,7 +111,7 @@ func (lq *LabelQuery) QueryItems() *ItemQuery {
// First returns the first Label entity from the query.
// Returns a *NotFoundError when no Label was found.
func (lq *LabelQuery) First(ctx context.Context) (*Label, error) {
- nodes, err := lq.Limit(1).All(ctx)
+ nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -136,7 +134,7 @@ func (lq *LabelQuery) FirstX(ctx context.Context) *Label {
// Returns a *NotFoundError when no Label ID was found.
func (lq *LabelQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = lq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -159,7 +157,7 @@ func (lq *LabelQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Label entity is found.
// Returns a *NotFoundError when no Label entities are found.
func (lq *LabelQuery) Only(ctx context.Context) (*Label, error) {
- nodes, err := lq.Limit(2).All(ctx)
+ nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -187,7 +185,7 @@ func (lq *LabelQuery) OnlyX(ctx context.Context) *Label {
// Returns a *NotFoundError when no entities are found.
func (lq *LabelQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = lq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -212,10 +210,12 @@ func (lq *LabelQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Labels.
func (lq *LabelQuery) All(ctx context.Context) ([]*Label, error) {
+ ctx = setContextOp(ctx, lq.ctx, "All")
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
}
- return lq.sqlAll(ctx)
+ qr := querierAll[[]*Label, *LabelQuery]()
+ return withInterceptors[[]*Label](ctx, lq, qr, lq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -228,9 +228,12 @@ func (lq *LabelQuery) AllX(ctx context.Context) []*Label {
}
// IDs executes the query and returns a list of Label IDs.
-func (lq *LabelQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := lq.Select(label.FieldID).Scan(ctx, &ids); err != nil {
+func (lq *LabelQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if lq.ctx.Unique == nil && lq.path != nil {
+ lq.Unique(true)
+ }
+ ctx = setContextOp(ctx, lq.ctx, "IDs")
+ if err = lq.Select(label.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -247,10 +250,11 @@ func (lq *LabelQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (lq *LabelQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, lq.ctx, "Count")
if err := lq.prepareQuery(ctx); err != nil {
return 0, err
}
- return lq.sqlCount(ctx)
+ return withInterceptors[int](ctx, lq, querierCount[*LabelQuery](), lq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -264,10 +268,15 @@ func (lq *LabelQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (lq *LabelQuery) Exist(ctx context.Context) (bool, error) {
- if err := lq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, lq.ctx, "Exist")
+ switch _, err := lq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return lq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -287,23 +296,22 @@ func (lq *LabelQuery) Clone() *LabelQuery {
}
return &LabelQuery{
config: lq.config,
- limit: lq.limit,
- offset: lq.offset,
- order: append([]OrderFunc{}, lq.order...),
+ ctx: lq.ctx.Clone(),
+ order: append([]label.OrderOption{}, lq.order...),
+ inters: append([]Interceptor{}, lq.inters...),
predicates: append([]predicate.Label{}, lq.predicates...),
withGroup: lq.withGroup.Clone(),
withItems: lq.withItems.Clone(),
// clone intermediate query.
- sql: lq.sql.Clone(),
- path: lq.path,
- unique: lq.unique,
+ sql: lq.sql.Clone(),
+ path: lq.path,
}
}
// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (lq *LabelQuery) WithGroup(opts ...func(*GroupQuery)) *LabelQuery {
- query := &GroupQuery{config: lq.config}
+ query := (&GroupClient{config: lq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -314,7 +322,7 @@ func (lq *LabelQuery) WithGroup(opts ...func(*GroupQuery)) *LabelQuery {
// WithItems tells the query-builder to eager-load the nodes that are connected to
// the "items" edge. The optional arguments are used to configure the query builder of the edge.
func (lq *LabelQuery) WithItems(opts ...func(*ItemQuery)) *LabelQuery {
- query := &ItemQuery{config: lq.config}
+ query := (&ItemClient{config: lq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -337,16 +345,11 @@ func (lq *LabelQuery) WithItems(opts ...func(*ItemQuery)) *LabelQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (lq *LabelQuery) GroupBy(field string, fields ...string) *LabelGroupBy {
- grbuild := &LabelGroupBy{config: lq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := lq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return lq.sqlQuery(ctx), nil
- }
+ lq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &LabelGroupBy{build: lq}
+ grbuild.flds = &lq.ctx.Fields
grbuild.label = label.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -363,15 +366,30 @@ func (lq *LabelQuery) GroupBy(field string, fields ...string) *LabelGroupBy {
// Select(label.FieldCreatedAt).
// Scan(ctx, &v)
func (lq *LabelQuery) Select(fields ...string) *LabelSelect {
- lq.fields = append(lq.fields, fields...)
- selbuild := &LabelSelect{LabelQuery: lq}
- selbuild.label = label.Label
- selbuild.flds, selbuild.scan = &lq.fields, selbuild.Scan
- return selbuild
+ lq.ctx.Fields = append(lq.ctx.Fields, fields...)
+ sbuild := &LabelSelect{LabelQuery: lq}
+ sbuild.label = label.Label
+ sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a LabelSelect configured with the given aggregations.
+func (lq *LabelQuery) Aggregate(fns ...AggregateFunc) *LabelSelect {
+ return lq.Select().Aggregate(fns...)
}
func (lq *LabelQuery) prepareQuery(ctx context.Context) error {
- for _, f := range lq.fields {
+ for _, inter := range lq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, lq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range lq.ctx.Fields {
if !label.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -449,6 +467,9 @@ func (lq *LabelQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -488,27 +509,30 @@ func (lq *LabelQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
if err := query.prepareQuery(ctx); err != nil {
return err
}
- neighbors, err := query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
- assign := spec.Assign
- values := spec.ScanValues
- spec.ScanValues = func(columns []string) ([]any, error) {
- values, err := values(columns[1:])
- if err != nil {
- return nil, err
+ qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+ return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
+ assign := spec.Assign
+ values := spec.ScanValues
+ spec.ScanValues = func(columns []string) ([]any, error) {
+ values, err := values(columns[1:])
+ if err != nil {
+ return nil, err
+ }
+ return append([]any{new(uuid.UUID)}, values...), nil
}
- return append([]any{new(uuid.UUID)}, values...), nil
- }
- spec.Assign = func(columns []string, values []any) error {
- outValue := *values[0].(*uuid.UUID)
- inValue := *values[1].(*uuid.UUID)
- if nids[inValue] == nil {
- nids[inValue] = map[*Label]struct{}{byID[outValue]: struct{}{}}
- return assign(columns[1:], values[1:])
+ spec.Assign = func(columns []string, values []any) error {
+ outValue := *values[0].(*uuid.UUID)
+ inValue := *values[1].(*uuid.UUID)
+ if nids[inValue] == nil {
+ nids[inValue] = map[*Label]struct{}{byID[outValue]: {}}
+ return assign(columns[1:], values[1:])
+ }
+ nids[inValue][byID[outValue]] = struct{}{}
+ return nil
}
- nids[inValue][byID[outValue]] = struct{}{}
- return nil
- }
+ })
})
+ neighbors, err := withInterceptors[[]*Item](ctx, query, qr, query.inters)
if err != nil {
return err
}
@@ -526,41 +550,22 @@ func (lq *LabelQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
func (lq *LabelQuery) sqlCount(ctx context.Context) (int, error) {
_spec := lq.querySpec()
- _spec.Node.Columns = lq.fields
- if len(lq.fields) > 0 {
- _spec.Unique = lq.unique != nil && *lq.unique
+ _spec.Node.Columns = lq.ctx.Fields
+ if len(lq.ctx.Fields) > 0 {
+ _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, lq.driver, _spec)
}
-func (lq *LabelQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := lq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: label.Table,
- Columns: label.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
- },
- From: lq.sql,
- Unique: true,
- }
- if unique := lq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(label.Table, label.Columns, sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID))
+ _spec.From = lq.sql
+ if unique := lq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if lq.path != nil {
+ _spec.Unique = true
}
- if fields := lq.fields; len(fields) > 0 {
+ if fields := lq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, label.FieldID)
for i := range fields {
@@ -576,10 +581,10 @@ func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := lq.limit; limit != nil {
+ if limit := lq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := lq.offset; offset != nil {
+ if offset := lq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := lq.order; len(ps) > 0 {
@@ -595,7 +600,7 @@ func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(lq.driver.Dialect())
t1 := builder.Table(label.Table)
- columns := lq.fields
+ columns := lq.ctx.Fields
if len(columns) == 0 {
columns = label.Columns
}
@@ -604,7 +609,7 @@ func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = lq.sql
selector.Select(selector.Columns(columns...)...)
}
- if lq.unique != nil && *lq.unique {
+ if lq.ctx.Unique != nil && *lq.ctx.Unique {
selector.Distinct()
}
for _, p := range lq.predicates {
@@ -613,12 +618,12 @@ func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range lq.order {
p(selector)
}
- if offset := lq.offset; offset != nil {
+ if offset := lq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := lq.limit; limit != nil {
+ if limit := lq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -626,13 +631,8 @@ func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
// LabelGroupBy is the group-by builder for Label entities.
type LabelGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *LabelQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -641,74 +641,77 @@ func (lgb *LabelGroupBy) Aggregate(fns ...AggregateFunc) *LabelGroupBy {
return lgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (lgb *LabelGroupBy) Scan(ctx context.Context, v any) error {
- query, err := lgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy")
+ if err := lgb.build.prepareQuery(ctx); err != nil {
return err
}
- lgb.sql = query
- return lgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*LabelQuery, *LabelGroupBy](ctx, lgb.build, lgb, lgb.build.inters, v)
}
-func (lgb *LabelGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range lgb.fields {
- if !label.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := lgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := lgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (lgb *LabelGroupBy) sqlQuery() *sql.Selector {
- selector := lgb.sql.Select()
+func (lgb *LabelGroupBy) sqlScan(ctx context.Context, root *LabelQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(lgb.fns))
for _, fn := range lgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(lgb.fields)+len(lgb.fns))
- for _, f := range lgb.fields {
+ columns := make([]string, 0, len(*lgb.flds)+len(lgb.fns))
+ for _, f := range *lgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(lgb.fields...)...)
+ selector.GroupBy(selector.Columns(*lgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := lgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// LabelSelect is the builder for selecting fields of Label entities.
type LabelSelect struct {
*LabelQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ls *LabelSelect) Aggregate(fns ...AggregateFunc) *LabelSelect {
+ ls.fns = append(ls.fns, fns...)
+ return ls
}
// Scan applies the selector query and scans the result into the given value.
func (ls *LabelSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ls.ctx, "Select")
if err := ls.prepareQuery(ctx); err != nil {
return err
}
- ls.sql = ls.LabelQuery.sqlQuery(ctx)
- return ls.sqlScan(ctx, v)
+ return scanWithInterceptors[*LabelQuery, *LabelSelect](ctx, ls.LabelQuery, ls, ls.inters, v)
}
-func (ls *LabelSelect) sqlScan(ctx context.Context, v any) error {
+func (ls *LabelSelect) sqlScan(ctx context.Context, root *LabelQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ls.fns))
+ for _, fn := range ls.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ls.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := ls.sql.Query()
+ query, args := selector.Query()
if err := ls.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/label_update.go b/backend/internal/data/ent/label_update.go
index 16f4a0c..0862d22 100644
--- a/backend/internal/data/ent/label_update.go
+++ b/backend/internal/data/ent/label_update.go
@@ -43,6 +43,14 @@ func (lu *LabelUpdate) SetName(s string) *LabelUpdate {
return lu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (lu *LabelUpdate) SetNillableName(s *string) *LabelUpdate {
+ if s != nil {
+ lu.SetName(*s)
+ }
+ return lu
+}
+
// SetDescription sets the "description" field.
func (lu *LabelUpdate) SetDescription(s string) *LabelUpdate {
lu.mutation.SetDescription(s)
@@ -143,41 +151,8 @@ func (lu *LabelUpdate) RemoveItems(i ...*Item) *LabelUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (lu *LabelUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
lu.defaults()
- if len(lu.hooks) == 0 {
- if err = lu.check(); err != nil {
- return 0, err
- }
- affected, err = lu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LabelMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = lu.check(); err != nil {
- return 0, err
- }
- lu.mutation = mutation
- affected, err = lu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(lu.hooks) - 1; i >= 0; i-- {
- if lu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = lu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, lu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -234,16 +209,10 @@ func (lu *LabelUpdate) check() error {
}
func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: label.Table,
- Columns: label.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
- },
+ if err := lu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(label.Table, label.Columns, sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID))
if ps := lu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -252,44 +221,22 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := lu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: label.FieldUpdatedAt,
- })
+ _spec.SetField(label.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := lu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldName,
- })
+ _spec.SetField(label.FieldName, field.TypeString, value)
}
if value, ok := lu.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldDescription,
- })
+ _spec.SetField(label.FieldDescription, field.TypeString, value)
}
if lu.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: label.FieldDescription,
- })
+ _spec.ClearField(label.FieldDescription, field.TypeString)
}
if value, ok := lu.mutation.Color(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldColor,
- })
+ _spec.SetField(label.FieldColor, field.TypeString, value)
}
if lu.mutation.ColorCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: label.FieldColor,
- })
+ _spec.ClearField(label.FieldColor, field.TypeString)
}
if lu.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -299,10 +246,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{label.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -315,10 +259,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{label.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -334,10 +275,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -350,10 +288,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -369,10 +304,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -388,6 +320,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ lu.mutation.done = true
return n, nil
}
@@ -411,6 +344,14 @@ func (luo *LabelUpdateOne) SetName(s string) *LabelUpdateOne {
return luo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (luo *LabelUpdateOne) SetNillableName(s *string) *LabelUpdateOne {
+ if s != nil {
+ luo.SetName(*s)
+ }
+ return luo
+}
+
// SetDescription sets the "description" field.
func (luo *LabelUpdateOne) SetDescription(s string) *LabelUpdateOne {
luo.mutation.SetDescription(s)
@@ -509,6 +450,12 @@ func (luo *LabelUpdateOne) RemoveItems(i ...*Item) *LabelUpdateOne {
return luo.RemoveItemIDs(ids...)
}
+// Where appends a list predicates to the LabelUpdate builder.
+func (luo *LabelUpdateOne) Where(ps ...predicate.Label) *LabelUpdateOne {
+ luo.mutation.Where(ps...)
+ return luo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (luo *LabelUpdateOne) Select(field string, fields ...string) *LabelUpdateOne {
@@ -518,47 +465,8 @@ func (luo *LabelUpdateOne) Select(field string, fields ...string) *LabelUpdateOn
// Save executes the query and returns the updated Label entity.
func (luo *LabelUpdateOne) Save(ctx context.Context) (*Label, error) {
- var (
- err error
- node *Label
- )
luo.defaults()
- if len(luo.hooks) == 0 {
- if err = luo.check(); err != nil {
- return nil, err
- }
- node, err = luo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LabelMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = luo.check(); err != nil {
- return nil, err
- }
- luo.mutation = mutation
- node, err = luo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(luo.hooks) - 1; i >= 0; i-- {
- if luo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = luo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, luo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Label)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from LabelMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -615,16 +523,10 @@ func (luo *LabelUpdateOne) check() error {
}
func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: label.Table,
- Columns: label.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: label.FieldID,
- },
- },
+ if err := luo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(label.Table, label.Columns, sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID))
id, ok := luo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Label.id" for update`)}
@@ -650,44 +552,22 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
}
}
if value, ok := luo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: label.FieldUpdatedAt,
- })
+ _spec.SetField(label.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := luo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldName,
- })
+ _spec.SetField(label.FieldName, field.TypeString, value)
}
if value, ok := luo.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldDescription,
- })
+ _spec.SetField(label.FieldDescription, field.TypeString, value)
}
if luo.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: label.FieldDescription,
- })
+ _spec.ClearField(label.FieldDescription, field.TypeString)
}
if value, ok := luo.mutation.Color(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: label.FieldColor,
- })
+ _spec.SetField(label.FieldColor, field.TypeString, value)
}
if luo.mutation.ColorCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: label.FieldColor,
- })
+ _spec.ClearField(label.FieldColor, field.TypeString)
}
if luo.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -697,10 +577,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
Columns: []string{label.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -713,10 +590,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
Columns: []string{label.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -732,10 +606,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -748,10 +619,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -767,10 +635,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
Columns: label.ItemsPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -789,5 +654,6 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
}
return nil, err
}
+ luo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/location.go b/backend/internal/data/ent/location.go
index 67abd30..640f05e 100644
--- a/backend/internal/data/ent/location.go
+++ b/backend/internal/data/ent/location.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -31,16 +32,17 @@ type Location struct {
Edges LocationEdges `json:"edges"`
group_locations *uuid.UUID
location_children *uuid.UUID
+ selectValues sql.SelectValues
}
// LocationEdges holds the relations/edges for other nodes in the graph.
type LocationEdges struct {
+ // Group holds the value of the group edge.
+ Group *Group `json:"group,omitempty"`
// Parent holds the value of the parent edge.
Parent *Location `json:"parent,omitempty"`
// Children holds the value of the children edge.
Children []*Location `json:"children,omitempty"`
- // Group holds the value of the group edge.
- Group *Group `json:"group,omitempty"`
// Items holds the value of the items edge.
Items []*Item `json:"items,omitempty"`
// loadedTypes holds the information for reporting if a
@@ -48,10 +50,23 @@ type LocationEdges struct {
loadedTypes [4]bool
}
+// GroupOrErr returns the Group value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e LocationEdges) GroupOrErr() (*Group, error) {
+ if e.loadedTypes[0] {
+ if e.Group == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: group.Label}
+ }
+ return e.Group, nil
+ }
+ return nil, &NotLoadedError{edge: "group"}
+}
+
// ParentOrErr returns the Parent value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e LocationEdges) ParentOrErr() (*Location, error) {
- if e.loadedTypes[0] {
+ if e.loadedTypes[1] {
if e.Parent == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: location.Label}
@@ -64,25 +79,12 @@ func (e LocationEdges) ParentOrErr() (*Location, error) {
// ChildrenOrErr returns the Children value or an error if the edge
// was not loaded in eager-loading.
func (e LocationEdges) ChildrenOrErr() ([]*Location, error) {
- if e.loadedTypes[1] {
+ if e.loadedTypes[2] {
return e.Children, nil
}
return nil, &NotLoadedError{edge: "children"}
}
-// GroupOrErr returns the Group value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e LocationEdges) GroupOrErr() (*Group, error) {
- if e.loadedTypes[2] {
- if e.Group == nil {
- // Edge was loaded but was not found.
- return nil, &NotFoundError{label: group.Label}
- }
- return e.Group, nil
- }
- return nil, &NotLoadedError{edge: "group"}
-}
-
// ItemsOrErr returns the Items value or an error if the edge
// was not loaded in eager-loading.
func (e LocationEdges) ItemsOrErr() ([]*Item, error) {
@@ -108,7 +110,7 @@ func (*Location) scanValues(columns []string) ([]any, error) {
case location.ForeignKeys[1]: // location_children
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type Location", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -166,36 +168,44 @@ func (l *Location) assignValues(columns []string, values []any) error {
l.location_children = new(uuid.UUID)
*l.location_children = *value.S.(*uuid.UUID)
}
+ default:
+ l.selectValues.Set(columns[i], values[i])
}
}
return nil
}
-// QueryParent queries the "parent" edge of the Location entity.
-func (l *Location) QueryParent() *LocationQuery {
- return (&LocationClient{config: l.config}).QueryParent(l)
-}
-
-// QueryChildren queries the "children" edge of the Location entity.
-func (l *Location) QueryChildren() *LocationQuery {
- return (&LocationClient{config: l.config}).QueryChildren(l)
+// Value returns the ent.Value that was dynamically selected and assigned to the Location.
+// This includes values selected through modifiers, order, etc.
+func (l *Location) Value(name string) (ent.Value, error) {
+ return l.selectValues.Get(name)
}
// QueryGroup queries the "group" edge of the Location entity.
func (l *Location) QueryGroup() *GroupQuery {
- return (&LocationClient{config: l.config}).QueryGroup(l)
+ return NewLocationClient(l.config).QueryGroup(l)
+}
+
+// QueryParent queries the "parent" edge of the Location entity.
+func (l *Location) QueryParent() *LocationQuery {
+ return NewLocationClient(l.config).QueryParent(l)
+}
+
+// QueryChildren queries the "children" edge of the Location entity.
+func (l *Location) QueryChildren() *LocationQuery {
+ return NewLocationClient(l.config).QueryChildren(l)
}
// QueryItems queries the "items" edge of the Location entity.
func (l *Location) QueryItems() *ItemQuery {
- return (&LocationClient{config: l.config}).QueryItems(l)
+ return NewLocationClient(l.config).QueryItems(l)
}
// Update returns a builder for updating this Location.
// Note that you need to call Location.Unwrap() before calling this method if this Location
// was returned from a transaction, and the transaction was committed or rolled back.
func (l *Location) Update() *LocationUpdateOne {
- return (&LocationClient{config: l.config}).UpdateOne(l)
+ return NewLocationClient(l.config).UpdateOne(l)
}
// Unwrap unwraps the Location entity that was returned from a transaction after it was closed,
@@ -231,9 +241,3 @@ func (l *Location) String() string {
// Locations is a parsable slice of Location.
type Locations []*Location
-
-func (l Locations) config(cfg config) {
- for _i := range l {
- l[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/location/location.go b/backend/internal/data/ent/location/location.go
index 96cb75c..4a7fc16 100644
--- a/backend/internal/data/ent/location/location.go
+++ b/backend/internal/data/ent/location/location.go
@@ -5,6 +5,8 @@ package location
import (
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -21,16 +23,23 @@ const (
FieldName = "name"
// FieldDescription holds the string denoting the description field in the database.
FieldDescription = "description"
+ // EdgeGroup holds the string denoting the group edge name in mutations.
+ EdgeGroup = "group"
// EdgeParent holds the string denoting the parent edge name in mutations.
EdgeParent = "parent"
// EdgeChildren holds the string denoting the children edge name in mutations.
EdgeChildren = "children"
- // EdgeGroup holds the string denoting the group edge name in mutations.
- EdgeGroup = "group"
// EdgeItems holds the string denoting the items edge name in mutations.
EdgeItems = "items"
// Table holds the table name of the location in the database.
Table = "locations"
+ // GroupTable is the table that holds the group relation/edge.
+ GroupTable = "locations"
+ // GroupInverseTable is the table name for the Group entity.
+ // It exists in this package in order to avoid circular dependency with the "group" package.
+ GroupInverseTable = "groups"
+ // GroupColumn is the table column denoting the group relation/edge.
+ GroupColumn = "group_locations"
// ParentTable is the table that holds the parent relation/edge.
ParentTable = "locations"
// ParentColumn is the table column denoting the parent relation/edge.
@@ -39,13 +48,6 @@ const (
ChildrenTable = "locations"
// ChildrenColumn is the table column denoting the children relation/edge.
ChildrenColumn = "location_children"
- // GroupTable is the table that holds the group relation/edge.
- GroupTable = "locations"
- // GroupInverseTable is the table name for the Group entity.
- // It exists in this package in order to avoid circular dependency with the "group" package.
- GroupInverseTable = "groups"
- // GroupColumn is the table column denoting the group relation/edge.
- GroupColumn = "group_locations"
// ItemsTable is the table that holds the items relation/edge.
ItemsTable = "items"
// ItemsInverseTable is the table name for the Item entity.
@@ -100,3 +102,101 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
+
+// OrderOption defines the ordering options for the Location queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByDescription orders the results by the description field.
+func ByDescription(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDescription, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByParentField orders the results by parent field.
+func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByChildrenCount orders the results by children count.
+func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
+ }
+}
+
+// ByChildren orders the results by children terms.
+func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByItemsCount orders the results by items count.
+func ByItemsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...)
+ }
+}
+
+// ByItems orders the results by items terms.
+func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newParentStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
+ )
+}
+func newChildrenStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
+ )
+}
+func newItemsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
+ )
+}
diff --git a/backend/internal/data/ent/location/where.go b/backend/internal/data/ent/location/where.go
index 73f28bd..a89ef4d 100644
--- a/backend/internal/data/ent/location/where.go
+++ b/backend/internal/data/ent/location/where.go
@@ -13,440 +13,309 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.Location(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.Location(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.Location(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldName, v))
}
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldDescription, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Location(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.Location(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.Location(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Location(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.Location(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.Location(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.Location(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.Location(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.Location(sql.FieldContainsFold(FieldName, v))
}
// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldEQ(FieldDescription, v))
}
// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldNEQ(FieldDescription, v))
}
// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldDescription), v...))
- })
+ return predicate.Location(sql.FieldIn(FieldDescription, vs...))
}
// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.Location {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldDescription), v...))
- })
+ return predicate.Location(sql.FieldNotIn(FieldDescription, vs...))
}
// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldGT(FieldDescription, v))
}
// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldGTE(FieldDescription, v))
}
// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldLT(FieldDescription, v))
}
// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldLTE(FieldDescription, v))
}
// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldContains(FieldDescription, v))
}
// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldHasPrefix(FieldDescription, v))
}
// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldHasSuffix(FieldDescription, v))
}
// DescriptionIsNil applies the IsNil predicate on the "description" field.
func DescriptionIsNil() predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldDescription)))
- })
+ return predicate.Location(sql.FieldIsNull(FieldDescription))
}
// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldDescription)))
- })
+ return predicate.Location(sql.FieldNotNull(FieldDescription))
}
// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldDescription), v))
- })
+ return predicate.Location(sql.FieldEqualFold(FieldDescription, v))
}
// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.Location {
+ return predicate.Location(sql.FieldContainsFold(FieldDescription, v))
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.Location {
return predicate.Location(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldDescription), v))
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.Location {
+ return predicate.Location(func(s *sql.Selector) {
+ step := newGroupStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
})
}
@@ -455,7 +324,6 @@ func HasParent() predicate.Location {
return predicate.Location(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ParentTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -465,11 +333,7 @@ func HasParent() predicate.Location {
// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
func HasParentWith(preds ...predicate.Location) predicate.Location {
return predicate.Location(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(Table, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
- )
+ step := newParentStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -483,7 +347,6 @@ func HasChildren() predicate.Location {
return predicate.Location(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ChildrenTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -493,39 +356,7 @@ func HasChildren() predicate.Location {
// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
func HasChildrenWith(preds ...predicate.Location) predicate.Location {
return predicate.Location(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(Table, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
- )
- sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
- for _, p := range preds {
- p(s)
- }
- })
- })
-}
-
-// HasGroup applies the HasEdge predicate on the "group" edge.
-func HasGroup() predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
- sqlgraph.HasNeighbors(s, step)
- })
-}
-
-// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
-func HasGroupWith(preds ...predicate.Group) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
+ step := newChildrenStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -539,7 +370,6 @@ func HasItems() predicate.Location {
return predicate.Location(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -549,11 +379,7 @@ func HasItems() predicate.Location {
// HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates).
func HasItemsWith(preds ...predicate.Item) predicate.Location {
return predicate.Location(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(ItemsInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
- )
+ step := newItemsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -564,32 +390,15 @@ func HasItemsWith(preds ...predicate.Item) predicate.Location {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Location) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Location(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Location) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.Location(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Location) predicate.Location {
- return predicate.Location(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.Location(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/location_create.go b/backend/internal/data/ent/location_create.go
index 35081c2..98f0f7a 100644
--- a/backend/internal/data/ent/location_create.go
+++ b/backend/internal/data/ent/location_create.go
@@ -85,6 +85,17 @@ func (lc *LocationCreate) SetNillableID(u *uuid.UUID) *LocationCreate {
return lc
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate {
+ lc.mutation.SetGroupID(id)
+ return lc
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate {
+ return lc.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Location entity by ID.
func (lc *LocationCreate) SetParentID(id uuid.UUID) *LocationCreate {
lc.mutation.SetParentID(id)
@@ -119,17 +130,6 @@ func (lc *LocationCreate) AddChildren(l ...*Location) *LocationCreate {
return lc.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate {
- lc.mutation.SetGroupID(id)
- return lc
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate {
- return lc.SetGroupID(g.ID)
-}
-
// AddItemIDs adds the "items" edge to the Item entity by IDs.
func (lc *LocationCreate) AddItemIDs(ids ...uuid.UUID) *LocationCreate {
lc.mutation.AddItemIDs(ids...)
@@ -152,50 +152,8 @@ func (lc *LocationCreate) Mutation() *LocationMutation {
// Save creates the Location in the database.
func (lc *LocationCreate) Save(ctx context.Context) (*Location, error) {
- var (
- err error
- node *Location
- )
lc.defaults()
- if len(lc.hooks) == 0 {
- if err = lc.check(); err != nil {
- return nil, err
- }
- node, err = lc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LocationMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = lc.check(); err != nil {
- return nil, err
- }
- lc.mutation = mutation
- if node, err = lc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(lc.hooks) - 1; i >= 0; i-- {
- if lc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = lc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, lc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Location)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from LocationMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -264,6 +222,9 @@ func (lc *LocationCreate) check() error {
}
func (lc *LocationCreate) sqlSave(ctx context.Context) (*Location, error) {
+ if err := lc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := lc.createSpec()
if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -278,56 +239,53 @@ func (lc *LocationCreate) sqlSave(ctx context.Context) (*Location, error) {
return nil, err
}
}
+ lc.mutation.id = &_node.ID
+ lc.mutation.done = true
return _node, nil
}
func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
var (
_node = &Location{config: lc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: location.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(location.Table, sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID))
)
if id, ok := lc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := lc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: location.FieldCreatedAt,
- })
+ _spec.SetField(location.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := lc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: location.FieldUpdatedAt,
- })
+ _spec.SetField(location.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := lc.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldName,
- })
+ _spec.SetField(location.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := lc.mutation.Description(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldDescription,
- })
+ _spec.SetField(location.FieldDescription, field.TypeString, value)
_node.Description = value
}
+ if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: location.GroupTable,
+ Columns: []string{location.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.group_locations = &nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
if nodes := lc.mutation.ParentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -336,10 +294,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
Columns: []string{location.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -356,10 +311,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -367,26 +319,6 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
- if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: location.GroupTable,
- Columns: []string{location.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _node.group_locations = &nodes[0]
- _spec.Edges = append(_spec.Edges, edge)
- }
if nodes := lc.mutation.ItemsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
@@ -395,10 +327,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -412,11 +341,15 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
// LocationCreateBulk is the builder for creating many Location entities in bulk.
type LocationCreateBulk struct {
config
+ err error
builders []*LocationCreate
}
// Save creates the Location entities in the database.
func (lcb *LocationCreateBulk) Save(ctx context.Context) ([]*Location, error) {
+ if lcb.err != nil {
+ return nil, lcb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(lcb.builders))
nodes := make([]*Location, len(lcb.builders))
mutators := make([]Mutator, len(lcb.builders))
@@ -433,8 +366,8 @@ func (lcb *LocationCreateBulk) Save(ctx context.Context) ([]*Location, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/location_delete.go b/backend/internal/data/ent/location_delete.go
index 7fd8e84..451b7f1 100644
--- a/backend/internal/data/ent/location_delete.go
+++ b/backend/internal/data/ent/location_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (ld *LocationDelete) Where(ps ...predicate.Location) *LocationDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ld *LocationDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(ld.hooks) == 0 {
- affected, err = ld.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LocationMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- ld.mutation = mutation
- affected, err = ld.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ld.hooks) - 1; i >= 0; i-- {
- if ld.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ld.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ld.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (ld *LocationDelete) ExecX(ctx context.Context) int {
}
func (ld *LocationDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: location.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(location.Table, sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID))
if ps := ld.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (ld *LocationDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ ld.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type LocationDeleteOne struct {
ld *LocationDelete
}
+// Where appends a list predicates to the LocationDelete builder.
+func (ldo *LocationDeleteOne) Where(ps ...predicate.Location) *LocationDeleteOne {
+ ldo.ld.mutation.Where(ps...)
+ return ldo
+}
+
// Exec executes the deletion query.
func (ldo *LocationDeleteOne) Exec(ctx context.Context) error {
n, err := ldo.ld.Exec(ctx)
@@ -111,5 +82,7 @@ func (ldo *LocationDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ldo *LocationDeleteOne) ExecX(ctx context.Context) {
- ldo.ld.ExecX(ctx)
+ if err := ldo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/location_query.go b/backend/internal/data/ent/location_query.go
index ff3014b..4aae965 100644
--- a/backend/internal/data/ent/location_query.go
+++ b/backend/internal/data/ent/location_query.go
@@ -21,15 +21,13 @@ import (
// LocationQuery is the builder for querying Location entities.
type LocationQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []location.OrderOption
+ inters []Interceptor
predicates []predicate.Location
+ withGroup *GroupQuery
withParent *LocationQuery
withChildren *LocationQuery
- withGroup *GroupQuery
withItems *ItemQuery
withFKs bool
// intermediate query (i.e. traversal path).
@@ -43,34 +41,56 @@ func (lq *LocationQuery) Where(ps ...predicate.Location) *LocationQuery {
return lq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (lq *LocationQuery) Limit(limit int) *LocationQuery {
- lq.limit = &limit
+ lq.ctx.Limit = &limit
return lq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (lq *LocationQuery) Offset(offset int) *LocationQuery {
- lq.offset = &offset
+ lq.ctx.Offset = &offset
return lq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (lq *LocationQuery) Unique(unique bool) *LocationQuery {
- lq.unique = &unique
+ lq.ctx.Unique = &unique
return lq
}
-// Order adds an order step to the query.
-func (lq *LocationQuery) Order(o ...OrderFunc) *LocationQuery {
+// Order specifies how the records should be ordered.
+func (lq *LocationQuery) Order(o ...location.OrderOption) *LocationQuery {
lq.order = append(lq.order, o...)
return lq
}
+// QueryGroup chains the current query on the "group" edge.
+func (lq *LocationQuery) QueryGroup() *GroupQuery {
+ query := (&GroupClient{config: lq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := lq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := lq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(location.Table, location.FieldID, selector),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// QueryParent chains the current query on the "parent" edge.
func (lq *LocationQuery) QueryParent() *LocationQuery {
- query := &LocationQuery{config: lq.config}
+ query := (&LocationClient{config: lq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
@@ -92,7 +112,7 @@ func (lq *LocationQuery) QueryParent() *LocationQuery {
// QueryChildren chains the current query on the "children" edge.
func (lq *LocationQuery) QueryChildren() *LocationQuery {
- query := &LocationQuery{config: lq.config}
+ query := (&LocationClient{config: lq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
@@ -112,31 +132,9 @@ func (lq *LocationQuery) QueryChildren() *LocationQuery {
return query
}
-// QueryGroup chains the current query on the "group" edge.
-func (lq *LocationQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: lq.config}
- query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
- if err := lq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- selector := lq.sqlQuery(ctx)
- if err := selector.Err(); err != nil {
- return nil, err
- }
- step := sqlgraph.NewStep(
- sqlgraph.From(location.Table, location.FieldID, selector),
- sqlgraph.To(group.Table, group.FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
- )
- fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)
- return fromU, nil
- }
- return query
-}
-
// QueryItems chains the current query on the "items" edge.
func (lq *LocationQuery) QueryItems() *ItemQuery {
- query := &ItemQuery{config: lq.config}
+ query := (&ItemClient{config: lq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
@@ -159,7 +157,7 @@ func (lq *LocationQuery) QueryItems() *ItemQuery {
// First returns the first Location entity from the query.
// Returns a *NotFoundError when no Location was found.
func (lq *LocationQuery) First(ctx context.Context) (*Location, error) {
- nodes, err := lq.Limit(1).All(ctx)
+ nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -182,7 +180,7 @@ func (lq *LocationQuery) FirstX(ctx context.Context) *Location {
// Returns a *NotFoundError when no Location ID was found.
func (lq *LocationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = lq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -205,7 +203,7 @@ func (lq *LocationQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Location entity is found.
// Returns a *NotFoundError when no Location entities are found.
func (lq *LocationQuery) Only(ctx context.Context) (*Location, error) {
- nodes, err := lq.Limit(2).All(ctx)
+ nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -233,7 +231,7 @@ func (lq *LocationQuery) OnlyX(ctx context.Context) *Location {
// Returns a *NotFoundError when no entities are found.
func (lq *LocationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = lq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -258,10 +256,12 @@ func (lq *LocationQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Locations.
func (lq *LocationQuery) All(ctx context.Context) ([]*Location, error) {
+ ctx = setContextOp(ctx, lq.ctx, "All")
if err := lq.prepareQuery(ctx); err != nil {
return nil, err
}
- return lq.sqlAll(ctx)
+ qr := querierAll[[]*Location, *LocationQuery]()
+ return withInterceptors[[]*Location](ctx, lq, qr, lq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -274,9 +274,12 @@ func (lq *LocationQuery) AllX(ctx context.Context) []*Location {
}
// IDs executes the query and returns a list of Location IDs.
-func (lq *LocationQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := lq.Select(location.FieldID).Scan(ctx, &ids); err != nil {
+func (lq *LocationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if lq.ctx.Unique == nil && lq.path != nil {
+ lq.Unique(true)
+ }
+ ctx = setContextOp(ctx, lq.ctx, "IDs")
+ if err = lq.Select(location.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -293,10 +296,11 @@ func (lq *LocationQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (lq *LocationQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, lq.ctx, "Count")
if err := lq.prepareQuery(ctx); err != nil {
return 0, err
}
- return lq.sqlCount(ctx)
+ return withInterceptors[int](ctx, lq, querierCount[*LocationQuery](), lq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -310,10 +314,15 @@ func (lq *LocationQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (lq *LocationQuery) Exist(ctx context.Context) (bool, error) {
- if err := lq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, lq.ctx, "Exist")
+ switch _, err := lq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return lq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -333,25 +342,35 @@ func (lq *LocationQuery) Clone() *LocationQuery {
}
return &LocationQuery{
config: lq.config,
- limit: lq.limit,
- offset: lq.offset,
- order: append([]OrderFunc{}, lq.order...),
+ ctx: lq.ctx.Clone(),
+ order: append([]location.OrderOption{}, lq.order...),
+ inters: append([]Interceptor{}, lq.inters...),
predicates: append([]predicate.Location{}, lq.predicates...),
+ withGroup: lq.withGroup.Clone(),
withParent: lq.withParent.Clone(),
withChildren: lq.withChildren.Clone(),
- withGroup: lq.withGroup.Clone(),
withItems: lq.withItems.Clone(),
// clone intermediate query.
- sql: lq.sql.Clone(),
- path: lq.path,
- unique: lq.unique,
+ sql: lq.sql.Clone(),
+ path: lq.path,
}
}
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery {
+ query := (&GroupClient{config: lq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ lq.withGroup = query
+ return lq
+}
+
// WithParent tells the query-builder to eager-load the nodes that are connected to
// the "parent" edge. The optional arguments are used to configure the query builder of the edge.
func (lq *LocationQuery) WithParent(opts ...func(*LocationQuery)) *LocationQuery {
- query := &LocationQuery{config: lq.config}
+ query := (&LocationClient{config: lq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -362,7 +381,7 @@ func (lq *LocationQuery) WithParent(opts ...func(*LocationQuery)) *LocationQuery
// WithChildren tells the query-builder to eager-load the nodes that are connected to
// the "children" edge. The optional arguments are used to configure the query builder of the edge.
func (lq *LocationQuery) WithChildren(opts ...func(*LocationQuery)) *LocationQuery {
- query := &LocationQuery{config: lq.config}
+ query := (&LocationClient{config: lq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -370,21 +389,10 @@ func (lq *LocationQuery) WithChildren(opts ...func(*LocationQuery)) *LocationQue
return lq
}
-// WithGroup tells the query-builder to eager-load the nodes that are connected to
-// the "group" edge. The optional arguments are used to configure the query builder of the edge.
-func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery {
- query := &GroupQuery{config: lq.config}
- for _, opt := range opts {
- opt(query)
- }
- lq.withGroup = query
- return lq
-}
-
// WithItems tells the query-builder to eager-load the nodes that are connected to
// the "items" edge. The optional arguments are used to configure the query builder of the edge.
func (lq *LocationQuery) WithItems(opts ...func(*ItemQuery)) *LocationQuery {
- query := &ItemQuery{config: lq.config}
+ query := (&ItemClient{config: lq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -407,16 +415,11 @@ func (lq *LocationQuery) WithItems(opts ...func(*ItemQuery)) *LocationQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (lq *LocationQuery) GroupBy(field string, fields ...string) *LocationGroupBy {
- grbuild := &LocationGroupBy{config: lq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := lq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return lq.sqlQuery(ctx), nil
- }
+ lq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &LocationGroupBy{build: lq}
+ grbuild.flds = &lq.ctx.Fields
grbuild.label = location.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -433,15 +436,30 @@ func (lq *LocationQuery) GroupBy(field string, fields ...string) *LocationGroupB
// Select(location.FieldCreatedAt).
// Scan(ctx, &v)
func (lq *LocationQuery) Select(fields ...string) *LocationSelect {
- lq.fields = append(lq.fields, fields...)
- selbuild := &LocationSelect{LocationQuery: lq}
- selbuild.label = location.Label
- selbuild.flds, selbuild.scan = &lq.fields, selbuild.Scan
- return selbuild
+ lq.ctx.Fields = append(lq.ctx.Fields, fields...)
+ sbuild := &LocationSelect{LocationQuery: lq}
+ sbuild.label = location.Label
+ sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a LocationSelect configured with the given aggregations.
+func (lq *LocationQuery) Aggregate(fns ...AggregateFunc) *LocationSelect {
+ return lq.Select().Aggregate(fns...)
}
func (lq *LocationQuery) prepareQuery(ctx context.Context) error {
- for _, f := range lq.fields {
+ for _, inter := range lq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, lq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range lq.ctx.Fields {
if !location.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -462,13 +480,13 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
withFKs = lq.withFKs
_spec = lq.querySpec()
loadedTypes = [4]bool{
+ lq.withGroup != nil,
lq.withParent != nil,
lq.withChildren != nil,
- lq.withGroup != nil,
lq.withItems != nil,
}
)
- if lq.withParent != nil || lq.withGroup != nil {
+ if lq.withGroup != nil || lq.withParent != nil {
withFKs = true
}
if withFKs {
@@ -492,6 +510,12 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
if len(nodes) == 0 {
return nodes, nil
}
+ if query := lq.withGroup; query != nil {
+ if err := lq.loadGroup(ctx, query, nodes, nil,
+ func(n *Location, e *Group) { n.Edges.Group = e }); err != nil {
+ return nil, err
+ }
+ }
if query := lq.withParent; query != nil {
if err := lq.loadParent(ctx, query, nodes, nil,
func(n *Location, e *Location) { n.Edges.Parent = e }); err != nil {
@@ -505,12 +529,6 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
return nil, err
}
}
- if query := lq.withGroup; query != nil {
- if err := lq.loadGroup(ctx, query, nodes, nil,
- func(n *Location, e *Group) { n.Edges.Group = e }); err != nil {
- return nil, err
- }
- }
if query := lq.withItems; query != nil {
if err := lq.loadItems(ctx, query, nodes,
func(n *Location) { n.Edges.Items = []*Item{} },
@@ -521,6 +539,38 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
return nodes, nil
}
+func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*Location)
+ for i := range nodes {
+ if nodes[i].group_locations == nil {
+ continue
+ }
+ fk := *nodes[i].group_locations
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(group.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
func (lq *LocationQuery) loadParent(ctx context.Context, query *LocationQuery, nodes []*Location, init func(*Location), assign func(*Location, *Location)) error {
ids := make([]uuid.UUID, 0, len(nodes))
nodeids := make(map[uuid.UUID][]*Location)
@@ -534,6 +584,9 @@ func (lq *LocationQuery) loadParent(ctx context.Context, query *LocationQuery, n
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(location.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -562,7 +615,7 @@ func (lq *LocationQuery) loadChildren(ctx context.Context, query *LocationQuery,
}
query.withFKs = true
query.Where(predicate.Location(func(s *sql.Selector) {
- s.Where(sql.InValues(location.ChildrenColumn, fks...))
+ s.Where(sql.InValues(s.C(location.ChildrenColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -575,41 +628,12 @@ func (lq *LocationQuery) loadChildren(ctx context.Context, query *LocationQuery,
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "location_children" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "location_children" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
return nil
}
-func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error {
- ids := make([]uuid.UUID, 0, len(nodes))
- nodeids := make(map[uuid.UUID][]*Location)
- for i := range nodes {
- if nodes[i].group_locations == nil {
- continue
- }
- fk := *nodes[i].group_locations
- if _, ok := nodeids[fk]; !ok {
- ids = append(ids, fk)
- }
- nodeids[fk] = append(nodeids[fk], nodes[i])
- }
- query.Where(group.IDIn(ids...))
- neighbors, err := query.All(ctx)
- if err != nil {
- return err
- }
- for _, n := range neighbors {
- nodes, ok := nodeids[n.ID]
- if !ok {
- return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID)
- }
- for i := range nodes {
- assign(nodes[i], n)
- }
- }
- return nil
-}
func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*Location, init func(*Location), assign func(*Location, *Item)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[uuid.UUID]*Location)
@@ -622,7 +646,7 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes
}
query.withFKs = true
query.Where(predicate.Item(func(s *sql.Selector) {
- s.Where(sql.InValues(location.ItemsColumn, fks...))
+ s.Where(sql.InValues(s.C(location.ItemsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -635,7 +659,7 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "location_items" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "location_items" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -644,41 +668,22 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes
func (lq *LocationQuery) sqlCount(ctx context.Context) (int, error) {
_spec := lq.querySpec()
- _spec.Node.Columns = lq.fields
- if len(lq.fields) > 0 {
- _spec.Unique = lq.unique != nil && *lq.unique
+ _spec.Node.Columns = lq.ctx.Fields
+ if len(lq.ctx.Fields) > 0 {
+ _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, lq.driver, _spec)
}
-func (lq *LocationQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := lq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: location.Table,
- Columns: location.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
- From: lq.sql,
- Unique: true,
- }
- if unique := lq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(location.Table, location.Columns, sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID))
+ _spec.From = lq.sql
+ if unique := lq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if lq.path != nil {
+ _spec.Unique = true
}
- if fields := lq.fields; len(fields) > 0 {
+ if fields := lq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, location.FieldID)
for i := range fields {
@@ -694,10 +699,10 @@ func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := lq.limit; limit != nil {
+ if limit := lq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := lq.offset; offset != nil {
+ if offset := lq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := lq.order; len(ps) > 0 {
@@ -713,7 +718,7 @@ func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(lq.driver.Dialect())
t1 := builder.Table(location.Table)
- columns := lq.fields
+ columns := lq.ctx.Fields
if len(columns) == 0 {
columns = location.Columns
}
@@ -722,7 +727,7 @@ func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = lq.sql
selector.Select(selector.Columns(columns...)...)
}
- if lq.unique != nil && *lq.unique {
+ if lq.ctx.Unique != nil && *lq.ctx.Unique {
selector.Distinct()
}
for _, p := range lq.predicates {
@@ -731,12 +736,12 @@ func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range lq.order {
p(selector)
}
- if offset := lq.offset; offset != nil {
+ if offset := lq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := lq.limit; limit != nil {
+ if limit := lq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -744,13 +749,8 @@ func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
// LocationGroupBy is the group-by builder for Location entities.
type LocationGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *LocationQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -759,74 +759,77 @@ func (lgb *LocationGroupBy) Aggregate(fns ...AggregateFunc) *LocationGroupBy {
return lgb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (lgb *LocationGroupBy) Scan(ctx context.Context, v any) error {
- query, err := lgb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy")
+ if err := lgb.build.prepareQuery(ctx); err != nil {
return err
}
- lgb.sql = query
- return lgb.sqlScan(ctx, v)
+ return scanWithInterceptors[*LocationQuery, *LocationGroupBy](ctx, lgb.build, lgb, lgb.build.inters, v)
}
-func (lgb *LocationGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range lgb.fields {
- if !location.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := lgb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := lgb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (lgb *LocationGroupBy) sqlQuery() *sql.Selector {
- selector := lgb.sql.Select()
+func (lgb *LocationGroupBy) sqlScan(ctx context.Context, root *LocationQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(lgb.fns))
for _, fn := range lgb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(lgb.fields)+len(lgb.fns))
- for _, f := range lgb.fields {
+ columns := make([]string, 0, len(*lgb.flds)+len(lgb.fns))
+ for _, f := range *lgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(lgb.fields...)...)
+ selector.GroupBy(selector.Columns(*lgb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := lgb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// LocationSelect is the builder for selecting fields of Location entities.
type LocationSelect struct {
*LocationQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ls *LocationSelect) Aggregate(fns ...AggregateFunc) *LocationSelect {
+ ls.fns = append(ls.fns, fns...)
+ return ls
}
// Scan applies the selector query and scans the result into the given value.
func (ls *LocationSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ls.ctx, "Select")
if err := ls.prepareQuery(ctx); err != nil {
return err
}
- ls.sql = ls.LocationQuery.sqlQuery(ctx)
- return ls.sqlScan(ctx, v)
+ return scanWithInterceptors[*LocationQuery, *LocationSelect](ctx, ls.LocationQuery, ls, ls.inters, v)
}
-func (ls *LocationSelect) sqlScan(ctx context.Context, v any) error {
+func (ls *LocationSelect) sqlScan(ctx context.Context, root *LocationQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ls.fns))
+ for _, fn := range ls.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ls.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := ls.sql.Query()
+ query, args := selector.Query()
if err := ls.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/location_update.go b/backend/internal/data/ent/location_update.go
index eaeb530..d569b21 100644
--- a/backend/internal/data/ent/location_update.go
+++ b/backend/internal/data/ent/location_update.go
@@ -43,6 +43,14 @@ func (lu *LocationUpdate) SetName(s string) *LocationUpdate {
return lu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (lu *LocationUpdate) SetNillableName(s *string) *LocationUpdate {
+ if s != nil {
+ lu.SetName(*s)
+ }
+ return lu
+}
+
// SetDescription sets the "description" field.
func (lu *LocationUpdate) SetDescription(s string) *LocationUpdate {
lu.mutation.SetDescription(s)
@@ -63,6 +71,17 @@ func (lu *LocationUpdate) ClearDescription() *LocationUpdate {
return lu
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate {
+ lu.mutation.SetGroupID(id)
+ return lu
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate {
+ return lu.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Location entity by ID.
func (lu *LocationUpdate) SetParentID(id uuid.UUID) *LocationUpdate {
lu.mutation.SetParentID(id)
@@ -97,17 +116,6 @@ func (lu *LocationUpdate) AddChildren(l ...*Location) *LocationUpdate {
return lu.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate {
- lu.mutation.SetGroupID(id)
- return lu
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate {
- return lu.SetGroupID(g.ID)
-}
-
// AddItemIDs adds the "items" edge to the Item entity by IDs.
func (lu *LocationUpdate) AddItemIDs(ids ...uuid.UUID) *LocationUpdate {
lu.mutation.AddItemIDs(ids...)
@@ -128,6 +136,12 @@ func (lu *LocationUpdate) Mutation() *LocationMutation {
return lu.mutation
}
+// ClearGroup clears the "group" edge to the Group entity.
+func (lu *LocationUpdate) ClearGroup() *LocationUpdate {
+ lu.mutation.ClearGroup()
+ return lu
+}
+
// ClearParent clears the "parent" edge to the Location entity.
func (lu *LocationUpdate) ClearParent() *LocationUpdate {
lu.mutation.ClearParent()
@@ -155,12 +169,6 @@ func (lu *LocationUpdate) RemoveChildren(l ...*Location) *LocationUpdate {
return lu.RemoveChildIDs(ids...)
}
-// ClearGroup clears the "group" edge to the Group entity.
-func (lu *LocationUpdate) ClearGroup() *LocationUpdate {
- lu.mutation.ClearGroup()
- return lu
-}
-
// ClearItems clears all "items" edges to the Item entity.
func (lu *LocationUpdate) ClearItems() *LocationUpdate {
lu.mutation.ClearItems()
@@ -184,41 +192,8 @@ func (lu *LocationUpdate) RemoveItems(i ...*Item) *LocationUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (lu *LocationUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
lu.defaults()
- if len(lu.hooks) == 0 {
- if err = lu.check(); err != nil {
- return 0, err
- }
- affected, err = lu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LocationMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = lu.check(); err != nil {
- return 0, err
- }
- lu.mutation = mutation
- affected, err = lu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(lu.hooks) - 1; i >= 0; i-- {
- if lu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = lu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, lu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -270,16 +245,10 @@ func (lu *LocationUpdate) check() error {
}
func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: location.Table,
- Columns: location.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
+ if err := lu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(location.Table, location.Columns, sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID))
if ps := lu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -288,31 +257,45 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := lu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: location.FieldUpdatedAt,
- })
+ _spec.SetField(location.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := lu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldName,
- })
+ _spec.SetField(location.FieldName, field.TypeString, value)
}
if value, ok := lu.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldDescription,
- })
+ _spec.SetField(location.FieldDescription, field.TypeString, value)
}
if lu.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: location.FieldDescription,
- })
+ _spec.ClearField(location.FieldDescription, field.TypeString)
+ }
+ if lu.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: location.GroupTable,
+ Columns: []string{location.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: location.GroupTable,
+ Columns: []string{location.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if lu.mutation.ParentCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -322,10 +305,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -338,10 +318,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -357,10 +334,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -373,10 +347,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -392,45 +363,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if lu.mutation.GroupCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: location.GroupTable,
- Columns: []string{location.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: location.GroupTable,
- Columns: []string{location.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -446,10 +379,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -462,10 +392,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -481,10 +408,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -500,6 +424,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ lu.mutation.done = true
return n, nil
}
@@ -523,6 +448,14 @@ func (luo *LocationUpdateOne) SetName(s string) *LocationUpdateOne {
return luo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (luo *LocationUpdateOne) SetNillableName(s *string) *LocationUpdateOne {
+ if s != nil {
+ luo.SetName(*s)
+ }
+ return luo
+}
+
// SetDescription sets the "description" field.
func (luo *LocationUpdateOne) SetDescription(s string) *LocationUpdateOne {
luo.mutation.SetDescription(s)
@@ -543,6 +476,17 @@ func (luo *LocationUpdateOne) ClearDescription() *LocationUpdateOne {
return luo
}
+// SetGroupID sets the "group" edge to the Group entity by ID.
+func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne {
+ luo.mutation.SetGroupID(id)
+ return luo
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne {
+ return luo.SetGroupID(g.ID)
+}
+
// SetParentID sets the "parent" edge to the Location entity by ID.
func (luo *LocationUpdateOne) SetParentID(id uuid.UUID) *LocationUpdateOne {
luo.mutation.SetParentID(id)
@@ -577,17 +521,6 @@ func (luo *LocationUpdateOne) AddChildren(l ...*Location) *LocationUpdateOne {
return luo.AddChildIDs(ids...)
}
-// SetGroupID sets the "group" edge to the Group entity by ID.
-func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne {
- luo.mutation.SetGroupID(id)
- return luo
-}
-
-// SetGroup sets the "group" edge to the Group entity.
-func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne {
- return luo.SetGroupID(g.ID)
-}
-
// AddItemIDs adds the "items" edge to the Item entity by IDs.
func (luo *LocationUpdateOne) AddItemIDs(ids ...uuid.UUID) *LocationUpdateOne {
luo.mutation.AddItemIDs(ids...)
@@ -608,6 +541,12 @@ func (luo *LocationUpdateOne) Mutation() *LocationMutation {
return luo.mutation
}
+// ClearGroup clears the "group" edge to the Group entity.
+func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne {
+ luo.mutation.ClearGroup()
+ return luo
+}
+
// ClearParent clears the "parent" edge to the Location entity.
func (luo *LocationUpdateOne) ClearParent() *LocationUpdateOne {
luo.mutation.ClearParent()
@@ -635,12 +574,6 @@ func (luo *LocationUpdateOne) RemoveChildren(l ...*Location) *LocationUpdateOne
return luo.RemoveChildIDs(ids...)
}
-// ClearGroup clears the "group" edge to the Group entity.
-func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne {
- luo.mutation.ClearGroup()
- return luo
-}
-
// ClearItems clears all "items" edges to the Item entity.
func (luo *LocationUpdateOne) ClearItems() *LocationUpdateOne {
luo.mutation.ClearItems()
@@ -662,6 +595,12 @@ func (luo *LocationUpdateOne) RemoveItems(i ...*Item) *LocationUpdateOne {
return luo.RemoveItemIDs(ids...)
}
+// Where appends a list predicates to the LocationUpdate builder.
+func (luo *LocationUpdateOne) Where(ps ...predicate.Location) *LocationUpdateOne {
+ luo.mutation.Where(ps...)
+ return luo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (luo *LocationUpdateOne) Select(field string, fields ...string) *LocationUpdateOne {
@@ -671,47 +610,8 @@ func (luo *LocationUpdateOne) Select(field string, fields ...string) *LocationUp
// Save executes the query and returns the updated Location entity.
func (luo *LocationUpdateOne) Save(ctx context.Context) (*Location, error) {
- var (
- err error
- node *Location
- )
luo.defaults()
- if len(luo.hooks) == 0 {
- if err = luo.check(); err != nil {
- return nil, err
- }
- node, err = luo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*LocationMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = luo.check(); err != nil {
- return nil, err
- }
- luo.mutation = mutation
- node, err = luo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(luo.hooks) - 1; i >= 0; i-- {
- if luo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = luo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, luo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*Location)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from LocationMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -763,16 +663,10 @@ func (luo *LocationUpdateOne) check() error {
}
func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: location.Table,
- Columns: location.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
+ if err := luo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(location.Table, location.Columns, sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID))
id, ok := luo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Location.id" for update`)}
@@ -798,31 +692,45 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
}
}
if value, ok := luo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: location.FieldUpdatedAt,
- })
+ _spec.SetField(location.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := luo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldName,
- })
+ _spec.SetField(location.FieldName, field.TypeString, value)
}
if value, ok := luo.mutation.Description(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: location.FieldDescription,
- })
+ _spec.SetField(location.FieldDescription, field.TypeString, value)
}
if luo.mutation.DescriptionCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Column: location.FieldDescription,
- })
+ _spec.ClearField(location.FieldDescription, field.TypeString)
+ }
+ if luo.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: location.GroupTable,
+ Columns: []string{location.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: location.GroupTable,
+ Columns: []string{location.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if luo.mutation.ParentCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -832,10 +740,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -848,10 +753,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -867,10 +769,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -883,10 +782,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -902,45 +798,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: location.FieldID,
- },
- },
- }
- for _, k := range nodes {
- edge.Target.Nodes = append(edge.Target.Nodes, k)
- }
- _spec.Edges.Add = append(_spec.Edges.Add, edge)
- }
- if luo.mutation.GroupCleared() {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: location.GroupTable,
- Columns: []string{location.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
- },
- }
- _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
- }
- if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 {
- edge := &sqlgraph.EdgeSpec{
- Rel: sqlgraph.M2O,
- Inverse: true,
- Table: location.GroupTable,
- Columns: []string{location.GroupColumn},
- Bidi: false,
- Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -956,10 +814,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -972,10 +827,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -991,10 +843,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
Columns: []string{location.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: item.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1013,5 +862,6 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
}
return nil, err
}
+ luo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/ent/maintenanceentry.go b/backend/internal/data/ent/maintenanceentry.go
new file mode 100644
index 0000000..af35e0b
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry.go
@@ -0,0 +1,217 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+)
+
+// MaintenanceEntry is the model entity for the MaintenanceEntry schema.
+type MaintenanceEntry struct {
+ config `json:"-"`
+ // ID of the ent.
+ ID uuid.UUID `json:"id,omitempty"`
+ // CreatedAt holds the value of the "created_at" field.
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ // UpdatedAt holds the value of the "updated_at" field.
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ // ItemID holds the value of the "item_id" field.
+ ItemID uuid.UUID `json:"item_id,omitempty"`
+ // Date holds the value of the "date" field.
+ Date time.Time `json:"date,omitempty"`
+ // ScheduledDate holds the value of the "scheduled_date" field.
+ ScheduledDate time.Time `json:"scheduled_date,omitempty"`
+ // Name holds the value of the "name" field.
+ Name string `json:"name,omitempty"`
+ // Description holds the value of the "description" field.
+ Description string `json:"description,omitempty"`
+ // Cost holds the value of the "cost" field.
+ Cost float64 `json:"cost,omitempty"`
+ // Edges holds the relations/edges for other nodes in the graph.
+ // The values are being populated by the MaintenanceEntryQuery when eager-loading is set.
+ Edges MaintenanceEntryEdges `json:"edges"`
+ selectValues sql.SelectValues
+}
+
+// MaintenanceEntryEdges holds the relations/edges for other nodes in the graph.
+type MaintenanceEntryEdges struct {
+ // Item holds the value of the item edge.
+ Item *Item `json:"item,omitempty"`
+ // loadedTypes holds the information for reporting if a
+ // type was loaded (or requested) in eager-loading or not.
+ loadedTypes [1]bool
+}
+
+// ItemOrErr returns the Item value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e MaintenanceEntryEdges) ItemOrErr() (*Item, error) {
+ if e.loadedTypes[0] {
+ if e.Item == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: item.Label}
+ }
+ return e.Item, nil
+ }
+ return nil, &NotLoadedError{edge: "item"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*MaintenanceEntry) scanValues(columns []string) ([]any, error) {
+ values := make([]any, len(columns))
+ for i := range columns {
+ switch columns[i] {
+ case maintenanceentry.FieldCost:
+ values[i] = new(sql.NullFloat64)
+ case maintenanceentry.FieldName, maintenanceentry.FieldDescription:
+ values[i] = new(sql.NullString)
+ case maintenanceentry.FieldCreatedAt, maintenanceentry.FieldUpdatedAt, maintenanceentry.FieldDate, maintenanceentry.FieldScheduledDate:
+ values[i] = new(sql.NullTime)
+ case maintenanceentry.FieldID, maintenanceentry.FieldItemID:
+ values[i] = new(uuid.UUID)
+ default:
+ values[i] = new(sql.UnknownType)
+ }
+ }
+ return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the MaintenanceEntry fields.
+func (me *MaintenanceEntry) assignValues(columns []string, values []any) error {
+ if m, n := len(values), len(columns); m < n {
+ return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+ }
+ for i := range columns {
+ switch columns[i] {
+ case maintenanceentry.FieldID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field id", values[i])
+ } else if value != nil {
+ me.ID = *value
+ }
+ case maintenanceentry.FieldCreatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field created_at", values[i])
+ } else if value.Valid {
+ me.CreatedAt = value.Time
+ }
+ case maintenanceentry.FieldUpdatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+ } else if value.Valid {
+ me.UpdatedAt = value.Time
+ }
+ case maintenanceentry.FieldItemID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field item_id", values[i])
+ } else if value != nil {
+ me.ItemID = *value
+ }
+ case maintenanceentry.FieldDate:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field date", values[i])
+ } else if value.Valid {
+ me.Date = value.Time
+ }
+ case maintenanceentry.FieldScheduledDate:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field scheduled_date", values[i])
+ } else if value.Valid {
+ me.ScheduledDate = value.Time
+ }
+ case maintenanceentry.FieldName:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field name", values[i])
+ } else if value.Valid {
+ me.Name = value.String
+ }
+ case maintenanceentry.FieldDescription:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field description", values[i])
+ } else if value.Valid {
+ me.Description = value.String
+ }
+ case maintenanceentry.FieldCost:
+ if value, ok := values[i].(*sql.NullFloat64); !ok {
+ return fmt.Errorf("unexpected type %T for field cost", values[i])
+ } else if value.Valid {
+ me.Cost = value.Float64
+ }
+ default:
+ me.selectValues.Set(columns[i], values[i])
+ }
+ }
+ return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the MaintenanceEntry.
+// This includes values selected through modifiers, order, etc.
+func (me *MaintenanceEntry) Value(name string) (ent.Value, error) {
+ return me.selectValues.Get(name)
+}
+
+// QueryItem queries the "item" edge of the MaintenanceEntry entity.
+func (me *MaintenanceEntry) QueryItem() *ItemQuery {
+ return NewMaintenanceEntryClient(me.config).QueryItem(me)
+}
+
+// Update returns a builder for updating this MaintenanceEntry.
+// Note that you need to call MaintenanceEntry.Unwrap() before calling this method if this MaintenanceEntry
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (me *MaintenanceEntry) Update() *MaintenanceEntryUpdateOne {
+ return NewMaintenanceEntryClient(me.config).UpdateOne(me)
+}
+
+// Unwrap unwraps the MaintenanceEntry entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (me *MaintenanceEntry) Unwrap() *MaintenanceEntry {
+ _tx, ok := me.config.driver.(*txDriver)
+ if !ok {
+ panic("ent: MaintenanceEntry is not a transactional entity")
+ }
+ me.config.driver = _tx.drv
+ return me
+}
+
+// String implements the fmt.Stringer.
+func (me *MaintenanceEntry) String() string {
+ var builder strings.Builder
+ builder.WriteString("MaintenanceEntry(")
+ builder.WriteString(fmt.Sprintf("id=%v, ", me.ID))
+ builder.WriteString("created_at=")
+ builder.WriteString(me.CreatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("updated_at=")
+ builder.WriteString(me.UpdatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("item_id=")
+ builder.WriteString(fmt.Sprintf("%v", me.ItemID))
+ builder.WriteString(", ")
+ builder.WriteString("date=")
+ builder.WriteString(me.Date.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("scheduled_date=")
+ builder.WriteString(me.ScheduledDate.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("name=")
+ builder.WriteString(me.Name)
+ builder.WriteString(", ")
+ builder.WriteString("description=")
+ builder.WriteString(me.Description)
+ builder.WriteString(", ")
+ builder.WriteString("cost=")
+ builder.WriteString(fmt.Sprintf("%v", me.Cost))
+ builder.WriteByte(')')
+ return builder.String()
+}
+
+// MaintenanceEntries is a parsable slice of MaintenanceEntry.
+type MaintenanceEntries []*MaintenanceEntry
diff --git a/backend/internal/data/ent/maintenanceentry/maintenanceentry.go b/backend/internal/data/ent/maintenanceentry/maintenanceentry.go
new file mode 100644
index 0000000..b4b8142
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry/maintenanceentry.go
@@ -0,0 +1,147 @@
+// Code generated by ent, DO NOT EDIT.
+
+package maintenanceentry
+
+import (
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+)
+
+const (
+ // Label holds the string label denoting the maintenanceentry type in the database.
+ Label = "maintenance_entry"
+ // FieldID holds the string denoting the id field in the database.
+ FieldID = "id"
+ // FieldCreatedAt holds the string denoting the created_at field in the database.
+ FieldCreatedAt = "created_at"
+ // FieldUpdatedAt holds the string denoting the updated_at field in the database.
+ FieldUpdatedAt = "updated_at"
+ // FieldItemID holds the string denoting the item_id field in the database.
+ FieldItemID = "item_id"
+ // FieldDate holds the string denoting the date field in the database.
+ FieldDate = "date"
+ // FieldScheduledDate holds the string denoting the scheduled_date field in the database.
+ FieldScheduledDate = "scheduled_date"
+ // FieldName holds the string denoting the name field in the database.
+ FieldName = "name"
+ // FieldDescription holds the string denoting the description field in the database.
+ FieldDescription = "description"
+ // FieldCost holds the string denoting the cost field in the database.
+ FieldCost = "cost"
+ // EdgeItem holds the string denoting the item edge name in mutations.
+ EdgeItem = "item"
+ // Table holds the table name of the maintenanceentry in the database.
+ Table = "maintenance_entries"
+ // ItemTable is the table that holds the item relation/edge.
+ ItemTable = "maintenance_entries"
+ // ItemInverseTable is the table name for the Item entity.
+ // It exists in this package in order to avoid circular dependency with the "item" package.
+ ItemInverseTable = "items"
+ // ItemColumn is the table column denoting the item relation/edge.
+ ItemColumn = "item_id"
+)
+
+// Columns holds all SQL columns for maintenanceentry fields.
+var Columns = []string{
+ FieldID,
+ FieldCreatedAt,
+ FieldUpdatedAt,
+ FieldItemID,
+ FieldDate,
+ FieldScheduledDate,
+ FieldName,
+ FieldDescription,
+ FieldCost,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+ for i := range Columns {
+ if column == Columns[i] {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ // DefaultCreatedAt holds the default value on creation for the "created_at" field.
+ DefaultCreatedAt func() time.Time
+ // DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+ DefaultUpdatedAt func() time.Time
+ // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+ UpdateDefaultUpdatedAt func() time.Time
+ // NameValidator is a validator for the "name" field. It is called by the builders before save.
+ NameValidator func(string) error
+ // DescriptionValidator is a validator for the "description" field. It is called by the builders before save.
+ DescriptionValidator func(string) error
+ // DefaultCost holds the default value on creation for the "cost" field.
+ DefaultCost float64
+ // DefaultID holds the default value on creation for the "id" field.
+ DefaultID func() uuid.UUID
+)
+
+// OrderOption defines the ordering options for the MaintenanceEntry queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByItemID orders the results by the item_id field.
+func ByItemID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldItemID, opts...).ToFunc()
+}
+
+// ByDate orders the results by the date field.
+func ByDate(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDate, opts...).ToFunc()
+}
+
+// ByScheduledDate orders the results by the scheduled_date field.
+func ByScheduledDate(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldScheduledDate, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByDescription orders the results by the description field.
+func ByDescription(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldDescription, opts...).ToFunc()
+}
+
+// ByCost orders the results by the cost field.
+func ByCost(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCost, opts...).ToFunc()
+}
+
+// ByItemField orders the results by item field.
+func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newItemStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ItemInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
+ )
+}
diff --git a/backend/internal/data/ent/maintenanceentry/where.go b/backend/internal/data/ent/maintenanceentry/where.go
new file mode 100644
index 0000000..85e736d
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry/where.go
@@ -0,0 +1,515 @@
+// Code generated by ent, DO NOT EDIT.
+
+package maintenanceentry
+
+import (
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldID, id))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// ItemID applies equality check predicate on the "item_id" field. It's identical to ItemIDEQ.
+func ItemID(v uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldItemID, v))
+}
+
+// Date applies equality check predicate on the "date" field. It's identical to DateEQ.
+func Date(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldDate, v))
+}
+
+// ScheduledDate applies equality check predicate on the "scheduled_date" field. It's identical to ScheduledDateEQ.
+func ScheduledDate(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldScheduledDate, v))
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldName, v))
+}
+
+// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
+func Description(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldDescription, v))
+}
+
+// Cost applies equality check predicate on the "cost" field. It's identical to CostEQ.
+func Cost(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldCost, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// ItemIDEQ applies the EQ predicate on the "item_id" field.
+func ItemIDEQ(v uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldItemID, v))
+}
+
+// ItemIDNEQ applies the NEQ predicate on the "item_id" field.
+func ItemIDNEQ(v uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldItemID, v))
+}
+
+// ItemIDIn applies the In predicate on the "item_id" field.
+func ItemIDIn(vs ...uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldItemID, vs...))
+}
+
+// ItemIDNotIn applies the NotIn predicate on the "item_id" field.
+func ItemIDNotIn(vs ...uuid.UUID) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldItemID, vs...))
+}
+
+// DateEQ applies the EQ predicate on the "date" field.
+func DateEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldDate, v))
+}
+
+// DateNEQ applies the NEQ predicate on the "date" field.
+func DateNEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldDate, v))
+}
+
+// DateIn applies the In predicate on the "date" field.
+func DateIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldDate, vs...))
+}
+
+// DateNotIn applies the NotIn predicate on the "date" field.
+func DateNotIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldDate, vs...))
+}
+
+// DateGT applies the GT predicate on the "date" field.
+func DateGT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldDate, v))
+}
+
+// DateGTE applies the GTE predicate on the "date" field.
+func DateGTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldDate, v))
+}
+
+// DateLT applies the LT predicate on the "date" field.
+func DateLT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldDate, v))
+}
+
+// DateLTE applies the LTE predicate on the "date" field.
+func DateLTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldDate, v))
+}
+
+// DateIsNil applies the IsNil predicate on the "date" field.
+func DateIsNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIsNull(FieldDate))
+}
+
+// DateNotNil applies the NotNil predicate on the "date" field.
+func DateNotNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotNull(FieldDate))
+}
+
+// ScheduledDateEQ applies the EQ predicate on the "scheduled_date" field.
+func ScheduledDateEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldScheduledDate, v))
+}
+
+// ScheduledDateNEQ applies the NEQ predicate on the "scheduled_date" field.
+func ScheduledDateNEQ(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldScheduledDate, v))
+}
+
+// ScheduledDateIn applies the In predicate on the "scheduled_date" field.
+func ScheduledDateIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldScheduledDate, vs...))
+}
+
+// ScheduledDateNotIn applies the NotIn predicate on the "scheduled_date" field.
+func ScheduledDateNotIn(vs ...time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldScheduledDate, vs...))
+}
+
+// ScheduledDateGT applies the GT predicate on the "scheduled_date" field.
+func ScheduledDateGT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldScheduledDate, v))
+}
+
+// ScheduledDateGTE applies the GTE predicate on the "scheduled_date" field.
+func ScheduledDateGTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldScheduledDate, v))
+}
+
+// ScheduledDateLT applies the LT predicate on the "scheduled_date" field.
+func ScheduledDateLT(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldScheduledDate, v))
+}
+
+// ScheduledDateLTE applies the LTE predicate on the "scheduled_date" field.
+func ScheduledDateLTE(v time.Time) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldScheduledDate, v))
+}
+
+// ScheduledDateIsNil applies the IsNil predicate on the "scheduled_date" field.
+func ScheduledDateIsNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIsNull(FieldScheduledDate))
+}
+
+// ScheduledDateNotNil applies the NotNil predicate on the "scheduled_date" field.
+func ScheduledDateNotNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotNull(FieldScheduledDate))
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldName, v))
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldName, v))
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldName, vs...))
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldName, vs...))
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldName, v))
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldName, v))
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldName, v))
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldName, v))
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldContains(FieldName, v))
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldHasPrefix(FieldName, v))
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldHasSuffix(FieldName, v))
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEqualFold(FieldName, v))
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldContainsFold(FieldName, v))
+}
+
+// DescriptionEQ applies the EQ predicate on the "description" field.
+func DescriptionEQ(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldDescription, v))
+}
+
+// DescriptionNEQ applies the NEQ predicate on the "description" field.
+func DescriptionNEQ(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldDescription, v))
+}
+
+// DescriptionIn applies the In predicate on the "description" field.
+func DescriptionIn(vs ...string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldDescription, vs...))
+}
+
+// DescriptionNotIn applies the NotIn predicate on the "description" field.
+func DescriptionNotIn(vs ...string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldDescription, vs...))
+}
+
+// DescriptionGT applies the GT predicate on the "description" field.
+func DescriptionGT(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldDescription, v))
+}
+
+// DescriptionGTE applies the GTE predicate on the "description" field.
+func DescriptionGTE(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldDescription, v))
+}
+
+// DescriptionLT applies the LT predicate on the "description" field.
+func DescriptionLT(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldDescription, v))
+}
+
+// DescriptionLTE applies the LTE predicate on the "description" field.
+func DescriptionLTE(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldDescription, v))
+}
+
+// DescriptionContains applies the Contains predicate on the "description" field.
+func DescriptionContains(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldContains(FieldDescription, v))
+}
+
+// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
+func DescriptionHasPrefix(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldHasPrefix(FieldDescription, v))
+}
+
+// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
+func DescriptionHasSuffix(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldHasSuffix(FieldDescription, v))
+}
+
+// DescriptionIsNil applies the IsNil predicate on the "description" field.
+func DescriptionIsNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIsNull(FieldDescription))
+}
+
+// DescriptionNotNil applies the NotNil predicate on the "description" field.
+func DescriptionNotNil() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotNull(FieldDescription))
+}
+
+// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
+func DescriptionEqualFold(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEqualFold(FieldDescription, v))
+}
+
+// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
+func DescriptionContainsFold(v string) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldContainsFold(FieldDescription, v))
+}
+
+// CostEQ applies the EQ predicate on the "cost" field.
+func CostEQ(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldEQ(FieldCost, v))
+}
+
+// CostNEQ applies the NEQ predicate on the "cost" field.
+func CostNEQ(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNEQ(FieldCost, v))
+}
+
+// CostIn applies the In predicate on the "cost" field.
+func CostIn(vs ...float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldIn(FieldCost, vs...))
+}
+
+// CostNotIn applies the NotIn predicate on the "cost" field.
+func CostNotIn(vs ...float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldNotIn(FieldCost, vs...))
+}
+
+// CostGT applies the GT predicate on the "cost" field.
+func CostGT(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGT(FieldCost, v))
+}
+
+// CostGTE applies the GTE predicate on the "cost" field.
+func CostGTE(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldGTE(FieldCost, v))
+}
+
+// CostLT applies the LT predicate on the "cost" field.
+func CostLT(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLT(FieldCost, v))
+}
+
+// CostLTE applies the LTE predicate on the "cost" field.
+func CostLTE(v float64) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.FieldLTE(FieldCost, v))
+}
+
+// HasItem applies the HasEdge predicate on the "item" edge.
+func HasItem() predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates).
+func HasItemWith(preds ...predicate.Item) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(func(s *sql.Selector) {
+ step := newItemStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.MaintenanceEntry) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.MaintenanceEntry) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.MaintenanceEntry) predicate.MaintenanceEntry {
+ return predicate.MaintenanceEntry(sql.NotPredicates(p))
+}
diff --git a/backend/internal/data/ent/maintenanceentry_create.go b/backend/internal/data/ent/maintenanceentry_create.go
new file mode 100644
index 0000000..ea71a4d
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry_create.go
@@ -0,0 +1,388 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+)
+
+// MaintenanceEntryCreate is the builder for creating a MaintenanceEntry entity.
+type MaintenanceEntryCreate struct {
+ config
+ mutation *MaintenanceEntryMutation
+ hooks []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (mec *MaintenanceEntryCreate) SetCreatedAt(t time.Time) *MaintenanceEntryCreate {
+ mec.mutation.SetCreatedAt(t)
+ return mec
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableCreatedAt(t *time.Time) *MaintenanceEntryCreate {
+ if t != nil {
+ mec.SetCreatedAt(*t)
+ }
+ return mec
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (mec *MaintenanceEntryCreate) SetUpdatedAt(t time.Time) *MaintenanceEntryCreate {
+ mec.mutation.SetUpdatedAt(t)
+ return mec
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableUpdatedAt(t *time.Time) *MaintenanceEntryCreate {
+ if t != nil {
+ mec.SetUpdatedAt(*t)
+ }
+ return mec
+}
+
+// SetItemID sets the "item_id" field.
+func (mec *MaintenanceEntryCreate) SetItemID(u uuid.UUID) *MaintenanceEntryCreate {
+ mec.mutation.SetItemID(u)
+ return mec
+}
+
+// SetDate sets the "date" field.
+func (mec *MaintenanceEntryCreate) SetDate(t time.Time) *MaintenanceEntryCreate {
+ mec.mutation.SetDate(t)
+ return mec
+}
+
+// SetNillableDate sets the "date" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableDate(t *time.Time) *MaintenanceEntryCreate {
+ if t != nil {
+ mec.SetDate(*t)
+ }
+ return mec
+}
+
+// SetScheduledDate sets the "scheduled_date" field.
+func (mec *MaintenanceEntryCreate) SetScheduledDate(t time.Time) *MaintenanceEntryCreate {
+ mec.mutation.SetScheduledDate(t)
+ return mec
+}
+
+// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryCreate {
+ if t != nil {
+ mec.SetScheduledDate(*t)
+ }
+ return mec
+}
+
+// SetName sets the "name" field.
+func (mec *MaintenanceEntryCreate) SetName(s string) *MaintenanceEntryCreate {
+ mec.mutation.SetName(s)
+ return mec
+}
+
+// SetDescription sets the "description" field.
+func (mec *MaintenanceEntryCreate) SetDescription(s string) *MaintenanceEntryCreate {
+ mec.mutation.SetDescription(s)
+ return mec
+}
+
+// SetNillableDescription sets the "description" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableDescription(s *string) *MaintenanceEntryCreate {
+ if s != nil {
+ mec.SetDescription(*s)
+ }
+ return mec
+}
+
+// SetCost sets the "cost" field.
+func (mec *MaintenanceEntryCreate) SetCost(f float64) *MaintenanceEntryCreate {
+ mec.mutation.SetCost(f)
+ return mec
+}
+
+// SetNillableCost sets the "cost" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableCost(f *float64) *MaintenanceEntryCreate {
+ if f != nil {
+ mec.SetCost(*f)
+ }
+ return mec
+}
+
+// SetID sets the "id" field.
+func (mec *MaintenanceEntryCreate) SetID(u uuid.UUID) *MaintenanceEntryCreate {
+ mec.mutation.SetID(u)
+ return mec
+}
+
+// SetNillableID sets the "id" field if the given value is not nil.
+func (mec *MaintenanceEntryCreate) SetNillableID(u *uuid.UUID) *MaintenanceEntryCreate {
+ if u != nil {
+ mec.SetID(*u)
+ }
+ return mec
+}
+
+// SetItem sets the "item" edge to the Item entity.
+func (mec *MaintenanceEntryCreate) SetItem(i *Item) *MaintenanceEntryCreate {
+ return mec.SetItemID(i.ID)
+}
+
+// Mutation returns the MaintenanceEntryMutation object of the builder.
+func (mec *MaintenanceEntryCreate) Mutation() *MaintenanceEntryMutation {
+ return mec.mutation
+}
+
+// Save creates the MaintenanceEntry in the database.
+func (mec *MaintenanceEntryCreate) Save(ctx context.Context) (*MaintenanceEntry, error) {
+ mec.defaults()
+ return withHooks(ctx, mec.sqlSave, mec.mutation, mec.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (mec *MaintenanceEntryCreate) SaveX(ctx context.Context) *MaintenanceEntry {
+ v, err := mec.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (mec *MaintenanceEntryCreate) Exec(ctx context.Context) error {
+ _, err := mec.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (mec *MaintenanceEntryCreate) ExecX(ctx context.Context) {
+ if err := mec.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (mec *MaintenanceEntryCreate) defaults() {
+ if _, ok := mec.mutation.CreatedAt(); !ok {
+ v := maintenanceentry.DefaultCreatedAt()
+ mec.mutation.SetCreatedAt(v)
+ }
+ if _, ok := mec.mutation.UpdatedAt(); !ok {
+ v := maintenanceentry.DefaultUpdatedAt()
+ mec.mutation.SetUpdatedAt(v)
+ }
+ if _, ok := mec.mutation.Cost(); !ok {
+ v := maintenanceentry.DefaultCost
+ mec.mutation.SetCost(v)
+ }
+ if _, ok := mec.mutation.ID(); !ok {
+ v := maintenanceentry.DefaultID()
+ mec.mutation.SetID(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (mec *MaintenanceEntryCreate) check() error {
+ if _, ok := mec.mutation.CreatedAt(); !ok {
+ return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "MaintenanceEntry.created_at"`)}
+ }
+ if _, ok := mec.mutation.UpdatedAt(); !ok {
+ return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "MaintenanceEntry.updated_at"`)}
+ }
+ if _, ok := mec.mutation.ItemID(); !ok {
+ return &ValidationError{Name: "item_id", err: errors.New(`ent: missing required field "MaintenanceEntry.item_id"`)}
+ }
+ if _, ok := mec.mutation.Name(); !ok {
+ return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "MaintenanceEntry.name"`)}
+ }
+ if v, ok := mec.mutation.Name(); ok {
+ if err := maintenanceentry.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.name": %w`, err)}
+ }
+ }
+ if v, ok := mec.mutation.Description(); ok {
+ if err := maintenanceentry.DescriptionValidator(v); err != nil {
+ return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.description": %w`, err)}
+ }
+ }
+ if _, ok := mec.mutation.Cost(); !ok {
+ return &ValidationError{Name: "cost", err: errors.New(`ent: missing required field "MaintenanceEntry.cost"`)}
+ }
+ if _, ok := mec.mutation.ItemID(); !ok {
+ return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "MaintenanceEntry.item"`)}
+ }
+ return nil
+}
+
+func (mec *MaintenanceEntryCreate) sqlSave(ctx context.Context) (*MaintenanceEntry, error) {
+ if err := mec.check(); err != nil {
+ return nil, err
+ }
+ _node, _spec := mec.createSpec()
+ if err := sqlgraph.CreateNode(ctx, mec.driver, _spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ if _spec.ID.Value != nil {
+ if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
+ _node.ID = *id
+ } else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
+ return nil, err
+ }
+ }
+ mec.mutation.id = &_node.ID
+ mec.mutation.done = true
+ return _node, nil
+}
+
+func (mec *MaintenanceEntryCreate) createSpec() (*MaintenanceEntry, *sqlgraph.CreateSpec) {
+ var (
+ _node = &MaintenanceEntry{config: mec.config}
+ _spec = sqlgraph.NewCreateSpec(maintenanceentry.Table, sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID))
+ )
+ if id, ok := mec.mutation.ID(); ok {
+ _node.ID = id
+ _spec.ID.Value = &id
+ }
+ if value, ok := mec.mutation.CreatedAt(); ok {
+ _spec.SetField(maintenanceentry.FieldCreatedAt, field.TypeTime, value)
+ _node.CreatedAt = value
+ }
+ if value, ok := mec.mutation.UpdatedAt(); ok {
+ _spec.SetField(maintenanceentry.FieldUpdatedAt, field.TypeTime, value)
+ _node.UpdatedAt = value
+ }
+ if value, ok := mec.mutation.Date(); ok {
+ _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value)
+ _node.Date = value
+ }
+ if value, ok := mec.mutation.ScheduledDate(); ok {
+ _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value)
+ _node.ScheduledDate = value
+ }
+ if value, ok := mec.mutation.Name(); ok {
+ _spec.SetField(maintenanceentry.FieldName, field.TypeString, value)
+ _node.Name = value
+ }
+ if value, ok := mec.mutation.Description(); ok {
+ _spec.SetField(maintenanceentry.FieldDescription, field.TypeString, value)
+ _node.Description = value
+ }
+ if value, ok := mec.mutation.Cost(); ok {
+ _spec.SetField(maintenanceentry.FieldCost, field.TypeFloat64, value)
+ _node.Cost = value
+ }
+ if nodes := mec.mutation.ItemIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: maintenanceentry.ItemTable,
+ Columns: []string{maintenanceentry.ItemColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.ItemID = nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// MaintenanceEntryCreateBulk is the builder for creating many MaintenanceEntry entities in bulk.
+type MaintenanceEntryCreateBulk struct {
+ config
+ err error
+ builders []*MaintenanceEntryCreate
+}
+
+// Save creates the MaintenanceEntry entities in the database.
+func (mecb *MaintenanceEntryCreateBulk) Save(ctx context.Context) ([]*MaintenanceEntry, error) {
+ if mecb.err != nil {
+ return nil, mecb.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(mecb.builders))
+ nodes := make([]*MaintenanceEntry, len(mecb.builders))
+ mutators := make([]Mutator, len(mecb.builders))
+ for i := range mecb.builders {
+ func(i int, root context.Context) {
+ builder := mecb.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*MaintenanceEntryMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, mecb.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, mecb.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, mecb.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (mecb *MaintenanceEntryCreateBulk) SaveX(ctx context.Context) []*MaintenanceEntry {
+ v, err := mecb.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (mecb *MaintenanceEntryCreateBulk) Exec(ctx context.Context) error {
+ _, err := mecb.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (mecb *MaintenanceEntryCreateBulk) ExecX(ctx context.Context) {
+ if err := mecb.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/maintenanceentry_delete.go b/backend/internal/data/ent/maintenanceentry_delete.go
new file mode 100644
index 0000000..0323ae9
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// MaintenanceEntryDelete is the builder for deleting a MaintenanceEntry entity.
+type MaintenanceEntryDelete struct {
+ config
+ hooks []Hook
+ mutation *MaintenanceEntryMutation
+}
+
+// Where appends a list predicates to the MaintenanceEntryDelete builder.
+func (med *MaintenanceEntryDelete) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryDelete {
+ med.mutation.Where(ps...)
+ return med
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (med *MaintenanceEntryDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, med.sqlExec, med.mutation, med.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (med *MaintenanceEntryDelete) ExecX(ctx context.Context) int {
+ n, err := med.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (med *MaintenanceEntryDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(maintenanceentry.Table, sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID))
+ if ps := med.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, med.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ med.mutation.done = true
+ return affected, err
+}
+
+// MaintenanceEntryDeleteOne is the builder for deleting a single MaintenanceEntry entity.
+type MaintenanceEntryDeleteOne struct {
+ med *MaintenanceEntryDelete
+}
+
+// Where appends a list predicates to the MaintenanceEntryDelete builder.
+func (medo *MaintenanceEntryDeleteOne) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryDeleteOne {
+ medo.med.mutation.Where(ps...)
+ return medo
+}
+
+// Exec executes the deletion query.
+func (medo *MaintenanceEntryDeleteOne) Exec(ctx context.Context) error {
+ n, err := medo.med.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{maintenanceentry.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (medo *MaintenanceEntryDeleteOne) ExecX(ctx context.Context) {
+ if err := medo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/maintenanceentry_query.go b/backend/internal/data/ent/maintenanceentry_query.go
new file mode 100644
index 0000000..8d41f75
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry_query.go
@@ -0,0 +1,606 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// MaintenanceEntryQuery is the builder for querying MaintenanceEntry entities.
+type MaintenanceEntryQuery struct {
+ config
+ ctx *QueryContext
+ order []maintenanceentry.OrderOption
+ inters []Interceptor
+ predicates []predicate.MaintenanceEntry
+ withItem *ItemQuery
+ // intermediate query (i.e. traversal path).
+ sql *sql.Selector
+ path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the MaintenanceEntryQuery builder.
+func (meq *MaintenanceEntryQuery) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryQuery {
+ meq.predicates = append(meq.predicates, ps...)
+ return meq
+}
+
+// Limit the number of records to be returned by this query.
+func (meq *MaintenanceEntryQuery) Limit(limit int) *MaintenanceEntryQuery {
+ meq.ctx.Limit = &limit
+ return meq
+}
+
+// Offset to start from.
+func (meq *MaintenanceEntryQuery) Offset(offset int) *MaintenanceEntryQuery {
+ meq.ctx.Offset = &offset
+ return meq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (meq *MaintenanceEntryQuery) Unique(unique bool) *MaintenanceEntryQuery {
+ meq.ctx.Unique = &unique
+ return meq
+}
+
+// Order specifies how the records should be ordered.
+func (meq *MaintenanceEntryQuery) Order(o ...maintenanceentry.OrderOption) *MaintenanceEntryQuery {
+ meq.order = append(meq.order, o...)
+ return meq
+}
+
+// QueryItem chains the current query on the "item" edge.
+func (meq *MaintenanceEntryQuery) QueryItem() *ItemQuery {
+ query := (&ItemClient{config: meq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := meq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := meq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(maintenanceentry.Table, maintenanceentry.FieldID, selector),
+ sqlgraph.To(item.Table, item.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, maintenanceentry.ItemTable, maintenanceentry.ItemColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(meq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
+// First returns the first MaintenanceEntry entity from the query.
+// Returns a *NotFoundError when no MaintenanceEntry was found.
+func (meq *MaintenanceEntryQuery) First(ctx context.Context) (*MaintenanceEntry, error) {
+ nodes, err := meq.Limit(1).All(setContextOp(ctx, meq.ctx, "First"))
+ if err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nil, &NotFoundError{maintenanceentry.Label}
+ }
+ return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) FirstX(ctx context.Context) *MaintenanceEntry {
+ node, err := meq.First(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return node
+}
+
+// FirstID returns the first MaintenanceEntry ID from the query.
+// Returns a *NotFoundError when no MaintenanceEntry ID was found.
+func (meq *MaintenanceEntryQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = meq.Limit(1).IDs(setContextOp(ctx, meq.ctx, "FirstID")); err != nil {
+ return
+ }
+ if len(ids) == 0 {
+ err = &NotFoundError{maintenanceentry.Label}
+ return
+ }
+ return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) FirstIDX(ctx context.Context) uuid.UUID {
+ id, err := meq.FirstID(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return id
+}
+
+// Only returns a single MaintenanceEntry entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one MaintenanceEntry entity is found.
+// Returns a *NotFoundError when no MaintenanceEntry entities are found.
+func (meq *MaintenanceEntryQuery) Only(ctx context.Context) (*MaintenanceEntry, error) {
+ nodes, err := meq.Limit(2).All(setContextOp(ctx, meq.ctx, "Only"))
+ if err != nil {
+ return nil, err
+ }
+ switch len(nodes) {
+ case 1:
+ return nodes[0], nil
+ case 0:
+ return nil, &NotFoundError{maintenanceentry.Label}
+ default:
+ return nil, &NotSingularError{maintenanceentry.Label}
+ }
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) OnlyX(ctx context.Context) *MaintenanceEntry {
+ node, err := meq.Only(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// OnlyID is like Only, but returns the only MaintenanceEntry ID in the query.
+// Returns a *NotSingularError when more than one MaintenanceEntry ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (meq *MaintenanceEntryQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = meq.Limit(2).IDs(setContextOp(ctx, meq.ctx, "OnlyID")); err != nil {
+ return
+ }
+ switch len(ids) {
+ case 1:
+ id = ids[0]
+ case 0:
+ err = &NotFoundError{maintenanceentry.Label}
+ default:
+ err = &NotSingularError{maintenanceentry.Label}
+ }
+ return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) OnlyIDX(ctx context.Context) uuid.UUID {
+ id, err := meq.OnlyID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// All executes the query and returns a list of MaintenanceEntries.
+func (meq *MaintenanceEntryQuery) All(ctx context.Context) ([]*MaintenanceEntry, error) {
+ ctx = setContextOp(ctx, meq.ctx, "All")
+ if err := meq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ qr := querierAll[[]*MaintenanceEntry, *MaintenanceEntryQuery]()
+ return withInterceptors[[]*MaintenanceEntry](ctx, meq, qr, meq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) AllX(ctx context.Context) []*MaintenanceEntry {
+ nodes, err := meq.All(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return nodes
+}
+
+// IDs executes the query and returns a list of MaintenanceEntry IDs.
+func (meq *MaintenanceEntryQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if meq.ctx.Unique == nil && meq.path != nil {
+ meq.Unique(true)
+ }
+ ctx = setContextOp(ctx, meq.ctx, "IDs")
+ if err = meq.Select(maintenanceentry.FieldID).Scan(ctx, &ids); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) IDsX(ctx context.Context) []uuid.UUID {
+ ids, err := meq.IDs(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return ids
+}
+
+// Count returns the count of the given query.
+func (meq *MaintenanceEntryQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, meq.ctx, "Count")
+ if err := meq.prepareQuery(ctx); err != nil {
+ return 0, err
+ }
+ return withInterceptors[int](ctx, meq, querierCount[*MaintenanceEntryQuery](), meq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) CountX(ctx context.Context) int {
+ count, err := meq.Count(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (meq *MaintenanceEntryQuery) Exist(ctx context.Context) (bool, error) {
+ ctx = setContextOp(ctx, meq.ctx, "Exist")
+ switch _, err := meq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
+ }
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (meq *MaintenanceEntryQuery) ExistX(ctx context.Context) bool {
+ exist, err := meq.Exist(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return exist
+}
+
+// Clone returns a duplicate of the MaintenanceEntryQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (meq *MaintenanceEntryQuery) Clone() *MaintenanceEntryQuery {
+ if meq == nil {
+ return nil
+ }
+ return &MaintenanceEntryQuery{
+ config: meq.config,
+ ctx: meq.ctx.Clone(),
+ order: append([]maintenanceentry.OrderOption{}, meq.order...),
+ inters: append([]Interceptor{}, meq.inters...),
+ predicates: append([]predicate.MaintenanceEntry{}, meq.predicates...),
+ withItem: meq.withItem.Clone(),
+ // clone intermediate query.
+ sql: meq.sql.Clone(),
+ path: meq.path,
+ }
+}
+
+// WithItem tells the query-builder to eager-load the nodes that are connected to
+// the "item" edge. The optional arguments are used to configure the query builder of the edge.
+func (meq *MaintenanceEntryQuery) WithItem(opts ...func(*ItemQuery)) *MaintenanceEntryQuery {
+ query := (&ItemClient{config: meq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ meq.withItem = query
+ return meq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.MaintenanceEntry.Query().
+// GroupBy(maintenanceentry.FieldCreatedAt).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (meq *MaintenanceEntryQuery) GroupBy(field string, fields ...string) *MaintenanceEntryGroupBy {
+ meq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &MaintenanceEntryGroupBy{build: meq}
+ grbuild.flds = &meq.ctx.Fields
+ grbuild.label = maintenanceentry.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// }
+//
+// client.MaintenanceEntry.Query().
+// Select(maintenanceentry.FieldCreatedAt).
+// Scan(ctx, &v)
+func (meq *MaintenanceEntryQuery) Select(fields ...string) *MaintenanceEntrySelect {
+ meq.ctx.Fields = append(meq.ctx.Fields, fields...)
+ sbuild := &MaintenanceEntrySelect{MaintenanceEntryQuery: meq}
+ sbuild.label = maintenanceentry.Label
+ sbuild.flds, sbuild.scan = &meq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a MaintenanceEntrySelect configured with the given aggregations.
+func (meq *MaintenanceEntryQuery) Aggregate(fns ...AggregateFunc) *MaintenanceEntrySelect {
+ return meq.Select().Aggregate(fns...)
+}
+
+func (meq *MaintenanceEntryQuery) prepareQuery(ctx context.Context) error {
+ for _, inter := range meq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, meq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range meq.ctx.Fields {
+ if !maintenanceentry.ValidColumn(f) {
+ return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ }
+ if meq.path != nil {
+ prev, err := meq.path(ctx)
+ if err != nil {
+ return err
+ }
+ meq.sql = prev
+ }
+ return nil
+}
+
+func (meq *MaintenanceEntryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*MaintenanceEntry, error) {
+ var (
+ nodes = []*MaintenanceEntry{}
+ _spec = meq.querySpec()
+ loadedTypes = [1]bool{
+ meq.withItem != nil,
+ }
+ )
+ _spec.ScanValues = func(columns []string) ([]any, error) {
+ return (*MaintenanceEntry).scanValues(nil, columns)
+ }
+ _spec.Assign = func(columns []string, values []any) error {
+ node := &MaintenanceEntry{config: meq.config}
+ nodes = append(nodes, node)
+ node.Edges.loadedTypes = loadedTypes
+ return node.assignValues(columns, values)
+ }
+ for i := range hooks {
+ hooks[i](ctx, _spec)
+ }
+ if err := sqlgraph.QueryNodes(ctx, meq.driver, _spec); err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nodes, nil
+ }
+ if query := meq.withItem; query != nil {
+ if err := meq.loadItem(ctx, query, nodes, nil,
+ func(n *MaintenanceEntry, e *Item) { n.Edges.Item = e }); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+func (meq *MaintenanceEntryQuery) loadItem(ctx context.Context, query *ItemQuery, nodes []*MaintenanceEntry, init func(*MaintenanceEntry), assign func(*MaintenanceEntry, *Item)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*MaintenanceEntry)
+ for i := range nodes {
+ fk := nodes[i].ItemID
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(item.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "item_id" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
+
+func (meq *MaintenanceEntryQuery) sqlCount(ctx context.Context) (int, error) {
+ _spec := meq.querySpec()
+ _spec.Node.Columns = meq.ctx.Fields
+ if len(meq.ctx.Fields) > 0 {
+ _spec.Unique = meq.ctx.Unique != nil && *meq.ctx.Unique
+ }
+ return sqlgraph.CountNodes(ctx, meq.driver, _spec)
+}
+
+func (meq *MaintenanceEntryQuery) querySpec() *sqlgraph.QuerySpec {
+ _spec := sqlgraph.NewQuerySpec(maintenanceentry.Table, maintenanceentry.Columns, sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID))
+ _spec.From = meq.sql
+ if unique := meq.ctx.Unique; unique != nil {
+ _spec.Unique = *unique
+ } else if meq.path != nil {
+ _spec.Unique = true
+ }
+ if fields := meq.ctx.Fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, maintenanceentry.FieldID)
+ for i := range fields {
+ if fields[i] != maintenanceentry.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+ }
+ }
+ if meq.withItem != nil {
+ _spec.Node.AddColumnOnce(maintenanceentry.FieldItemID)
+ }
+ }
+ if ps := meq.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if limit := meq.ctx.Limit; limit != nil {
+ _spec.Limit = *limit
+ }
+ if offset := meq.ctx.Offset; offset != nil {
+ _spec.Offset = *offset
+ }
+ if ps := meq.order; len(ps) > 0 {
+ _spec.Order = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ return _spec
+}
+
+func (meq *MaintenanceEntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
+ builder := sql.Dialect(meq.driver.Dialect())
+ t1 := builder.Table(maintenanceentry.Table)
+ columns := meq.ctx.Fields
+ if len(columns) == 0 {
+ columns = maintenanceentry.Columns
+ }
+ selector := builder.Select(t1.Columns(columns...)...).From(t1)
+ if meq.sql != nil {
+ selector = meq.sql
+ selector.Select(selector.Columns(columns...)...)
+ }
+ if meq.ctx.Unique != nil && *meq.ctx.Unique {
+ selector.Distinct()
+ }
+ for _, p := range meq.predicates {
+ p(selector)
+ }
+ for _, p := range meq.order {
+ p(selector)
+ }
+ if offset := meq.ctx.Offset; offset != nil {
+ // limit is mandatory for offset clause. We start
+ // with default value, and override it below if needed.
+ selector.Offset(*offset).Limit(math.MaxInt32)
+ }
+ if limit := meq.ctx.Limit; limit != nil {
+ selector.Limit(*limit)
+ }
+ return selector
+}
+
+// MaintenanceEntryGroupBy is the group-by builder for MaintenanceEntry entities.
+type MaintenanceEntryGroupBy struct {
+ selector
+ build *MaintenanceEntryQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (megb *MaintenanceEntryGroupBy) Aggregate(fns ...AggregateFunc) *MaintenanceEntryGroupBy {
+ megb.fns = append(megb.fns, fns...)
+ return megb
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (megb *MaintenanceEntryGroupBy) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, megb.build.ctx, "GroupBy")
+ if err := megb.build.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*MaintenanceEntryQuery, *MaintenanceEntryGroupBy](ctx, megb.build, megb, megb.build.inters, v)
+}
+
+func (megb *MaintenanceEntryGroupBy) sqlScan(ctx context.Context, root *MaintenanceEntryQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
+ aggregation := make([]string, 0, len(megb.fns))
+ for _, fn := range megb.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ if len(selector.SelectedColumns()) == 0 {
+ columns := make([]string, 0, len(*megb.flds)+len(megb.fns))
+ for _, f := range *megb.flds {
+ columns = append(columns, selector.C(f))
+ }
+ columns = append(columns, aggregation...)
+ selector.Select(columns...)
+ }
+ selector.GroupBy(selector.Columns(*megb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := megb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
+
+// MaintenanceEntrySelect is the builder for selecting fields of MaintenanceEntry entities.
+type MaintenanceEntrySelect struct {
+ *MaintenanceEntryQuery
+ selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (mes *MaintenanceEntrySelect) Aggregate(fns ...AggregateFunc) *MaintenanceEntrySelect {
+ mes.fns = append(mes.fns, fns...)
+ return mes
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (mes *MaintenanceEntrySelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, mes.ctx, "Select")
+ if err := mes.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*MaintenanceEntryQuery, *MaintenanceEntrySelect](ctx, mes.MaintenanceEntryQuery, mes, mes.inters, v)
+}
+
+func (mes *MaintenanceEntrySelect) sqlScan(ctx context.Context, root *MaintenanceEntryQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(mes.fns))
+ for _, fn := range mes.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*mes.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := mes.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
diff --git a/backend/internal/data/ent/maintenanceentry_update.go b/backend/internal/data/ent/maintenanceentry_update.go
new file mode 100644
index 0000000..3616d32
--- /dev/null
+++ b/backend/internal/data/ent/maintenanceentry_update.go
@@ -0,0 +1,608 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// MaintenanceEntryUpdate is the builder for updating MaintenanceEntry entities.
+type MaintenanceEntryUpdate struct {
+ config
+ hooks []Hook
+ mutation *MaintenanceEntryMutation
+}
+
+// Where appends a list predicates to the MaintenanceEntryUpdate builder.
+func (meu *MaintenanceEntryUpdate) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryUpdate {
+ meu.mutation.Where(ps...)
+ return meu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (meu *MaintenanceEntryUpdate) SetUpdatedAt(t time.Time) *MaintenanceEntryUpdate {
+ meu.mutation.SetUpdatedAt(t)
+ return meu
+}
+
+// SetItemID sets the "item_id" field.
+func (meu *MaintenanceEntryUpdate) SetItemID(u uuid.UUID) *MaintenanceEntryUpdate {
+ meu.mutation.SetItemID(u)
+ return meu
+}
+
+// SetNillableItemID sets the "item_id" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableItemID(u *uuid.UUID) *MaintenanceEntryUpdate {
+ if u != nil {
+ meu.SetItemID(*u)
+ }
+ return meu
+}
+
+// SetDate sets the "date" field.
+func (meu *MaintenanceEntryUpdate) SetDate(t time.Time) *MaintenanceEntryUpdate {
+ meu.mutation.SetDate(t)
+ return meu
+}
+
+// SetNillableDate sets the "date" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableDate(t *time.Time) *MaintenanceEntryUpdate {
+ if t != nil {
+ meu.SetDate(*t)
+ }
+ return meu
+}
+
+// ClearDate clears the value of the "date" field.
+func (meu *MaintenanceEntryUpdate) ClearDate() *MaintenanceEntryUpdate {
+ meu.mutation.ClearDate()
+ return meu
+}
+
+// SetScheduledDate sets the "scheduled_date" field.
+func (meu *MaintenanceEntryUpdate) SetScheduledDate(t time.Time) *MaintenanceEntryUpdate {
+ meu.mutation.SetScheduledDate(t)
+ return meu
+}
+
+// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryUpdate {
+ if t != nil {
+ meu.SetScheduledDate(*t)
+ }
+ return meu
+}
+
+// ClearScheduledDate clears the value of the "scheduled_date" field.
+func (meu *MaintenanceEntryUpdate) ClearScheduledDate() *MaintenanceEntryUpdate {
+ meu.mutation.ClearScheduledDate()
+ return meu
+}
+
+// SetName sets the "name" field.
+func (meu *MaintenanceEntryUpdate) SetName(s string) *MaintenanceEntryUpdate {
+ meu.mutation.SetName(s)
+ return meu
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableName(s *string) *MaintenanceEntryUpdate {
+ if s != nil {
+ meu.SetName(*s)
+ }
+ return meu
+}
+
+// SetDescription sets the "description" field.
+func (meu *MaintenanceEntryUpdate) SetDescription(s string) *MaintenanceEntryUpdate {
+ meu.mutation.SetDescription(s)
+ return meu
+}
+
+// SetNillableDescription sets the "description" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableDescription(s *string) *MaintenanceEntryUpdate {
+ if s != nil {
+ meu.SetDescription(*s)
+ }
+ return meu
+}
+
+// ClearDescription clears the value of the "description" field.
+func (meu *MaintenanceEntryUpdate) ClearDescription() *MaintenanceEntryUpdate {
+ meu.mutation.ClearDescription()
+ return meu
+}
+
+// SetCost sets the "cost" field.
+func (meu *MaintenanceEntryUpdate) SetCost(f float64) *MaintenanceEntryUpdate {
+ meu.mutation.ResetCost()
+ meu.mutation.SetCost(f)
+ return meu
+}
+
+// SetNillableCost sets the "cost" field if the given value is not nil.
+func (meu *MaintenanceEntryUpdate) SetNillableCost(f *float64) *MaintenanceEntryUpdate {
+ if f != nil {
+ meu.SetCost(*f)
+ }
+ return meu
+}
+
+// AddCost adds f to the "cost" field.
+func (meu *MaintenanceEntryUpdate) AddCost(f float64) *MaintenanceEntryUpdate {
+ meu.mutation.AddCost(f)
+ return meu
+}
+
+// SetItem sets the "item" edge to the Item entity.
+func (meu *MaintenanceEntryUpdate) SetItem(i *Item) *MaintenanceEntryUpdate {
+ return meu.SetItemID(i.ID)
+}
+
+// Mutation returns the MaintenanceEntryMutation object of the builder.
+func (meu *MaintenanceEntryUpdate) Mutation() *MaintenanceEntryMutation {
+ return meu.mutation
+}
+
+// ClearItem clears the "item" edge to the Item entity.
+func (meu *MaintenanceEntryUpdate) ClearItem() *MaintenanceEntryUpdate {
+ meu.mutation.ClearItem()
+ return meu
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (meu *MaintenanceEntryUpdate) Save(ctx context.Context) (int, error) {
+ meu.defaults()
+ return withHooks(ctx, meu.sqlSave, meu.mutation, meu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (meu *MaintenanceEntryUpdate) SaveX(ctx context.Context) int {
+ affected, err := meu.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return affected
+}
+
+// Exec executes the query.
+func (meu *MaintenanceEntryUpdate) Exec(ctx context.Context) error {
+ _, err := meu.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (meu *MaintenanceEntryUpdate) ExecX(ctx context.Context) {
+ if err := meu.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (meu *MaintenanceEntryUpdate) defaults() {
+ if _, ok := meu.mutation.UpdatedAt(); !ok {
+ v := maintenanceentry.UpdateDefaultUpdatedAt()
+ meu.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (meu *MaintenanceEntryUpdate) check() error {
+ if v, ok := meu.mutation.Name(); ok {
+ if err := maintenanceentry.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.name": %w`, err)}
+ }
+ }
+ if v, ok := meu.mutation.Description(); ok {
+ if err := maintenanceentry.DescriptionValidator(v); err != nil {
+ return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.description": %w`, err)}
+ }
+ }
+ if _, ok := meu.mutation.ItemID(); meu.mutation.ItemCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "MaintenanceEntry.item"`)
+ }
+ return nil
+}
+
+func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err error) {
+ if err := meu.check(); err != nil {
+ return n, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(maintenanceentry.Table, maintenanceentry.Columns, sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID))
+ if ps := meu.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := meu.mutation.UpdatedAt(); ok {
+ _spec.SetField(maintenanceentry.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := meu.mutation.Date(); ok {
+ _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value)
+ }
+ if meu.mutation.DateCleared() {
+ _spec.ClearField(maintenanceentry.FieldDate, field.TypeTime)
+ }
+ if value, ok := meu.mutation.ScheduledDate(); ok {
+ _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value)
+ }
+ if meu.mutation.ScheduledDateCleared() {
+ _spec.ClearField(maintenanceentry.FieldScheduledDate, field.TypeTime)
+ }
+ if value, ok := meu.mutation.Name(); ok {
+ _spec.SetField(maintenanceentry.FieldName, field.TypeString, value)
+ }
+ if value, ok := meu.mutation.Description(); ok {
+ _spec.SetField(maintenanceentry.FieldDescription, field.TypeString, value)
+ }
+ if meu.mutation.DescriptionCleared() {
+ _spec.ClearField(maintenanceentry.FieldDescription, field.TypeString)
+ }
+ if value, ok := meu.mutation.Cost(); ok {
+ _spec.SetField(maintenanceentry.FieldCost, field.TypeFloat64, value)
+ }
+ if value, ok := meu.mutation.AddedCost(); ok {
+ _spec.AddField(maintenanceentry.FieldCost, field.TypeFloat64, value)
+ }
+ if meu.mutation.ItemCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: maintenanceentry.ItemTable,
+ Columns: []string{maintenanceentry.ItemColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := meu.mutation.ItemIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: maintenanceentry.ItemTable,
+ Columns: []string{maintenanceentry.ItemColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if n, err = sqlgraph.UpdateNodes(ctx, meu.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{maintenanceentry.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return 0, err
+ }
+ meu.mutation.done = true
+ return n, nil
+}
+
+// MaintenanceEntryUpdateOne is the builder for updating a single MaintenanceEntry entity.
+type MaintenanceEntryUpdateOne struct {
+ config
+ fields []string
+ hooks []Hook
+ mutation *MaintenanceEntryMutation
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (meuo *MaintenanceEntryUpdateOne) SetUpdatedAt(t time.Time) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetUpdatedAt(t)
+ return meuo
+}
+
+// SetItemID sets the "item_id" field.
+func (meuo *MaintenanceEntryUpdateOne) SetItemID(u uuid.UUID) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetItemID(u)
+ return meuo
+}
+
+// SetNillableItemID sets the "item_id" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableItemID(u *uuid.UUID) *MaintenanceEntryUpdateOne {
+ if u != nil {
+ meuo.SetItemID(*u)
+ }
+ return meuo
+}
+
+// SetDate sets the "date" field.
+func (meuo *MaintenanceEntryUpdateOne) SetDate(t time.Time) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetDate(t)
+ return meuo
+}
+
+// SetNillableDate sets the "date" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableDate(t *time.Time) *MaintenanceEntryUpdateOne {
+ if t != nil {
+ meuo.SetDate(*t)
+ }
+ return meuo
+}
+
+// ClearDate clears the value of the "date" field.
+func (meuo *MaintenanceEntryUpdateOne) ClearDate() *MaintenanceEntryUpdateOne {
+ meuo.mutation.ClearDate()
+ return meuo
+}
+
+// SetScheduledDate sets the "scheduled_date" field.
+func (meuo *MaintenanceEntryUpdateOne) SetScheduledDate(t time.Time) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetScheduledDate(t)
+ return meuo
+}
+
+// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryUpdateOne {
+ if t != nil {
+ meuo.SetScheduledDate(*t)
+ }
+ return meuo
+}
+
+// ClearScheduledDate clears the value of the "scheduled_date" field.
+func (meuo *MaintenanceEntryUpdateOne) ClearScheduledDate() *MaintenanceEntryUpdateOne {
+ meuo.mutation.ClearScheduledDate()
+ return meuo
+}
+
+// SetName sets the "name" field.
+func (meuo *MaintenanceEntryUpdateOne) SetName(s string) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetName(s)
+ return meuo
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableName(s *string) *MaintenanceEntryUpdateOne {
+ if s != nil {
+ meuo.SetName(*s)
+ }
+ return meuo
+}
+
+// SetDescription sets the "description" field.
+func (meuo *MaintenanceEntryUpdateOne) SetDescription(s string) *MaintenanceEntryUpdateOne {
+ meuo.mutation.SetDescription(s)
+ return meuo
+}
+
+// SetNillableDescription sets the "description" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableDescription(s *string) *MaintenanceEntryUpdateOne {
+ if s != nil {
+ meuo.SetDescription(*s)
+ }
+ return meuo
+}
+
+// ClearDescription clears the value of the "description" field.
+func (meuo *MaintenanceEntryUpdateOne) ClearDescription() *MaintenanceEntryUpdateOne {
+ meuo.mutation.ClearDescription()
+ return meuo
+}
+
+// SetCost sets the "cost" field.
+func (meuo *MaintenanceEntryUpdateOne) SetCost(f float64) *MaintenanceEntryUpdateOne {
+ meuo.mutation.ResetCost()
+ meuo.mutation.SetCost(f)
+ return meuo
+}
+
+// SetNillableCost sets the "cost" field if the given value is not nil.
+func (meuo *MaintenanceEntryUpdateOne) SetNillableCost(f *float64) *MaintenanceEntryUpdateOne {
+ if f != nil {
+ meuo.SetCost(*f)
+ }
+ return meuo
+}
+
+// AddCost adds f to the "cost" field.
+func (meuo *MaintenanceEntryUpdateOne) AddCost(f float64) *MaintenanceEntryUpdateOne {
+ meuo.mutation.AddCost(f)
+ return meuo
+}
+
+// SetItem sets the "item" edge to the Item entity.
+func (meuo *MaintenanceEntryUpdateOne) SetItem(i *Item) *MaintenanceEntryUpdateOne {
+ return meuo.SetItemID(i.ID)
+}
+
+// Mutation returns the MaintenanceEntryMutation object of the builder.
+func (meuo *MaintenanceEntryUpdateOne) Mutation() *MaintenanceEntryMutation {
+ return meuo.mutation
+}
+
+// ClearItem clears the "item" edge to the Item entity.
+func (meuo *MaintenanceEntryUpdateOne) ClearItem() *MaintenanceEntryUpdateOne {
+ meuo.mutation.ClearItem()
+ return meuo
+}
+
+// Where appends a list predicates to the MaintenanceEntryUpdate builder.
+func (meuo *MaintenanceEntryUpdateOne) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryUpdateOne {
+ meuo.mutation.Where(ps...)
+ return meuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (meuo *MaintenanceEntryUpdateOne) Select(field string, fields ...string) *MaintenanceEntryUpdateOne {
+ meuo.fields = append([]string{field}, fields...)
+ return meuo
+}
+
+// Save executes the query and returns the updated MaintenanceEntry entity.
+func (meuo *MaintenanceEntryUpdateOne) Save(ctx context.Context) (*MaintenanceEntry, error) {
+ meuo.defaults()
+ return withHooks(ctx, meuo.sqlSave, meuo.mutation, meuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (meuo *MaintenanceEntryUpdateOne) SaveX(ctx context.Context) *MaintenanceEntry {
+ node, err := meuo.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// Exec executes the query on the entity.
+func (meuo *MaintenanceEntryUpdateOne) Exec(ctx context.Context) error {
+ _, err := meuo.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (meuo *MaintenanceEntryUpdateOne) ExecX(ctx context.Context) {
+ if err := meuo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (meuo *MaintenanceEntryUpdateOne) defaults() {
+ if _, ok := meuo.mutation.UpdatedAt(); !ok {
+ v := maintenanceentry.UpdateDefaultUpdatedAt()
+ meuo.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (meuo *MaintenanceEntryUpdateOne) check() error {
+ if v, ok := meuo.mutation.Name(); ok {
+ if err := maintenanceentry.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.name": %w`, err)}
+ }
+ }
+ if v, ok := meuo.mutation.Description(); ok {
+ if err := maintenanceentry.DescriptionValidator(v); err != nil {
+ return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "MaintenanceEntry.description": %w`, err)}
+ }
+ }
+ if _, ok := meuo.mutation.ItemID(); meuo.mutation.ItemCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "MaintenanceEntry.item"`)
+ }
+ return nil
+}
+
+func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *MaintenanceEntry, err error) {
+ if err := meuo.check(); err != nil {
+ return _node, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(maintenanceentry.Table, maintenanceentry.Columns, sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID))
+ id, ok := meuo.mutation.ID()
+ if !ok {
+ return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "MaintenanceEntry.id" for update`)}
+ }
+ _spec.Node.ID.Value = id
+ if fields := meuo.fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, maintenanceentry.FieldID)
+ for _, f := range fields {
+ if !maintenanceentry.ValidColumn(f) {
+ return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ if f != maintenanceentry.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, f)
+ }
+ }
+ }
+ if ps := meuo.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := meuo.mutation.UpdatedAt(); ok {
+ _spec.SetField(maintenanceentry.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := meuo.mutation.Date(); ok {
+ _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value)
+ }
+ if meuo.mutation.DateCleared() {
+ _spec.ClearField(maintenanceentry.FieldDate, field.TypeTime)
+ }
+ if value, ok := meuo.mutation.ScheduledDate(); ok {
+ _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value)
+ }
+ if meuo.mutation.ScheduledDateCleared() {
+ _spec.ClearField(maintenanceentry.FieldScheduledDate, field.TypeTime)
+ }
+ if value, ok := meuo.mutation.Name(); ok {
+ _spec.SetField(maintenanceentry.FieldName, field.TypeString, value)
+ }
+ if value, ok := meuo.mutation.Description(); ok {
+ _spec.SetField(maintenanceentry.FieldDescription, field.TypeString, value)
+ }
+ if meuo.mutation.DescriptionCleared() {
+ _spec.ClearField(maintenanceentry.FieldDescription, field.TypeString)
+ }
+ if value, ok := meuo.mutation.Cost(); ok {
+ _spec.SetField(maintenanceentry.FieldCost, field.TypeFloat64, value)
+ }
+ if value, ok := meuo.mutation.AddedCost(); ok {
+ _spec.AddField(maintenanceentry.FieldCost, field.TypeFloat64, value)
+ }
+ if meuo.mutation.ItemCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: maintenanceentry.ItemTable,
+ Columns: []string{maintenanceentry.ItemColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := meuo.mutation.ItemIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: maintenanceentry.ItemTable,
+ Columns: []string{maintenanceentry.ItemColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ _node = &MaintenanceEntry{config: meuo.config}
+ _spec.Assign = _node.assignValues
+ _spec.ScanValues = _node.scanValues
+ if err = sqlgraph.UpdateNode(ctx, meuo.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{maintenanceentry.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ meuo.mutation.done = true
+ return _node, nil
+}
diff --git a/backend/internal/data/ent/migrate/schema.go b/backend/internal/data/ent/migrate/schema.go
index fbaeb3a..2b58838 100644
--- a/backend/internal/data/ent/migrate/schema.go
+++ b/backend/internal/data/ent/migrate/schema.go
@@ -14,6 +14,7 @@ var (
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "type", Type: field.TypeEnum, Enums: []string{"photo", "manual", "warranty", "attachment", "receipt"}, Default: "attachment"},
+ {Name: "primary", Type: field.TypeBool, Default: false},
{Name: "document_attachments", Type: field.TypeUUID},
{Name: "item_attachments", Type: field.TypeUUID},
}
@@ -25,18 +26,38 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "attachments_documents_attachments",
- Columns: []*schema.Column{AttachmentsColumns[4]},
+ Columns: []*schema.Column{AttachmentsColumns[5]},
RefColumns: []*schema.Column{DocumentsColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "attachments_items_attachments",
- Columns: []*schema.Column{AttachmentsColumns[5]},
+ Columns: []*schema.Column{AttachmentsColumns[6]},
RefColumns: []*schema.Column{ItemsColumns[0]},
OnDelete: schema.Cascade,
},
},
}
+ // AuthRolesColumns holds the columns for the "auth_roles" table.
+ AuthRolesColumns = []*schema.Column{
+ {Name: "id", Type: field.TypeInt, Increment: true},
+ {Name: "role", Type: field.TypeEnum, Enums: []string{"admin", "user", "attachments"}, Default: "user"},
+ {Name: "auth_tokens_roles", Type: field.TypeUUID, Unique: true, Nullable: true},
+ }
+ // AuthRolesTable holds the schema information for the "auth_roles" table.
+ AuthRolesTable = &schema.Table{
+ Name: "auth_roles",
+ Columns: AuthRolesColumns,
+ PrimaryKey: []*schema.Column{AuthRolesColumns[0]},
+ ForeignKeys: []*schema.ForeignKey{
+ {
+ Symbol: "auth_roles_auth_tokens_roles",
+ Columns: []*schema.Column{AuthRolesColumns[2]},
+ RefColumns: []*schema.Column{AuthTokensColumns[0]},
+ OnDelete: schema.Cascade,
+ },
+ },
+ }
// AuthTokensColumns holds the columns for the "auth_tokens" table.
AuthTokensColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
@@ -90,44 +111,13 @@ var (
},
},
}
- // DocumentTokensColumns holds the columns for the "document_tokens" table.
- DocumentTokensColumns = []*schema.Column{
- {Name: "id", Type: field.TypeUUID},
- {Name: "created_at", Type: field.TypeTime},
- {Name: "updated_at", Type: field.TypeTime},
- {Name: "token", Type: field.TypeBytes, Unique: true},
- {Name: "uses", Type: field.TypeInt, Default: 1},
- {Name: "expires_at", Type: field.TypeTime},
- {Name: "document_document_tokens", Type: field.TypeUUID, Nullable: true},
- }
- // DocumentTokensTable holds the schema information for the "document_tokens" table.
- DocumentTokensTable = &schema.Table{
- Name: "document_tokens",
- Columns: DocumentTokensColumns,
- PrimaryKey: []*schema.Column{DocumentTokensColumns[0]},
- ForeignKeys: []*schema.ForeignKey{
- {
- Symbol: "document_tokens_documents_document_tokens",
- Columns: []*schema.Column{DocumentTokensColumns[6]},
- RefColumns: []*schema.Column{DocumentsColumns[0]},
- OnDelete: schema.Cascade,
- },
- },
- Indexes: []*schema.Index{
- {
- Name: "documenttoken_token",
- Unique: false,
- Columns: []*schema.Column{DocumentTokensColumns[3]},
- },
- },
- }
// GroupsColumns holds the columns for the "groups" table.
GroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Size: 255},
- {Name: "currency", Type: field.TypeEnum, Enums: []string{"usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk"}, Default: "usd"},
+ {Name: "currency", Type: field.TypeString, Default: "usd"},
}
// GroupsTable holds the schema information for the "groups" table.
GroupsTable = &schema.Table{
@@ -171,6 +161,7 @@ var (
{Name: "quantity", Type: field.TypeInt, Default: 1},
{Name: "insured", Type: field.TypeBool, Default: false},
{Name: "archived", Type: field.TypeBool, Default: false},
+ {Name: "asset_id", Type: field.TypeInt, Default: 0},
{Name: "serial_number", Type: field.TypeString, Nullable: true, Size: 255},
{Name: "model_number", Type: field.TypeString, Nullable: true, Size: 255},
{Name: "manufacturer", Type: field.TypeString, Nullable: true, Size: 255},
@@ -196,19 +187,19 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "items_groups_items",
- Columns: []*schema.Column{ItemsColumns[23]},
+ Columns: []*schema.Column{ItemsColumns[24]},
RefColumns: []*schema.Column{GroupsColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "items_items_children",
- Columns: []*schema.Column{ItemsColumns[24]},
+ Columns: []*schema.Column{ItemsColumns[25]},
RefColumns: []*schema.Column{ItemsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "items_locations_items",
- Columns: []*schema.Column{ItemsColumns[25]},
+ Columns: []*schema.Column{ItemsColumns[26]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.Cascade,
},
@@ -222,23 +213,28 @@ var (
{
Name: "item_manufacturer",
Unique: false,
- Columns: []*schema.Column{ItemsColumns[12]},
+ Columns: []*schema.Column{ItemsColumns[13]},
},
{
Name: "item_model_number",
Unique: false,
- Columns: []*schema.Column{ItemsColumns[11]},
+ Columns: []*schema.Column{ItemsColumns[12]},
},
{
Name: "item_serial_number",
Unique: false,
- Columns: []*schema.Column{ItemsColumns[10]},
+ Columns: []*schema.Column{ItemsColumns[11]},
},
{
Name: "item_archived",
Unique: false,
Columns: []*schema.Column{ItemsColumns[9]},
},
+ {
+ Name: "item_asset_id",
+ Unique: false,
+ Columns: []*schema.Column{ItemsColumns[10]},
+ },
},
}
// ItemFieldsColumns holds the columns for the "item_fields" table.
@@ -323,6 +319,85 @@ var (
},
},
}
+ // MaintenanceEntriesColumns holds the columns for the "maintenance_entries" table.
+ MaintenanceEntriesColumns = []*schema.Column{
+ {Name: "id", Type: field.TypeUUID},
+ {Name: "created_at", Type: field.TypeTime},
+ {Name: "updated_at", Type: field.TypeTime},
+ {Name: "date", Type: field.TypeTime, Nullable: true},
+ {Name: "scheduled_date", Type: field.TypeTime, Nullable: true},
+ {Name: "name", Type: field.TypeString, Size: 255},
+ {Name: "description", Type: field.TypeString, Nullable: true, Size: 2500},
+ {Name: "cost", Type: field.TypeFloat64, Default: 0},
+ {Name: "item_id", Type: field.TypeUUID},
+ }
+ // MaintenanceEntriesTable holds the schema information for the "maintenance_entries" table.
+ MaintenanceEntriesTable = &schema.Table{
+ Name: "maintenance_entries",
+ Columns: MaintenanceEntriesColumns,
+ PrimaryKey: []*schema.Column{MaintenanceEntriesColumns[0]},
+ ForeignKeys: []*schema.ForeignKey{
+ {
+ Symbol: "maintenance_entries_items_maintenance_entries",
+ Columns: []*schema.Column{MaintenanceEntriesColumns[8]},
+ RefColumns: []*schema.Column{ItemsColumns[0]},
+ OnDelete: schema.Cascade,
+ },
+ },
+ }
+ // NotifiersColumns holds the columns for the "notifiers" table.
+ NotifiersColumns = []*schema.Column{
+ {Name: "id", Type: field.TypeUUID},
+ {Name: "created_at", Type: field.TypeTime},
+ {Name: "updated_at", Type: field.TypeTime},
+ {Name: "name", Type: field.TypeString, Size: 255},
+ {Name: "url", Type: field.TypeString, Size: 2083},
+ {Name: "is_active", Type: field.TypeBool, Default: true},
+ {Name: "group_id", Type: field.TypeUUID},
+ {Name: "user_id", Type: field.TypeUUID},
+ }
+ // NotifiersTable holds the schema information for the "notifiers" table.
+ NotifiersTable = &schema.Table{
+ Name: "notifiers",
+ Columns: NotifiersColumns,
+ PrimaryKey: []*schema.Column{NotifiersColumns[0]},
+ ForeignKeys: []*schema.ForeignKey{
+ {
+ Symbol: "notifiers_groups_notifiers",
+ Columns: []*schema.Column{NotifiersColumns[6]},
+ RefColumns: []*schema.Column{GroupsColumns[0]},
+ OnDelete: schema.Cascade,
+ },
+ {
+ Symbol: "notifiers_users_notifiers",
+ Columns: []*schema.Column{NotifiersColumns[7]},
+ RefColumns: []*schema.Column{UsersColumns[0]},
+ OnDelete: schema.Cascade,
+ },
+ },
+ Indexes: []*schema.Index{
+ {
+ Name: "notifier_user_id",
+ Unique: false,
+ Columns: []*schema.Column{NotifiersColumns[7]},
+ },
+ {
+ Name: "notifier_user_id_is_active",
+ Unique: false,
+ Columns: []*schema.Column{NotifiersColumns[7], NotifiersColumns[5]},
+ },
+ {
+ Name: "notifier_group_id",
+ Unique: false,
+ Columns: []*schema.Column{NotifiersColumns[6]},
+ },
+ {
+ Name: "notifier_group_id_is_active",
+ Unique: false,
+ Columns: []*schema.Column{NotifiersColumns[6], NotifiersColumns[5]},
+ },
+ },
+ }
// UsersColumns holds the columns for the "users" table.
UsersColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
@@ -332,8 +407,8 @@ var (
{Name: "email", Type: field.TypeString, Unique: true, Size: 255},
{Name: "password", Type: field.TypeString, Size: 255},
{Name: "is_superuser", Type: field.TypeBool, Default: false},
- {Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"},
{Name: "superuser", Type: field.TypeBool, Default: false},
+ {Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"},
{Name: "activated_on", Type: field.TypeTime, Nullable: true},
{Name: "group_users", Type: field.TypeUUID},
}
@@ -379,15 +454,17 @@ var (
// Tables holds all the tables in the schema.
Tables = []*schema.Table{
AttachmentsTable,
+ AuthRolesTable,
AuthTokensTable,
DocumentsTable,
- DocumentTokensTable,
GroupsTable,
GroupInvitationTokensTable,
ItemsTable,
ItemFieldsTable,
LabelsTable,
LocationsTable,
+ MaintenanceEntriesTable,
+ NotifiersTable,
UsersTable,
LabelItemsTable,
}
@@ -396,9 +473,9 @@ var (
func init() {
AttachmentsTable.ForeignKeys[0].RefTable = DocumentsTable
AttachmentsTable.ForeignKeys[1].RefTable = ItemsTable
+ AuthRolesTable.ForeignKeys[0].RefTable = AuthTokensTable
AuthTokensTable.ForeignKeys[0].RefTable = UsersTable
DocumentsTable.ForeignKeys[0].RefTable = GroupsTable
- DocumentTokensTable.ForeignKeys[0].RefTable = DocumentsTable
GroupInvitationTokensTable.ForeignKeys[0].RefTable = GroupsTable
ItemsTable.ForeignKeys[0].RefTable = GroupsTable
ItemsTable.ForeignKeys[1].RefTable = ItemsTable
@@ -407,6 +484,9 @@ func init() {
LabelsTable.ForeignKeys[0].RefTable = GroupsTable
LocationsTable.ForeignKeys[0].RefTable = GroupsTable
LocationsTable.ForeignKeys[1].RefTable = LocationsTable
+ MaintenanceEntriesTable.ForeignKeys[0].RefTable = ItemsTable
+ NotifiersTable.ForeignKeys[0].RefTable = GroupsTable
+ NotifiersTable.ForeignKeys[1].RefTable = UsersTable
UsersTable.ForeignKeys[0].RefTable = GroupsTable
LabelItemsTable.ForeignKeys[0].RefTable = LabelsTable
LabelItemsTable.ForeignKeys[1].RefTable = ItemsTable
diff --git a/backend/internal/data/ent/mutation.go b/backend/internal/data/ent/mutation.go
index 73ccbde..6fa15d3 100644
--- a/backend/internal/data/ent/mutation.go
+++ b/backend/internal/data/ent/mutation.go
@@ -9,21 +9,23 @@ import (
"sync"
"time"
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
-
- "entgo.io/ent"
)
const (
@@ -36,15 +38,17 @@ const (
// Node types.
TypeAttachment = "Attachment"
+ TypeAuthRoles = "AuthRoles"
TypeAuthTokens = "AuthTokens"
TypeDocument = "Document"
- TypeDocumentToken = "DocumentToken"
TypeGroup = "Group"
TypeGroupInvitationToken = "GroupInvitationToken"
TypeItem = "Item"
TypeItemField = "ItemField"
TypeLabel = "Label"
TypeLocation = "Location"
+ TypeMaintenanceEntry = "MaintenanceEntry"
+ TypeNotifier = "Notifier"
TypeUser = "User"
)
@@ -57,6 +61,7 @@ type AttachmentMutation struct {
created_at *time.Time
updated_at *time.Time
_type *attachment.Type
+ primary *bool
clearedFields map[string]struct{}
item *uuid.UUID
cleareditem bool
@@ -279,6 +284,42 @@ func (m *AttachmentMutation) ResetType() {
m._type = nil
}
+// SetPrimary sets the "primary" field.
+func (m *AttachmentMutation) SetPrimary(b bool) {
+ m.primary = &b
+}
+
+// Primary returns the value of the "primary" field in the mutation.
+func (m *AttachmentMutation) Primary() (r bool, exists bool) {
+ v := m.primary
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldPrimary returns the old "primary" field's value of the Attachment entity.
+// If the Attachment object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AttachmentMutation) OldPrimary(ctx context.Context) (v bool, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldPrimary is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldPrimary requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldPrimary: %w", err)
+ }
+ return oldValue.Primary, nil
+}
+
+// ResetPrimary resets all changes to the "primary" field.
+func (m *AttachmentMutation) ResetPrimary() {
+ m.primary = nil
+}
+
// SetItemID sets the "item" edge to the Item entity by id.
func (m *AttachmentMutation) SetItemID(id uuid.UUID) {
m.item = &id
@@ -362,11 +403,26 @@ func (m *AttachmentMutation) Where(ps ...predicate.Attachment) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the AttachmentMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *AttachmentMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Attachment, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *AttachmentMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *AttachmentMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Attachment).
func (m *AttachmentMutation) Type() string {
return m.typ
@@ -376,7 +432,7 @@ func (m *AttachmentMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *AttachmentMutation) Fields() []string {
- fields := make([]string, 0, 3)
+ fields := make([]string, 0, 4)
if m.created_at != nil {
fields = append(fields, attachment.FieldCreatedAt)
}
@@ -386,6 +442,9 @@ func (m *AttachmentMutation) Fields() []string {
if m._type != nil {
fields = append(fields, attachment.FieldType)
}
+ if m.primary != nil {
+ fields = append(fields, attachment.FieldPrimary)
+ }
return fields
}
@@ -400,6 +459,8 @@ func (m *AttachmentMutation) Field(name string) (ent.Value, bool) {
return m.UpdatedAt()
case attachment.FieldType:
return m.GetType()
+ case attachment.FieldPrimary:
+ return m.Primary()
}
return nil, false
}
@@ -415,6 +476,8 @@ func (m *AttachmentMutation) OldField(ctx context.Context, name string) (ent.Val
return m.OldUpdatedAt(ctx)
case attachment.FieldType:
return m.OldType(ctx)
+ case attachment.FieldPrimary:
+ return m.OldPrimary(ctx)
}
return nil, fmt.Errorf("unknown Attachment field %s", name)
}
@@ -445,6 +508,13 @@ func (m *AttachmentMutation) SetField(name string, value ent.Value) error {
}
m.SetType(v)
return nil
+ case attachment.FieldPrimary:
+ v, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetPrimary(v)
+ return nil
}
return fmt.Errorf("unknown Attachment field %s", name)
}
@@ -503,6 +573,9 @@ func (m *AttachmentMutation) ResetField(name string) error {
case attachment.FieldType:
m.ResetType()
return nil
+ case attachment.FieldPrimary:
+ m.ResetPrimary()
+ return nil
}
return fmt.Errorf("unknown Attachment field %s", name)
}
@@ -599,6 +672,399 @@ func (m *AttachmentMutation) ResetEdge(name string) error {
return fmt.Errorf("unknown Attachment edge %s", name)
}
+// AuthRolesMutation represents an operation that mutates the AuthRoles nodes in the graph.
+type AuthRolesMutation struct {
+ config
+ op Op
+ typ string
+ id *int
+ role *authroles.Role
+ clearedFields map[string]struct{}
+ token *uuid.UUID
+ clearedtoken bool
+ done bool
+ oldValue func(context.Context) (*AuthRoles, error)
+ predicates []predicate.AuthRoles
+}
+
+var _ ent.Mutation = (*AuthRolesMutation)(nil)
+
+// authrolesOption allows management of the mutation configuration using functional options.
+type authrolesOption func(*AuthRolesMutation)
+
+// newAuthRolesMutation creates new mutation for the AuthRoles entity.
+func newAuthRolesMutation(c config, op Op, opts ...authrolesOption) *AuthRolesMutation {
+ m := &AuthRolesMutation{
+ config: c,
+ op: op,
+ typ: TypeAuthRoles,
+ clearedFields: make(map[string]struct{}),
+ }
+ for _, opt := range opts {
+ opt(m)
+ }
+ return m
+}
+
+// withAuthRolesID sets the ID field of the mutation.
+func withAuthRolesID(id int) authrolesOption {
+ return func(m *AuthRolesMutation) {
+ var (
+ err error
+ once sync.Once
+ value *AuthRoles
+ )
+ m.oldValue = func(ctx context.Context) (*AuthRoles, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().AuthRoles.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withAuthRoles sets the old AuthRoles of the mutation.
+func withAuthRoles(node *AuthRoles) authrolesOption {
+ return func(m *AuthRolesMutation) {
+ m.oldValue = func(context.Context) (*AuthRoles, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m AuthRolesMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m AuthRolesMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("ent: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *AuthRolesMutation) ID() (id int, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *AuthRolesMutation) IDs(ctx context.Context) ([]int, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []int{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().AuthRoles.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetRole sets the "role" field.
+func (m *AuthRolesMutation) SetRole(a authroles.Role) {
+ m.role = &a
+}
+
+// Role returns the value of the "role" field in the mutation.
+func (m *AuthRolesMutation) Role() (r authroles.Role, exists bool) {
+ v := m.role
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldRole returns the old "role" field's value of the AuthRoles entity.
+// If the AuthRoles object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthRolesMutation) OldRole(ctx context.Context) (v authroles.Role, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldRole is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldRole requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldRole: %w", err)
+ }
+ return oldValue.Role, nil
+}
+
+// ResetRole resets all changes to the "role" field.
+func (m *AuthRolesMutation) ResetRole() {
+ m.role = nil
+}
+
+// SetTokenID sets the "token" edge to the AuthTokens entity by id.
+func (m *AuthRolesMutation) SetTokenID(id uuid.UUID) {
+ m.token = &id
+}
+
+// ClearToken clears the "token" edge to the AuthTokens entity.
+func (m *AuthRolesMutation) ClearToken() {
+ m.clearedtoken = true
+}
+
+// TokenCleared reports if the "token" edge to the AuthTokens entity was cleared.
+func (m *AuthRolesMutation) TokenCleared() bool {
+ return m.clearedtoken
+}
+
+// TokenID returns the "token" edge ID in the mutation.
+func (m *AuthRolesMutation) TokenID() (id uuid.UUID, exists bool) {
+ if m.token != nil {
+ return *m.token, true
+ }
+ return
+}
+
+// TokenIDs returns the "token" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// TokenID instead. It exists only for internal usage by the builders.
+func (m *AuthRolesMutation) TokenIDs() (ids []uuid.UUID) {
+ if id := m.token; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetToken resets all changes to the "token" edge.
+func (m *AuthRolesMutation) ResetToken() {
+ m.token = nil
+ m.clearedtoken = false
+}
+
+// Where appends a list predicates to the AuthRolesMutation builder.
+func (m *AuthRolesMutation) Where(ps ...predicate.AuthRoles) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the AuthRolesMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *AuthRolesMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.AuthRoles, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *AuthRolesMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *AuthRolesMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (AuthRoles).
+func (m *AuthRolesMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *AuthRolesMutation) Fields() []string {
+ fields := make([]string, 0, 1)
+ if m.role != nil {
+ fields = append(fields, authroles.FieldRole)
+ }
+ return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *AuthRolesMutation) Field(name string) (ent.Value, bool) {
+ switch name {
+ case authroles.FieldRole:
+ return m.Role()
+ }
+ return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *AuthRolesMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+ switch name {
+ case authroles.FieldRole:
+ return m.OldRole(ctx)
+ }
+ return nil, fmt.Errorf("unknown AuthRoles field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *AuthRolesMutation) SetField(name string, value ent.Value) error {
+ switch name {
+ case authroles.FieldRole:
+ v, ok := value.(authroles.Role)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetRole(v)
+ return nil
+ }
+ return fmt.Errorf("unknown AuthRoles field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *AuthRolesMutation) AddedFields() []string {
+ return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *AuthRolesMutation) AddedField(name string) (ent.Value, bool) {
+ return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *AuthRolesMutation) AddField(name string, value ent.Value) error {
+ switch name {
+ }
+ return fmt.Errorf("unknown AuthRoles numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *AuthRolesMutation) ClearedFields() []string {
+ return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *AuthRolesMutation) FieldCleared(name string) bool {
+ _, ok := m.clearedFields[name]
+ return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *AuthRolesMutation) ClearField(name string) error {
+ return fmt.Errorf("unknown AuthRoles nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *AuthRolesMutation) ResetField(name string) error {
+ switch name {
+ case authroles.FieldRole:
+ m.ResetRole()
+ return nil
+ }
+ return fmt.Errorf("unknown AuthRoles field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *AuthRolesMutation) AddedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.token != nil {
+ edges = append(edges, authroles.EdgeToken)
+ }
+ return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *AuthRolesMutation) AddedIDs(name string) []ent.Value {
+ switch name {
+ case authroles.EdgeToken:
+ if id := m.token; id != nil {
+ return []ent.Value{*id}
+ }
+ }
+ return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *AuthRolesMutation) RemovedEdges() []string {
+ edges := make([]string, 0, 1)
+ return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *AuthRolesMutation) RemovedIDs(name string) []ent.Value {
+ return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *AuthRolesMutation) ClearedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.clearedtoken {
+ edges = append(edges, authroles.EdgeToken)
+ }
+ return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *AuthRolesMutation) EdgeCleared(name string) bool {
+ switch name {
+ case authroles.EdgeToken:
+ return m.clearedtoken
+ }
+ return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *AuthRolesMutation) ClearEdge(name string) error {
+ switch name {
+ case authroles.EdgeToken:
+ m.ClearToken()
+ return nil
+ }
+ return fmt.Errorf("unknown AuthRoles unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *AuthRolesMutation) ResetEdge(name string) error {
+ switch name {
+ case authroles.EdgeToken:
+ m.ResetToken()
+ return nil
+ }
+ return fmt.Errorf("unknown AuthRoles edge %s", name)
+}
+
// AuthTokensMutation represents an operation that mutates the AuthTokens nodes in the graph.
type AuthTokensMutation struct {
config
@@ -612,6 +1078,8 @@ type AuthTokensMutation struct {
clearedFields map[string]struct{}
user *uuid.UUID
cleareduser bool
+ roles *int
+ clearedroles bool
done bool
oldValue func(context.Context) (*AuthTokens, error)
predicates []predicate.AuthTokens
@@ -904,16 +1372,70 @@ func (m *AuthTokensMutation) ResetUser() {
m.cleareduser = false
}
+// SetRolesID sets the "roles" edge to the AuthRoles entity by id.
+func (m *AuthTokensMutation) SetRolesID(id int) {
+ m.roles = &id
+}
+
+// ClearRoles clears the "roles" edge to the AuthRoles entity.
+func (m *AuthTokensMutation) ClearRoles() {
+ m.clearedroles = true
+}
+
+// RolesCleared reports if the "roles" edge to the AuthRoles entity was cleared.
+func (m *AuthTokensMutation) RolesCleared() bool {
+ return m.clearedroles
+}
+
+// RolesID returns the "roles" edge ID in the mutation.
+func (m *AuthTokensMutation) RolesID() (id int, exists bool) {
+ if m.roles != nil {
+ return *m.roles, true
+ }
+ return
+}
+
+// RolesIDs returns the "roles" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// RolesID instead. It exists only for internal usage by the builders.
+func (m *AuthTokensMutation) RolesIDs() (ids []int) {
+ if id := m.roles; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetRoles resets all changes to the "roles" edge.
+func (m *AuthTokensMutation) ResetRoles() {
+ m.roles = nil
+ m.clearedroles = false
+}
+
// Where appends a list predicates to the AuthTokensMutation builder.
func (m *AuthTokensMutation) Where(ps ...predicate.AuthTokens) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the AuthTokensMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *AuthTokensMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.AuthTokens, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *AuthTokensMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *AuthTokensMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (AuthTokens).
func (m *AuthTokensMutation) Type() string {
return m.typ
@@ -1073,10 +1595,13 @@ func (m *AuthTokensMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *AuthTokensMutation) AddedEdges() []string {
- edges := make([]string, 0, 1)
+ edges := make([]string, 0, 2)
if m.user != nil {
edges = append(edges, authtokens.EdgeUser)
}
+ if m.roles != nil {
+ edges = append(edges, authtokens.EdgeRoles)
+ }
return edges
}
@@ -1088,13 +1613,17 @@ func (m *AuthTokensMutation) AddedIDs(name string) []ent.Value {
if id := m.user; id != nil {
return []ent.Value{*id}
}
+ case authtokens.EdgeRoles:
+ if id := m.roles; id != nil {
+ return []ent.Value{*id}
+ }
}
return nil
}
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *AuthTokensMutation) RemovedEdges() []string {
- edges := make([]string, 0, 1)
+ edges := make([]string, 0, 2)
return edges
}
@@ -1106,10 +1635,13 @@ func (m *AuthTokensMutation) RemovedIDs(name string) []ent.Value {
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *AuthTokensMutation) ClearedEdges() []string {
- edges := make([]string, 0, 1)
+ edges := make([]string, 0, 2)
if m.cleareduser {
edges = append(edges, authtokens.EdgeUser)
}
+ if m.clearedroles {
+ edges = append(edges, authtokens.EdgeRoles)
+ }
return edges
}
@@ -1119,6 +1651,8 @@ func (m *AuthTokensMutation) EdgeCleared(name string) bool {
switch name {
case authtokens.EdgeUser:
return m.cleareduser
+ case authtokens.EdgeRoles:
+ return m.clearedroles
}
return false
}
@@ -1130,6 +1664,9 @@ func (m *AuthTokensMutation) ClearEdge(name string) error {
case authtokens.EdgeUser:
m.ClearUser()
return nil
+ case authtokens.EdgeRoles:
+ m.ClearRoles()
+ return nil
}
return fmt.Errorf("unknown AuthTokens unique edge %s", name)
}
@@ -1141,6 +1678,9 @@ func (m *AuthTokensMutation) ResetEdge(name string) error {
case authtokens.EdgeUser:
m.ResetUser()
return nil
+ case authtokens.EdgeRoles:
+ m.ResetRoles()
+ return nil
}
return fmt.Errorf("unknown AuthTokens edge %s", name)
}
@@ -1148,25 +1688,22 @@ func (m *AuthTokensMutation) ResetEdge(name string) error {
// DocumentMutation represents an operation that mutates the Document nodes in the graph.
type DocumentMutation struct {
config
- op Op
- typ string
- id *uuid.UUID
- created_at *time.Time
- updated_at *time.Time
- title *string
- _path *string
- clearedFields map[string]struct{}
- group *uuid.UUID
- clearedgroup bool
- document_tokens map[uuid.UUID]struct{}
- removeddocument_tokens map[uuid.UUID]struct{}
- cleareddocument_tokens bool
- attachments map[uuid.UUID]struct{}
- removedattachments map[uuid.UUID]struct{}
- clearedattachments bool
- done bool
- oldValue func(context.Context) (*Document, error)
- predicates []predicate.Document
+ op Op
+ typ string
+ id *uuid.UUID
+ created_at *time.Time
+ updated_at *time.Time
+ title *string
+ _path *string
+ clearedFields map[string]struct{}
+ group *uuid.UUID
+ clearedgroup bool
+ attachments map[uuid.UUID]struct{}
+ removedattachments map[uuid.UUID]struct{}
+ clearedattachments bool
+ done bool
+ oldValue func(context.Context) (*Document, error)
+ predicates []predicate.Document
}
var _ ent.Mutation = (*DocumentMutation)(nil)
@@ -1456,60 +1993,6 @@ func (m *DocumentMutation) ResetGroup() {
m.clearedgroup = false
}
-// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by ids.
-func (m *DocumentMutation) AddDocumentTokenIDs(ids ...uuid.UUID) {
- if m.document_tokens == nil {
- m.document_tokens = make(map[uuid.UUID]struct{})
- }
- for i := range ids {
- m.document_tokens[ids[i]] = struct{}{}
- }
-}
-
-// ClearDocumentTokens clears the "document_tokens" edge to the DocumentToken entity.
-func (m *DocumentMutation) ClearDocumentTokens() {
- m.cleareddocument_tokens = true
-}
-
-// DocumentTokensCleared reports if the "document_tokens" edge to the DocumentToken entity was cleared.
-func (m *DocumentMutation) DocumentTokensCleared() bool {
- return m.cleareddocument_tokens
-}
-
-// RemoveDocumentTokenIDs removes the "document_tokens" edge to the DocumentToken entity by IDs.
-func (m *DocumentMutation) RemoveDocumentTokenIDs(ids ...uuid.UUID) {
- if m.removeddocument_tokens == nil {
- m.removeddocument_tokens = make(map[uuid.UUID]struct{})
- }
- for i := range ids {
- delete(m.document_tokens, ids[i])
- m.removeddocument_tokens[ids[i]] = struct{}{}
- }
-}
-
-// RemovedDocumentTokens returns the removed IDs of the "document_tokens" edge to the DocumentToken entity.
-func (m *DocumentMutation) RemovedDocumentTokensIDs() (ids []uuid.UUID) {
- for id := range m.removeddocument_tokens {
- ids = append(ids, id)
- }
- return
-}
-
-// DocumentTokensIDs returns the "document_tokens" edge IDs in the mutation.
-func (m *DocumentMutation) DocumentTokensIDs() (ids []uuid.UUID) {
- for id := range m.document_tokens {
- ids = append(ids, id)
- }
- return
-}
-
-// ResetDocumentTokens resets all changes to the "document_tokens" edge.
-func (m *DocumentMutation) ResetDocumentTokens() {
- m.document_tokens = nil
- m.cleareddocument_tokens = false
- m.removeddocument_tokens = nil
-}
-
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by ids.
func (m *DocumentMutation) AddAttachmentIDs(ids ...uuid.UUID) {
if m.attachments == nil {
@@ -1569,11 +2052,26 @@ func (m *DocumentMutation) Where(ps ...predicate.Document) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the DocumentMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *DocumentMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Document, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *DocumentMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *DocumentMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Document).
func (m *DocumentMutation) Type() string {
return m.typ
@@ -1733,13 +2231,10 @@ func (m *DocumentMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *DocumentMutation) AddedEdges() []string {
- edges := make([]string, 0, 3)
+ edges := make([]string, 0, 2)
if m.group != nil {
edges = append(edges, document.EdgeGroup)
}
- if m.document_tokens != nil {
- edges = append(edges, document.EdgeDocumentTokens)
- }
if m.attachments != nil {
edges = append(edges, document.EdgeAttachments)
}
@@ -1754,12 +2249,6 @@ func (m *DocumentMutation) AddedIDs(name string) []ent.Value {
if id := m.group; id != nil {
return []ent.Value{*id}
}
- case document.EdgeDocumentTokens:
- ids := make([]ent.Value, 0, len(m.document_tokens))
- for id := range m.document_tokens {
- ids = append(ids, id)
- }
- return ids
case document.EdgeAttachments:
ids := make([]ent.Value, 0, len(m.attachments))
for id := range m.attachments {
@@ -1772,10 +2261,7 @@ func (m *DocumentMutation) AddedIDs(name string) []ent.Value {
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *DocumentMutation) RemovedEdges() []string {
- edges := make([]string, 0, 3)
- if m.removeddocument_tokens != nil {
- edges = append(edges, document.EdgeDocumentTokens)
- }
+ edges := make([]string, 0, 2)
if m.removedattachments != nil {
edges = append(edges, document.EdgeAttachments)
}
@@ -1786,12 +2272,6 @@ func (m *DocumentMutation) RemovedEdges() []string {
// the given name in this mutation.
func (m *DocumentMutation) RemovedIDs(name string) []ent.Value {
switch name {
- case document.EdgeDocumentTokens:
- ids := make([]ent.Value, 0, len(m.removeddocument_tokens))
- for id := range m.removeddocument_tokens {
- ids = append(ids, id)
- }
- return ids
case document.EdgeAttachments:
ids := make([]ent.Value, 0, len(m.removedattachments))
for id := range m.removedattachments {
@@ -1804,13 +2284,10 @@ func (m *DocumentMutation) RemovedIDs(name string) []ent.Value {
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *DocumentMutation) ClearedEdges() []string {
- edges := make([]string, 0, 3)
+ edges := make([]string, 0, 2)
if m.clearedgroup {
edges = append(edges, document.EdgeGroup)
}
- if m.cleareddocument_tokens {
- edges = append(edges, document.EdgeDocumentTokens)
- }
if m.clearedattachments {
edges = append(edges, document.EdgeAttachments)
}
@@ -1823,8 +2300,6 @@ func (m *DocumentMutation) EdgeCleared(name string) bool {
switch name {
case document.EdgeGroup:
return m.clearedgroup
- case document.EdgeDocumentTokens:
- return m.cleareddocument_tokens
case document.EdgeAttachments:
return m.clearedattachments
}
@@ -1849,9 +2324,6 @@ func (m *DocumentMutation) ResetEdge(name string) error {
case document.EdgeGroup:
m.ResetGroup()
return nil
- case document.EdgeDocumentTokens:
- m.ResetDocumentTokens()
- return nil
case document.EdgeAttachments:
m.ResetAttachments()
return nil
@@ -1859,642 +2331,6 @@ func (m *DocumentMutation) ResetEdge(name string) error {
return fmt.Errorf("unknown Document edge %s", name)
}
-// DocumentTokenMutation represents an operation that mutates the DocumentToken nodes in the graph.
-type DocumentTokenMutation struct {
- config
- op Op
- typ string
- id *uuid.UUID
- created_at *time.Time
- updated_at *time.Time
- token *[]byte
- uses *int
- adduses *int
- expires_at *time.Time
- clearedFields map[string]struct{}
- document *uuid.UUID
- cleareddocument bool
- done bool
- oldValue func(context.Context) (*DocumentToken, error)
- predicates []predicate.DocumentToken
-}
-
-var _ ent.Mutation = (*DocumentTokenMutation)(nil)
-
-// documenttokenOption allows management of the mutation configuration using functional options.
-type documenttokenOption func(*DocumentTokenMutation)
-
-// newDocumentTokenMutation creates new mutation for the DocumentToken entity.
-func newDocumentTokenMutation(c config, op Op, opts ...documenttokenOption) *DocumentTokenMutation {
- m := &DocumentTokenMutation{
- config: c,
- op: op,
- typ: TypeDocumentToken,
- clearedFields: make(map[string]struct{}),
- }
- for _, opt := range opts {
- opt(m)
- }
- return m
-}
-
-// withDocumentTokenID sets the ID field of the mutation.
-func withDocumentTokenID(id uuid.UUID) documenttokenOption {
- return func(m *DocumentTokenMutation) {
- var (
- err error
- once sync.Once
- value *DocumentToken
- )
- m.oldValue = func(ctx context.Context) (*DocumentToken, error) {
- once.Do(func() {
- if m.done {
- err = errors.New("querying old values post mutation is not allowed")
- } else {
- value, err = m.Client().DocumentToken.Get(ctx, id)
- }
- })
- return value, err
- }
- m.id = &id
- }
-}
-
-// withDocumentToken sets the old DocumentToken of the mutation.
-func withDocumentToken(node *DocumentToken) documenttokenOption {
- return func(m *DocumentTokenMutation) {
- m.oldValue = func(context.Context) (*DocumentToken, error) {
- return node, nil
- }
- m.id = &node.ID
- }
-}
-
-// Client returns a new `ent.Client` from the mutation. If the mutation was
-// executed in a transaction (ent.Tx), a transactional client is returned.
-func (m DocumentTokenMutation) Client() *Client {
- client := &Client{config: m.config}
- client.init()
- return client
-}
-
-// Tx returns an `ent.Tx` for mutations that were executed in transactions;
-// it returns an error otherwise.
-func (m DocumentTokenMutation) Tx() (*Tx, error) {
- if _, ok := m.driver.(*txDriver); !ok {
- return nil, errors.New("ent: mutation is not running in a transaction")
- }
- tx := &Tx{config: m.config}
- tx.init()
- return tx, nil
-}
-
-// SetID sets the value of the id field. Note that this
-// operation is only accepted on creation of DocumentToken entities.
-func (m *DocumentTokenMutation) SetID(id uuid.UUID) {
- m.id = &id
-}
-
-// ID returns the ID value in the mutation. Note that the ID is only available
-// if it was provided to the builder or after it was returned from the database.
-func (m *DocumentTokenMutation) ID() (id uuid.UUID, exists bool) {
- if m.id == nil {
- return
- }
- return *m.id, true
-}
-
-// IDs queries the database and returns the entity ids that match the mutation's predicate.
-// That means, if the mutation is applied within a transaction with an isolation level such
-// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
-// or updated by the mutation.
-func (m *DocumentTokenMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
- switch {
- case m.op.Is(OpUpdateOne | OpDeleteOne):
- id, exists := m.ID()
- if exists {
- return []uuid.UUID{id}, nil
- }
- fallthrough
- case m.op.Is(OpUpdate | OpDelete):
- return m.Client().DocumentToken.Query().Where(m.predicates...).IDs(ctx)
- default:
- return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
- }
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (m *DocumentTokenMutation) SetCreatedAt(t time.Time) {
- m.created_at = &t
-}
-
-// CreatedAt returns the value of the "created_at" field in the mutation.
-func (m *DocumentTokenMutation) CreatedAt() (r time.Time, exists bool) {
- v := m.created_at
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldCreatedAt returns the old "created_at" field's value of the DocumentToken entity.
-// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *DocumentTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldCreatedAt requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
- }
- return oldValue.CreatedAt, nil
-}
-
-// ResetCreatedAt resets all changes to the "created_at" field.
-func (m *DocumentTokenMutation) ResetCreatedAt() {
- m.created_at = nil
-}
-
-// SetUpdatedAt sets the "updated_at" field.
-func (m *DocumentTokenMutation) SetUpdatedAt(t time.Time) {
- m.updated_at = &t
-}
-
-// UpdatedAt returns the value of the "updated_at" field in the mutation.
-func (m *DocumentTokenMutation) UpdatedAt() (r time.Time, exists bool) {
- v := m.updated_at
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldUpdatedAt returns the old "updated_at" field's value of the DocumentToken entity.
-// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *DocumentTokenMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
- }
- return oldValue.UpdatedAt, nil
-}
-
-// ResetUpdatedAt resets all changes to the "updated_at" field.
-func (m *DocumentTokenMutation) ResetUpdatedAt() {
- m.updated_at = nil
-}
-
-// SetToken sets the "token" field.
-func (m *DocumentTokenMutation) SetToken(b []byte) {
- m.token = &b
-}
-
-// Token returns the value of the "token" field in the mutation.
-func (m *DocumentTokenMutation) Token() (r []byte, exists bool) {
- v := m.token
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldToken returns the old "token" field's value of the DocumentToken entity.
-// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *DocumentTokenMutation) OldToken(ctx context.Context) (v []byte, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldToken is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldToken requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldToken: %w", err)
- }
- return oldValue.Token, nil
-}
-
-// ResetToken resets all changes to the "token" field.
-func (m *DocumentTokenMutation) ResetToken() {
- m.token = nil
-}
-
-// SetUses sets the "uses" field.
-func (m *DocumentTokenMutation) SetUses(i int) {
- m.uses = &i
- m.adduses = nil
-}
-
-// Uses returns the value of the "uses" field in the mutation.
-func (m *DocumentTokenMutation) Uses() (r int, exists bool) {
- v := m.uses
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldUses returns the old "uses" field's value of the DocumentToken entity.
-// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *DocumentTokenMutation) OldUses(ctx context.Context) (v int, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldUses is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldUses requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldUses: %w", err)
- }
- return oldValue.Uses, nil
-}
-
-// AddUses adds i to the "uses" field.
-func (m *DocumentTokenMutation) AddUses(i int) {
- if m.adduses != nil {
- *m.adduses += i
- } else {
- m.adduses = &i
- }
-}
-
-// AddedUses returns the value that was added to the "uses" field in this mutation.
-func (m *DocumentTokenMutation) AddedUses() (r int, exists bool) {
- v := m.adduses
- if v == nil {
- return
- }
- return *v, true
-}
-
-// ResetUses resets all changes to the "uses" field.
-func (m *DocumentTokenMutation) ResetUses() {
- m.uses = nil
- m.adduses = nil
-}
-
-// SetExpiresAt sets the "expires_at" field.
-func (m *DocumentTokenMutation) SetExpiresAt(t time.Time) {
- m.expires_at = &t
-}
-
-// ExpiresAt returns the value of the "expires_at" field in the mutation.
-func (m *DocumentTokenMutation) ExpiresAt() (r time.Time, exists bool) {
- v := m.expires_at
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldExpiresAt returns the old "expires_at" field's value of the DocumentToken entity.
-// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *DocumentTokenMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldExpiresAt requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err)
- }
- return oldValue.ExpiresAt, nil
-}
-
-// ResetExpiresAt resets all changes to the "expires_at" field.
-func (m *DocumentTokenMutation) ResetExpiresAt() {
- m.expires_at = nil
-}
-
-// SetDocumentID sets the "document" edge to the Document entity by id.
-func (m *DocumentTokenMutation) SetDocumentID(id uuid.UUID) {
- m.document = &id
-}
-
-// ClearDocument clears the "document" edge to the Document entity.
-func (m *DocumentTokenMutation) ClearDocument() {
- m.cleareddocument = true
-}
-
-// DocumentCleared reports if the "document" edge to the Document entity was cleared.
-func (m *DocumentTokenMutation) DocumentCleared() bool {
- return m.cleareddocument
-}
-
-// DocumentID returns the "document" edge ID in the mutation.
-func (m *DocumentTokenMutation) DocumentID() (id uuid.UUID, exists bool) {
- if m.document != nil {
- return *m.document, true
- }
- return
-}
-
-// DocumentIDs returns the "document" edge IDs in the mutation.
-// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
-// DocumentID instead. It exists only for internal usage by the builders.
-func (m *DocumentTokenMutation) DocumentIDs() (ids []uuid.UUID) {
- if id := m.document; id != nil {
- ids = append(ids, *id)
- }
- return
-}
-
-// ResetDocument resets all changes to the "document" edge.
-func (m *DocumentTokenMutation) ResetDocument() {
- m.document = nil
- m.cleareddocument = false
-}
-
-// Where appends a list predicates to the DocumentTokenMutation builder.
-func (m *DocumentTokenMutation) Where(ps ...predicate.DocumentToken) {
- m.predicates = append(m.predicates, ps...)
-}
-
-// Op returns the operation name.
-func (m *DocumentTokenMutation) Op() Op {
- return m.op
-}
-
-// Type returns the node type of this mutation (DocumentToken).
-func (m *DocumentTokenMutation) Type() string {
- return m.typ
-}
-
-// Fields returns all fields that were changed during this mutation. Note that in
-// order to get all numeric fields that were incremented/decremented, call
-// AddedFields().
-func (m *DocumentTokenMutation) Fields() []string {
- fields := make([]string, 0, 5)
- if m.created_at != nil {
- fields = append(fields, documenttoken.FieldCreatedAt)
- }
- if m.updated_at != nil {
- fields = append(fields, documenttoken.FieldUpdatedAt)
- }
- if m.token != nil {
- fields = append(fields, documenttoken.FieldToken)
- }
- if m.uses != nil {
- fields = append(fields, documenttoken.FieldUses)
- }
- if m.expires_at != nil {
- fields = append(fields, documenttoken.FieldExpiresAt)
- }
- return fields
-}
-
-// Field returns the value of a field with the given name. The second boolean
-// return value indicates that this field was not set, or was not defined in the
-// schema.
-func (m *DocumentTokenMutation) Field(name string) (ent.Value, bool) {
- switch name {
- case documenttoken.FieldCreatedAt:
- return m.CreatedAt()
- case documenttoken.FieldUpdatedAt:
- return m.UpdatedAt()
- case documenttoken.FieldToken:
- return m.Token()
- case documenttoken.FieldUses:
- return m.Uses()
- case documenttoken.FieldExpiresAt:
- return m.ExpiresAt()
- }
- return nil, false
-}
-
-// OldField returns the old value of the field from the database. An error is
-// returned if the mutation operation is not UpdateOne, or the query to the
-// database failed.
-func (m *DocumentTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
- switch name {
- case documenttoken.FieldCreatedAt:
- return m.OldCreatedAt(ctx)
- case documenttoken.FieldUpdatedAt:
- return m.OldUpdatedAt(ctx)
- case documenttoken.FieldToken:
- return m.OldToken(ctx)
- case documenttoken.FieldUses:
- return m.OldUses(ctx)
- case documenttoken.FieldExpiresAt:
- return m.OldExpiresAt(ctx)
- }
- return nil, fmt.Errorf("unknown DocumentToken field %s", name)
-}
-
-// SetField sets the value of a field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *DocumentTokenMutation) SetField(name string, value ent.Value) error {
- switch name {
- case documenttoken.FieldCreatedAt:
- v, ok := value.(time.Time)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetCreatedAt(v)
- return nil
- case documenttoken.FieldUpdatedAt:
- v, ok := value.(time.Time)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetUpdatedAt(v)
- return nil
- case documenttoken.FieldToken:
- v, ok := value.([]byte)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetToken(v)
- return nil
- case documenttoken.FieldUses:
- v, ok := value.(int)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetUses(v)
- return nil
- case documenttoken.FieldExpiresAt:
- v, ok := value.(time.Time)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetExpiresAt(v)
- return nil
- }
- return fmt.Errorf("unknown DocumentToken field %s", name)
-}
-
-// AddedFields returns all numeric fields that were incremented/decremented during
-// this mutation.
-func (m *DocumentTokenMutation) AddedFields() []string {
- var fields []string
- if m.adduses != nil {
- fields = append(fields, documenttoken.FieldUses)
- }
- return fields
-}
-
-// AddedField returns the numeric value that was incremented/decremented on a field
-// with the given name. The second boolean return value indicates that this field
-// was not set, or was not defined in the schema.
-func (m *DocumentTokenMutation) AddedField(name string) (ent.Value, bool) {
- switch name {
- case documenttoken.FieldUses:
- return m.AddedUses()
- }
- return nil, false
-}
-
-// AddField adds the value to the field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *DocumentTokenMutation) AddField(name string, value ent.Value) error {
- switch name {
- case documenttoken.FieldUses:
- v, ok := value.(int)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.AddUses(v)
- return nil
- }
- return fmt.Errorf("unknown DocumentToken numeric field %s", name)
-}
-
-// ClearedFields returns all nullable fields that were cleared during this
-// mutation.
-func (m *DocumentTokenMutation) ClearedFields() []string {
- return nil
-}
-
-// FieldCleared returns a boolean indicating if a field with the given name was
-// cleared in this mutation.
-func (m *DocumentTokenMutation) FieldCleared(name string) bool {
- _, ok := m.clearedFields[name]
- return ok
-}
-
-// ClearField clears the value of the field with the given name. It returns an
-// error if the field is not defined in the schema.
-func (m *DocumentTokenMutation) ClearField(name string) error {
- return fmt.Errorf("unknown DocumentToken nullable field %s", name)
-}
-
-// ResetField resets all changes in the mutation for the field with the given name.
-// It returns an error if the field is not defined in the schema.
-func (m *DocumentTokenMutation) ResetField(name string) error {
- switch name {
- case documenttoken.FieldCreatedAt:
- m.ResetCreatedAt()
- return nil
- case documenttoken.FieldUpdatedAt:
- m.ResetUpdatedAt()
- return nil
- case documenttoken.FieldToken:
- m.ResetToken()
- return nil
- case documenttoken.FieldUses:
- m.ResetUses()
- return nil
- case documenttoken.FieldExpiresAt:
- m.ResetExpiresAt()
- return nil
- }
- return fmt.Errorf("unknown DocumentToken field %s", name)
-}
-
-// AddedEdges returns all edge names that were set/added in this mutation.
-func (m *DocumentTokenMutation) AddedEdges() []string {
- edges := make([]string, 0, 1)
- if m.document != nil {
- edges = append(edges, documenttoken.EdgeDocument)
- }
- return edges
-}
-
-// AddedIDs returns all IDs (to other nodes) that were added for the given edge
-// name in this mutation.
-func (m *DocumentTokenMutation) AddedIDs(name string) []ent.Value {
- switch name {
- case documenttoken.EdgeDocument:
- if id := m.document; id != nil {
- return []ent.Value{*id}
- }
- }
- return nil
-}
-
-// RemovedEdges returns all edge names that were removed in this mutation.
-func (m *DocumentTokenMutation) RemovedEdges() []string {
- edges := make([]string, 0, 1)
- return edges
-}
-
-// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
-// the given name in this mutation.
-func (m *DocumentTokenMutation) RemovedIDs(name string) []ent.Value {
- return nil
-}
-
-// ClearedEdges returns all edge names that were cleared in this mutation.
-func (m *DocumentTokenMutation) ClearedEdges() []string {
- edges := make([]string, 0, 1)
- if m.cleareddocument {
- edges = append(edges, documenttoken.EdgeDocument)
- }
- return edges
-}
-
-// EdgeCleared returns a boolean which indicates if the edge with the given name
-// was cleared in this mutation.
-func (m *DocumentTokenMutation) EdgeCleared(name string) bool {
- switch name {
- case documenttoken.EdgeDocument:
- return m.cleareddocument
- }
- return false
-}
-
-// ClearEdge clears the value of the edge with the given name. It returns an error
-// if that edge is not defined in the schema.
-func (m *DocumentTokenMutation) ClearEdge(name string) error {
- switch name {
- case documenttoken.EdgeDocument:
- m.ClearDocument()
- return nil
- }
- return fmt.Errorf("unknown DocumentToken unique edge %s", name)
-}
-
-// ResetEdge resets all changes to the edge with the given name in this mutation.
-// It returns an error if the edge is not defined in the schema.
-func (m *DocumentTokenMutation) ResetEdge(name string) error {
- switch name {
- case documenttoken.EdgeDocument:
- m.ResetDocument()
- return nil
- }
- return fmt.Errorf("unknown DocumentToken edge %s", name)
-}
-
// GroupMutation represents an operation that mutates the Group nodes in the graph.
type GroupMutation struct {
config
@@ -2504,7 +2340,7 @@ type GroupMutation struct {
created_at *time.Time
updated_at *time.Time
name *string
- currency *group.Currency
+ currency *string
clearedFields map[string]struct{}
users map[uuid.UUID]struct{}
removedusers map[uuid.UUID]struct{}
@@ -2524,6 +2360,9 @@ type GroupMutation struct {
invitation_tokens map[uuid.UUID]struct{}
removedinvitation_tokens map[uuid.UUID]struct{}
clearedinvitation_tokens bool
+ notifiers map[uuid.UUID]struct{}
+ removednotifiers map[uuid.UUID]struct{}
+ clearednotifiers bool
done bool
oldValue func(context.Context) (*Group, error)
predicates []predicate.Group
@@ -2742,12 +2581,12 @@ func (m *GroupMutation) ResetName() {
}
// SetCurrency sets the "currency" field.
-func (m *GroupMutation) SetCurrency(gr group.Currency) {
- m.currency = &gr
+func (m *GroupMutation) SetCurrency(s string) {
+ m.currency = &s
}
// Currency returns the value of the "currency" field in the mutation.
-func (m *GroupMutation) Currency() (r group.Currency, exists bool) {
+func (m *GroupMutation) Currency() (r string, exists bool) {
v := m.currency
if v == nil {
return
@@ -2758,7 +2597,7 @@ func (m *GroupMutation) Currency() (r group.Currency, exists bool) {
// OldCurrency returns the old "currency" field's value of the Group entity.
// If the Group object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *GroupMutation) OldCurrency(ctx context.Context) (v group.Currency, err error) {
+func (m *GroupMutation) OldCurrency(ctx context.Context) (v string, err error) {
if !m.op.Is(OpUpdateOne) {
return v, errors.New("OldCurrency is only allowed on UpdateOne operations")
}
@@ -3101,16 +2940,85 @@ func (m *GroupMutation) ResetInvitationTokens() {
m.removedinvitation_tokens = nil
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by ids.
+func (m *GroupMutation) AddNotifierIDs(ids ...uuid.UUID) {
+ if m.notifiers == nil {
+ m.notifiers = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ m.notifiers[ids[i]] = struct{}{}
+ }
+}
+
+// ClearNotifiers clears the "notifiers" edge to the Notifier entity.
+func (m *GroupMutation) ClearNotifiers() {
+ m.clearednotifiers = true
+}
+
+// NotifiersCleared reports if the "notifiers" edge to the Notifier entity was cleared.
+func (m *GroupMutation) NotifiersCleared() bool {
+ return m.clearednotifiers
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to the Notifier entity by IDs.
+func (m *GroupMutation) RemoveNotifierIDs(ids ...uuid.UUID) {
+ if m.removednotifiers == nil {
+ m.removednotifiers = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ delete(m.notifiers, ids[i])
+ m.removednotifiers[ids[i]] = struct{}{}
+ }
+}
+
+// RemovedNotifiers returns the removed IDs of the "notifiers" edge to the Notifier entity.
+func (m *GroupMutation) RemovedNotifiersIDs() (ids []uuid.UUID) {
+ for id := range m.removednotifiers {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// NotifiersIDs returns the "notifiers" edge IDs in the mutation.
+func (m *GroupMutation) NotifiersIDs() (ids []uuid.UUID) {
+ for id := range m.notifiers {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// ResetNotifiers resets all changes to the "notifiers" edge.
+func (m *GroupMutation) ResetNotifiers() {
+ m.notifiers = nil
+ m.clearednotifiers = false
+ m.removednotifiers = nil
+}
+
// Where appends a list predicates to the GroupMutation builder.
func (m *GroupMutation) Where(ps ...predicate.Group) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the GroupMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *GroupMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Group, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *GroupMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *GroupMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Group).
func (m *GroupMutation) Type() string {
return m.typ
@@ -3197,7 +3105,7 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error {
m.SetName(v)
return nil
case group.FieldCurrency:
- v, ok := value.(group.Currency)
+ v, ok := value.(string)
if !ok {
return fmt.Errorf("unexpected type %T for field %s", value, name)
}
@@ -3270,7 +3178,7 @@ func (m *GroupMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *GroupMutation) AddedEdges() []string {
- edges := make([]string, 0, 6)
+ edges := make([]string, 0, 7)
if m.users != nil {
edges = append(edges, group.EdgeUsers)
}
@@ -3289,6 +3197,9 @@ func (m *GroupMutation) AddedEdges() []string {
if m.invitation_tokens != nil {
edges = append(edges, group.EdgeInvitationTokens)
}
+ if m.notifiers != nil {
+ edges = append(edges, group.EdgeNotifiers)
+ }
return edges
}
@@ -3332,13 +3243,19 @@ func (m *GroupMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case group.EdgeNotifiers:
+ ids := make([]ent.Value, 0, len(m.notifiers))
+ for id := range m.notifiers {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *GroupMutation) RemovedEdges() []string {
- edges := make([]string, 0, 6)
+ edges := make([]string, 0, 7)
if m.removedusers != nil {
edges = append(edges, group.EdgeUsers)
}
@@ -3357,6 +3274,9 @@ func (m *GroupMutation) RemovedEdges() []string {
if m.removedinvitation_tokens != nil {
edges = append(edges, group.EdgeInvitationTokens)
}
+ if m.removednotifiers != nil {
+ edges = append(edges, group.EdgeNotifiers)
+ }
return edges
}
@@ -3400,13 +3320,19 @@ func (m *GroupMutation) RemovedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case group.EdgeNotifiers:
+ ids := make([]ent.Value, 0, len(m.removednotifiers))
+ for id := range m.removednotifiers {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *GroupMutation) ClearedEdges() []string {
- edges := make([]string, 0, 6)
+ edges := make([]string, 0, 7)
if m.clearedusers {
edges = append(edges, group.EdgeUsers)
}
@@ -3425,6 +3351,9 @@ func (m *GroupMutation) ClearedEdges() []string {
if m.clearedinvitation_tokens {
edges = append(edges, group.EdgeInvitationTokens)
}
+ if m.clearednotifiers {
+ edges = append(edges, group.EdgeNotifiers)
+ }
return edges
}
@@ -3444,6 +3373,8 @@ func (m *GroupMutation) EdgeCleared(name string) bool {
return m.cleareddocuments
case group.EdgeInvitationTokens:
return m.clearedinvitation_tokens
+ case group.EdgeNotifiers:
+ return m.clearednotifiers
}
return false
}
@@ -3478,6 +3409,9 @@ func (m *GroupMutation) ResetEdge(name string) error {
case group.EdgeInvitationTokens:
m.ResetInvitationTokens()
return nil
+ case group.EdgeNotifiers:
+ m.ResetNotifiers()
+ return nil
}
return fmt.Errorf("unknown Group edge %s", name)
}
@@ -3850,11 +3784,26 @@ func (m *GroupInvitationTokenMutation) Where(ps ...predicate.GroupInvitationToke
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the GroupInvitationTokenMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *GroupInvitationTokenMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.GroupInvitationToken, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *GroupInvitationTokenMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *GroupInvitationTokenMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (GroupInvitationToken).
func (m *GroupInvitationTokenMutation) Type() string {
return m.typ
@@ -4121,56 +4070,61 @@ func (m *GroupInvitationTokenMutation) ResetEdge(name string) error {
// ItemMutation represents an operation that mutates the Item nodes in the graph.
type ItemMutation struct {
config
- op Op
- typ string
- id *uuid.UUID
- created_at *time.Time
- updated_at *time.Time
- name *string
- description *string
- import_ref *string
- notes *string
- quantity *int
- addquantity *int
- insured *bool
- archived *bool
- serial_number *string
- model_number *string
- manufacturer *string
- lifetime_warranty *bool
- warranty_expires *time.Time
- warranty_details *string
- purchase_time *time.Time
- purchase_from *string
- purchase_price *float64
- addpurchase_price *float64
- sold_time *time.Time
- sold_to *string
- sold_price *float64
- addsold_price *float64
- sold_notes *string
- clearedFields map[string]struct{}
- parent *uuid.UUID
- clearedparent bool
- children map[uuid.UUID]struct{}
- removedchildren map[uuid.UUID]struct{}
- clearedchildren bool
- group *uuid.UUID
- clearedgroup bool
- label map[uuid.UUID]struct{}
- removedlabel map[uuid.UUID]struct{}
- clearedlabel bool
- location *uuid.UUID
- clearedlocation bool
- fields map[uuid.UUID]struct{}
- removedfields map[uuid.UUID]struct{}
- clearedfields bool
- attachments map[uuid.UUID]struct{}
- removedattachments map[uuid.UUID]struct{}
- clearedattachments bool
- done bool
- oldValue func(context.Context) (*Item, error)
- predicates []predicate.Item
+ op Op
+ typ string
+ id *uuid.UUID
+ created_at *time.Time
+ updated_at *time.Time
+ name *string
+ description *string
+ import_ref *string
+ notes *string
+ quantity *int
+ addquantity *int
+ insured *bool
+ archived *bool
+ asset_id *int
+ addasset_id *int
+ serial_number *string
+ model_number *string
+ manufacturer *string
+ lifetime_warranty *bool
+ warranty_expires *time.Time
+ warranty_details *string
+ purchase_time *time.Time
+ purchase_from *string
+ purchase_price *float64
+ addpurchase_price *float64
+ sold_time *time.Time
+ sold_to *string
+ sold_price *float64
+ addsold_price *float64
+ sold_notes *string
+ clearedFields map[string]struct{}
+ group *uuid.UUID
+ clearedgroup bool
+ parent *uuid.UUID
+ clearedparent bool
+ children map[uuid.UUID]struct{}
+ removedchildren map[uuid.UUID]struct{}
+ clearedchildren bool
+ label map[uuid.UUID]struct{}
+ removedlabel map[uuid.UUID]struct{}
+ clearedlabel bool
+ location *uuid.UUID
+ clearedlocation bool
+ fields map[uuid.UUID]struct{}
+ removedfields map[uuid.UUID]struct{}
+ clearedfields bool
+ maintenance_entries map[uuid.UUID]struct{}
+ removedmaintenance_entries map[uuid.UUID]struct{}
+ clearedmaintenance_entries bool
+ attachments map[uuid.UUID]struct{}
+ removedattachments map[uuid.UUID]struct{}
+ clearedattachments bool
+ done bool
+ oldValue func(context.Context) (*Item, error)
+ predicates []predicate.Item
}
var _ ent.Mutation = (*ItemMutation)(nil)
@@ -4660,6 +4614,62 @@ func (m *ItemMutation) ResetArchived() {
m.archived = nil
}
+// SetAssetID sets the "asset_id" field.
+func (m *ItemMutation) SetAssetID(i int) {
+ m.asset_id = &i
+ m.addasset_id = nil
+}
+
+// AssetID returns the value of the "asset_id" field in the mutation.
+func (m *ItemMutation) AssetID() (r int, exists bool) {
+ v := m.asset_id
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldAssetID returns the old "asset_id" field's value of the Item entity.
+// If the Item object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ItemMutation) OldAssetID(ctx context.Context) (v int, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldAssetID is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldAssetID requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldAssetID: %w", err)
+ }
+ return oldValue.AssetID, nil
+}
+
+// AddAssetID adds i to the "asset_id" field.
+func (m *ItemMutation) AddAssetID(i int) {
+ if m.addasset_id != nil {
+ *m.addasset_id += i
+ } else {
+ m.addasset_id = &i
+ }
+}
+
+// AddedAssetID returns the value that was added to the "asset_id" field in this mutation.
+func (m *ItemMutation) AddedAssetID() (r int, exists bool) {
+ v := m.addasset_id
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetAssetID resets all changes to the "asset_id" field.
+func (m *ItemMutation) ResetAssetID() {
+ m.asset_id = nil
+ m.addasset_id = nil
+}
+
// SetSerialNumber sets the "serial_number" field.
func (m *ItemMutation) SetSerialNumber(s string) {
m.serial_number = &s
@@ -5298,6 +5308,45 @@ func (m *ItemMutation) ResetSoldNotes() {
delete(m.clearedFields, item.FieldSoldNotes)
}
+// SetGroupID sets the "group" edge to the Group entity by id.
+func (m *ItemMutation) SetGroupID(id uuid.UUID) {
+ m.group = &id
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *ItemMutation) ClearGroup() {
+ m.clearedgroup = true
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *ItemMutation) GroupCleared() bool {
+ return m.clearedgroup
+}
+
+// GroupID returns the "group" edge ID in the mutation.
+func (m *ItemMutation) GroupID() (id uuid.UUID, exists bool) {
+ if m.group != nil {
+ return *m.group, true
+ }
+ return
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *ItemMutation) GroupIDs() (ids []uuid.UUID) {
+ if id := m.group; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *ItemMutation) ResetGroup() {
+ m.group = nil
+ m.clearedgroup = false
+}
+
// SetParentID sets the "parent" edge to the Item entity by id.
func (m *ItemMutation) SetParentID(id uuid.UUID) {
m.parent = &id
@@ -5391,45 +5440,6 @@ func (m *ItemMutation) ResetChildren() {
m.removedchildren = nil
}
-// SetGroupID sets the "group" edge to the Group entity by id.
-func (m *ItemMutation) SetGroupID(id uuid.UUID) {
- m.group = &id
-}
-
-// ClearGroup clears the "group" edge to the Group entity.
-func (m *ItemMutation) ClearGroup() {
- m.clearedgroup = true
-}
-
-// GroupCleared reports if the "group" edge to the Group entity was cleared.
-func (m *ItemMutation) GroupCleared() bool {
- return m.clearedgroup
-}
-
-// GroupID returns the "group" edge ID in the mutation.
-func (m *ItemMutation) GroupID() (id uuid.UUID, exists bool) {
- if m.group != nil {
- return *m.group, true
- }
- return
-}
-
-// GroupIDs returns the "group" edge IDs in the mutation.
-// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
-// GroupID instead. It exists only for internal usage by the builders.
-func (m *ItemMutation) GroupIDs() (ids []uuid.UUID) {
- if id := m.group; id != nil {
- ids = append(ids, *id)
- }
- return
-}
-
-// ResetGroup resets all changes to the "group" edge.
-func (m *ItemMutation) ResetGroup() {
- m.group = nil
- m.clearedgroup = false
-}
-
// AddLabelIDs adds the "label" edge to the Label entity by ids.
func (m *ItemMutation) AddLabelIDs(ids ...uuid.UUID) {
if m.label == nil {
@@ -5577,6 +5587,60 @@ func (m *ItemMutation) ResetFields() {
m.removedfields = nil
}
+// AddMaintenanceEntryIDs adds the "maintenance_entries" edge to the MaintenanceEntry entity by ids.
+func (m *ItemMutation) AddMaintenanceEntryIDs(ids ...uuid.UUID) {
+ if m.maintenance_entries == nil {
+ m.maintenance_entries = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ m.maintenance_entries[ids[i]] = struct{}{}
+ }
+}
+
+// ClearMaintenanceEntries clears the "maintenance_entries" edge to the MaintenanceEntry entity.
+func (m *ItemMutation) ClearMaintenanceEntries() {
+ m.clearedmaintenance_entries = true
+}
+
+// MaintenanceEntriesCleared reports if the "maintenance_entries" edge to the MaintenanceEntry entity was cleared.
+func (m *ItemMutation) MaintenanceEntriesCleared() bool {
+ return m.clearedmaintenance_entries
+}
+
+// RemoveMaintenanceEntryIDs removes the "maintenance_entries" edge to the MaintenanceEntry entity by IDs.
+func (m *ItemMutation) RemoveMaintenanceEntryIDs(ids ...uuid.UUID) {
+ if m.removedmaintenance_entries == nil {
+ m.removedmaintenance_entries = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ delete(m.maintenance_entries, ids[i])
+ m.removedmaintenance_entries[ids[i]] = struct{}{}
+ }
+}
+
+// RemovedMaintenanceEntries returns the removed IDs of the "maintenance_entries" edge to the MaintenanceEntry entity.
+func (m *ItemMutation) RemovedMaintenanceEntriesIDs() (ids []uuid.UUID) {
+ for id := range m.removedmaintenance_entries {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// MaintenanceEntriesIDs returns the "maintenance_entries" edge IDs in the mutation.
+func (m *ItemMutation) MaintenanceEntriesIDs() (ids []uuid.UUID) {
+ for id := range m.maintenance_entries {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// ResetMaintenanceEntries resets all changes to the "maintenance_entries" edge.
+func (m *ItemMutation) ResetMaintenanceEntries() {
+ m.maintenance_entries = nil
+ m.clearedmaintenance_entries = false
+ m.removedmaintenance_entries = nil
+}
+
// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by ids.
func (m *ItemMutation) AddAttachmentIDs(ids ...uuid.UUID) {
if m.attachments == nil {
@@ -5636,11 +5700,26 @@ func (m *ItemMutation) Where(ps ...predicate.Item) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the ItemMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *ItemMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Item, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *ItemMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *ItemMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Item).
func (m *ItemMutation) Type() string {
return m.typ
@@ -5650,7 +5729,7 @@ func (m *ItemMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *ItemMutation) Fields() []string {
- fields := make([]string, 0, 22)
+ fields := make([]string, 0, 23)
if m.created_at != nil {
fields = append(fields, item.FieldCreatedAt)
}
@@ -5678,6 +5757,9 @@ func (m *ItemMutation) Fields() []string {
if m.archived != nil {
fields = append(fields, item.FieldArchived)
}
+ if m.asset_id != nil {
+ fields = append(fields, item.FieldAssetID)
+ }
if m.serial_number != nil {
fields = append(fields, item.FieldSerialNumber)
}
@@ -5743,6 +5825,8 @@ func (m *ItemMutation) Field(name string) (ent.Value, bool) {
return m.Insured()
case item.FieldArchived:
return m.Archived()
+ case item.FieldAssetID:
+ return m.AssetID()
case item.FieldSerialNumber:
return m.SerialNumber()
case item.FieldModelNumber:
@@ -5796,6 +5880,8 @@ func (m *ItemMutation) OldField(ctx context.Context, name string) (ent.Value, er
return m.OldInsured(ctx)
case item.FieldArchived:
return m.OldArchived(ctx)
+ case item.FieldAssetID:
+ return m.OldAssetID(ctx)
case item.FieldSerialNumber:
return m.OldSerialNumber(ctx)
case item.FieldModelNumber:
@@ -5894,6 +5980,13 @@ func (m *ItemMutation) SetField(name string, value ent.Value) error {
}
m.SetArchived(v)
return nil
+ case item.FieldAssetID:
+ v, ok := value.(int)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetAssetID(v)
+ return nil
case item.FieldSerialNumber:
v, ok := value.(string)
if !ok {
@@ -5996,6 +6089,9 @@ func (m *ItemMutation) AddedFields() []string {
if m.addquantity != nil {
fields = append(fields, item.FieldQuantity)
}
+ if m.addasset_id != nil {
+ fields = append(fields, item.FieldAssetID)
+ }
if m.addpurchase_price != nil {
fields = append(fields, item.FieldPurchasePrice)
}
@@ -6012,6 +6108,8 @@ func (m *ItemMutation) AddedField(name string) (ent.Value, bool) {
switch name {
case item.FieldQuantity:
return m.AddedQuantity()
+ case item.FieldAssetID:
+ return m.AddedAssetID()
case item.FieldPurchasePrice:
return m.AddedPurchasePrice()
case item.FieldSoldPrice:
@@ -6032,6 +6130,13 @@ func (m *ItemMutation) AddField(name string, value ent.Value) error {
}
m.AddQuantity(v)
return nil
+ case item.FieldAssetID:
+ v, ok := value.(int)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddAssetID(v)
+ return nil
case item.FieldPurchasePrice:
v, ok := value.(float64)
if !ok {
@@ -6181,6 +6286,9 @@ func (m *ItemMutation) ResetField(name string) error {
case item.FieldArchived:
m.ResetArchived()
return nil
+ case item.FieldAssetID:
+ m.ResetAssetID()
+ return nil
case item.FieldSerialNumber:
m.ResetSerialNumber()
return nil
@@ -6226,16 +6334,16 @@ func (m *ItemMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *ItemMutation) AddedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
+ if m.group != nil {
+ edges = append(edges, item.EdgeGroup)
+ }
if m.parent != nil {
edges = append(edges, item.EdgeParent)
}
if m.children != nil {
edges = append(edges, item.EdgeChildren)
}
- if m.group != nil {
- edges = append(edges, item.EdgeGroup)
- }
if m.label != nil {
edges = append(edges, item.EdgeLabel)
}
@@ -6245,6 +6353,9 @@ func (m *ItemMutation) AddedEdges() []string {
if m.fields != nil {
edges = append(edges, item.EdgeFields)
}
+ if m.maintenance_entries != nil {
+ edges = append(edges, item.EdgeMaintenanceEntries)
+ }
if m.attachments != nil {
edges = append(edges, item.EdgeAttachments)
}
@@ -6255,6 +6366,10 @@ func (m *ItemMutation) AddedEdges() []string {
// name in this mutation.
func (m *ItemMutation) AddedIDs(name string) []ent.Value {
switch name {
+ case item.EdgeGroup:
+ if id := m.group; id != nil {
+ return []ent.Value{*id}
+ }
case item.EdgeParent:
if id := m.parent; id != nil {
return []ent.Value{*id}
@@ -6265,10 +6380,6 @@ func (m *ItemMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
- case item.EdgeGroup:
- if id := m.group; id != nil {
- return []ent.Value{*id}
- }
case item.EdgeLabel:
ids := make([]ent.Value, 0, len(m.label))
for id := range m.label {
@@ -6285,6 +6396,12 @@ func (m *ItemMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case item.EdgeMaintenanceEntries:
+ ids := make([]ent.Value, 0, len(m.maintenance_entries))
+ for id := range m.maintenance_entries {
+ ids = append(ids, id)
+ }
+ return ids
case item.EdgeAttachments:
ids := make([]ent.Value, 0, len(m.attachments))
for id := range m.attachments {
@@ -6297,7 +6414,7 @@ func (m *ItemMutation) AddedIDs(name string) []ent.Value {
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *ItemMutation) RemovedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
if m.removedchildren != nil {
edges = append(edges, item.EdgeChildren)
}
@@ -6307,6 +6424,9 @@ func (m *ItemMutation) RemovedEdges() []string {
if m.removedfields != nil {
edges = append(edges, item.EdgeFields)
}
+ if m.removedmaintenance_entries != nil {
+ edges = append(edges, item.EdgeMaintenanceEntries)
+ }
if m.removedattachments != nil {
edges = append(edges, item.EdgeAttachments)
}
@@ -6335,6 +6455,12 @@ func (m *ItemMutation) RemovedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case item.EdgeMaintenanceEntries:
+ ids := make([]ent.Value, 0, len(m.removedmaintenance_entries))
+ for id := range m.removedmaintenance_entries {
+ ids = append(ids, id)
+ }
+ return ids
case item.EdgeAttachments:
ids := make([]ent.Value, 0, len(m.removedattachments))
for id := range m.removedattachments {
@@ -6347,16 +6473,16 @@ func (m *ItemMutation) RemovedIDs(name string) []ent.Value {
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *ItemMutation) ClearedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
+ if m.clearedgroup {
+ edges = append(edges, item.EdgeGroup)
+ }
if m.clearedparent {
edges = append(edges, item.EdgeParent)
}
if m.clearedchildren {
edges = append(edges, item.EdgeChildren)
}
- if m.clearedgroup {
- edges = append(edges, item.EdgeGroup)
- }
if m.clearedlabel {
edges = append(edges, item.EdgeLabel)
}
@@ -6366,6 +6492,9 @@ func (m *ItemMutation) ClearedEdges() []string {
if m.clearedfields {
edges = append(edges, item.EdgeFields)
}
+ if m.clearedmaintenance_entries {
+ edges = append(edges, item.EdgeMaintenanceEntries)
+ }
if m.clearedattachments {
edges = append(edges, item.EdgeAttachments)
}
@@ -6376,18 +6505,20 @@ func (m *ItemMutation) ClearedEdges() []string {
// was cleared in this mutation.
func (m *ItemMutation) EdgeCleared(name string) bool {
switch name {
+ case item.EdgeGroup:
+ return m.clearedgroup
case item.EdgeParent:
return m.clearedparent
case item.EdgeChildren:
return m.clearedchildren
- case item.EdgeGroup:
- return m.clearedgroup
case item.EdgeLabel:
return m.clearedlabel
case item.EdgeLocation:
return m.clearedlocation
case item.EdgeFields:
return m.clearedfields
+ case item.EdgeMaintenanceEntries:
+ return m.clearedmaintenance_entries
case item.EdgeAttachments:
return m.clearedattachments
}
@@ -6398,12 +6529,12 @@ func (m *ItemMutation) EdgeCleared(name string) bool {
// if that edge is not defined in the schema.
func (m *ItemMutation) ClearEdge(name string) error {
switch name {
- case item.EdgeParent:
- m.ClearParent()
- return nil
case item.EdgeGroup:
m.ClearGroup()
return nil
+ case item.EdgeParent:
+ m.ClearParent()
+ return nil
case item.EdgeLocation:
m.ClearLocation()
return nil
@@ -6415,15 +6546,15 @@ func (m *ItemMutation) ClearEdge(name string) error {
// It returns an error if the edge is not defined in the schema.
func (m *ItemMutation) ResetEdge(name string) error {
switch name {
+ case item.EdgeGroup:
+ m.ResetGroup()
+ return nil
case item.EdgeParent:
m.ResetParent()
return nil
case item.EdgeChildren:
m.ResetChildren()
return nil
- case item.EdgeGroup:
- m.ResetGroup()
- return nil
case item.EdgeLabel:
m.ResetLabel()
return nil
@@ -6433,6 +6564,9 @@ func (m *ItemMutation) ResetEdge(name string) error {
case item.EdgeFields:
m.ResetFields()
return nil
+ case item.EdgeMaintenanceEntries:
+ m.ResetMaintenanceEntries()
+ return nil
case item.EdgeAttachments:
m.ResetAttachments()
return nil
@@ -6996,11 +7130,26 @@ func (m *ItemFieldMutation) Where(ps ...predicate.ItemField) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the ItemFieldMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *ItemFieldMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.ItemField, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *ItemFieldMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *ItemFieldMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (ItemField).
func (m *ItemFieldMutation) Type() string {
return m.typ
@@ -7783,11 +7932,26 @@ func (m *LabelMutation) Where(ps ...predicate.Label) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the LabelMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *LabelMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Label, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *LabelMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *LabelMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Label).
func (m *LabelMutation) Type() string {
return m.typ
@@ -8090,13 +8254,13 @@ type LocationMutation struct {
name *string
description *string
clearedFields map[string]struct{}
+ group *uuid.UUID
+ clearedgroup bool
parent *uuid.UUID
clearedparent bool
children map[uuid.UUID]struct{}
removedchildren map[uuid.UUID]struct{}
clearedchildren bool
- group *uuid.UUID
- clearedgroup bool
items map[uuid.UUID]struct{}
removeditems map[uuid.UUID]struct{}
cleareditems bool
@@ -8366,6 +8530,45 @@ func (m *LocationMutation) ResetDescription() {
delete(m.clearedFields, location.FieldDescription)
}
+// SetGroupID sets the "group" edge to the Group entity by id.
+func (m *LocationMutation) SetGroupID(id uuid.UUID) {
+ m.group = &id
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *LocationMutation) ClearGroup() {
+ m.clearedgroup = true
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *LocationMutation) GroupCleared() bool {
+ return m.clearedgroup
+}
+
+// GroupID returns the "group" edge ID in the mutation.
+func (m *LocationMutation) GroupID() (id uuid.UUID, exists bool) {
+ if m.group != nil {
+ return *m.group, true
+ }
+ return
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *LocationMutation) GroupIDs() (ids []uuid.UUID) {
+ if id := m.group; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *LocationMutation) ResetGroup() {
+ m.group = nil
+ m.clearedgroup = false
+}
+
// SetParentID sets the "parent" edge to the Location entity by id.
func (m *LocationMutation) SetParentID(id uuid.UUID) {
m.parent = &id
@@ -8459,45 +8662,6 @@ func (m *LocationMutation) ResetChildren() {
m.removedchildren = nil
}
-// SetGroupID sets the "group" edge to the Group entity by id.
-func (m *LocationMutation) SetGroupID(id uuid.UUID) {
- m.group = &id
-}
-
-// ClearGroup clears the "group" edge to the Group entity.
-func (m *LocationMutation) ClearGroup() {
- m.clearedgroup = true
-}
-
-// GroupCleared reports if the "group" edge to the Group entity was cleared.
-func (m *LocationMutation) GroupCleared() bool {
- return m.clearedgroup
-}
-
-// GroupID returns the "group" edge ID in the mutation.
-func (m *LocationMutation) GroupID() (id uuid.UUID, exists bool) {
- if m.group != nil {
- return *m.group, true
- }
- return
-}
-
-// GroupIDs returns the "group" edge IDs in the mutation.
-// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
-// GroupID instead. It exists only for internal usage by the builders.
-func (m *LocationMutation) GroupIDs() (ids []uuid.UUID) {
- if id := m.group; id != nil {
- ids = append(ids, *id)
- }
- return
-}
-
-// ResetGroup resets all changes to the "group" edge.
-func (m *LocationMutation) ResetGroup() {
- m.group = nil
- m.clearedgroup = false
-}
-
// AddItemIDs adds the "items" edge to the Item entity by ids.
func (m *LocationMutation) AddItemIDs(ids ...uuid.UUID) {
if m.items == nil {
@@ -8557,11 +8721,26 @@ func (m *LocationMutation) Where(ps ...predicate.Location) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the LocationMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *LocationMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Location, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *LocationMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *LocationMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (Location).
func (m *LocationMutation) Type() string {
return m.typ
@@ -8731,15 +8910,15 @@ func (m *LocationMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *LocationMutation) AddedEdges() []string {
edges := make([]string, 0, 4)
+ if m.group != nil {
+ edges = append(edges, location.EdgeGroup)
+ }
if m.parent != nil {
edges = append(edges, location.EdgeParent)
}
if m.children != nil {
edges = append(edges, location.EdgeChildren)
}
- if m.group != nil {
- edges = append(edges, location.EdgeGroup)
- }
if m.items != nil {
edges = append(edges, location.EdgeItems)
}
@@ -8750,6 +8929,10 @@ func (m *LocationMutation) AddedEdges() []string {
// name in this mutation.
func (m *LocationMutation) AddedIDs(name string) []ent.Value {
switch name {
+ case location.EdgeGroup:
+ if id := m.group; id != nil {
+ return []ent.Value{*id}
+ }
case location.EdgeParent:
if id := m.parent; id != nil {
return []ent.Value{*id}
@@ -8760,10 +8943,6 @@ func (m *LocationMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
- case location.EdgeGroup:
- if id := m.group; id != nil {
- return []ent.Value{*id}
- }
case location.EdgeItems:
ids := make([]ent.Value, 0, len(m.items))
for id := range m.items {
@@ -8809,15 +8988,15 @@ func (m *LocationMutation) RemovedIDs(name string) []ent.Value {
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *LocationMutation) ClearedEdges() []string {
edges := make([]string, 0, 4)
+ if m.clearedgroup {
+ edges = append(edges, location.EdgeGroup)
+ }
if m.clearedparent {
edges = append(edges, location.EdgeParent)
}
if m.clearedchildren {
edges = append(edges, location.EdgeChildren)
}
- if m.clearedgroup {
- edges = append(edges, location.EdgeGroup)
- }
if m.cleareditems {
edges = append(edges, location.EdgeItems)
}
@@ -8828,12 +9007,12 @@ func (m *LocationMutation) ClearedEdges() []string {
// was cleared in this mutation.
func (m *LocationMutation) EdgeCleared(name string) bool {
switch name {
+ case location.EdgeGroup:
+ return m.clearedgroup
case location.EdgeParent:
return m.clearedparent
case location.EdgeChildren:
return m.clearedchildren
- case location.EdgeGroup:
- return m.clearedgroup
case location.EdgeItems:
return m.cleareditems
}
@@ -8844,12 +9023,12 @@ func (m *LocationMutation) EdgeCleared(name string) bool {
// if that edge is not defined in the schema.
func (m *LocationMutation) ClearEdge(name string) error {
switch name {
- case location.EdgeParent:
- m.ClearParent()
- return nil
case location.EdgeGroup:
m.ClearGroup()
return nil
+ case location.EdgeParent:
+ m.ClearParent()
+ return nil
}
return fmt.Errorf("unknown Location unique edge %s", name)
}
@@ -8858,15 +9037,15 @@ func (m *LocationMutation) ClearEdge(name string) error {
// It returns an error if the edge is not defined in the schema.
func (m *LocationMutation) ResetEdge(name string) error {
switch name {
+ case location.EdgeGroup:
+ m.ResetGroup()
+ return nil
case location.EdgeParent:
m.ResetParent()
return nil
case location.EdgeChildren:
m.ResetChildren()
return nil
- case location.EdgeGroup:
- m.ResetGroup()
- return nil
case location.EdgeItems:
m.ResetItems()
return nil
@@ -8874,6 +9053,1622 @@ func (m *LocationMutation) ResetEdge(name string) error {
return fmt.Errorf("unknown Location edge %s", name)
}
+// MaintenanceEntryMutation represents an operation that mutates the MaintenanceEntry nodes in the graph.
+type MaintenanceEntryMutation struct {
+ config
+ op Op
+ typ string
+ id *uuid.UUID
+ created_at *time.Time
+ updated_at *time.Time
+ date *time.Time
+ scheduled_date *time.Time
+ name *string
+ description *string
+ cost *float64
+ addcost *float64
+ clearedFields map[string]struct{}
+ item *uuid.UUID
+ cleareditem bool
+ done bool
+ oldValue func(context.Context) (*MaintenanceEntry, error)
+ predicates []predicate.MaintenanceEntry
+}
+
+var _ ent.Mutation = (*MaintenanceEntryMutation)(nil)
+
+// maintenanceentryOption allows management of the mutation configuration using functional options.
+type maintenanceentryOption func(*MaintenanceEntryMutation)
+
+// newMaintenanceEntryMutation creates new mutation for the MaintenanceEntry entity.
+func newMaintenanceEntryMutation(c config, op Op, opts ...maintenanceentryOption) *MaintenanceEntryMutation {
+ m := &MaintenanceEntryMutation{
+ config: c,
+ op: op,
+ typ: TypeMaintenanceEntry,
+ clearedFields: make(map[string]struct{}),
+ }
+ for _, opt := range opts {
+ opt(m)
+ }
+ return m
+}
+
+// withMaintenanceEntryID sets the ID field of the mutation.
+func withMaintenanceEntryID(id uuid.UUID) maintenanceentryOption {
+ return func(m *MaintenanceEntryMutation) {
+ var (
+ err error
+ once sync.Once
+ value *MaintenanceEntry
+ )
+ m.oldValue = func(ctx context.Context) (*MaintenanceEntry, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().MaintenanceEntry.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withMaintenanceEntry sets the old MaintenanceEntry of the mutation.
+func withMaintenanceEntry(node *MaintenanceEntry) maintenanceentryOption {
+ return func(m *MaintenanceEntryMutation) {
+ m.oldValue = func(context.Context) (*MaintenanceEntry, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MaintenanceEntryMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MaintenanceEntryMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("ent: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of MaintenanceEntry entities.
+func (m *MaintenanceEntryMutation) SetID(id uuid.UUID) {
+ m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *MaintenanceEntryMutation) ID() (id uuid.UUID, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *MaintenanceEntryMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []uuid.UUID{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().MaintenanceEntry.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *MaintenanceEntryMutation) SetCreatedAt(t time.Time) {
+ m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *MaintenanceEntryMutation) CreatedAt() (r time.Time, exists bool) {
+ v := m.created_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+ }
+ return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *MaintenanceEntryMutation) ResetCreatedAt() {
+ m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *MaintenanceEntryMutation) SetUpdatedAt(t time.Time) {
+ m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *MaintenanceEntryMutation) UpdatedAt() (r time.Time, exists bool) {
+ v := m.updated_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+ }
+ return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *MaintenanceEntryMutation) ResetUpdatedAt() {
+ m.updated_at = nil
+}
+
+// SetItemID sets the "item_id" field.
+func (m *MaintenanceEntryMutation) SetItemID(u uuid.UUID) {
+ m.item = &u
+}
+
+// ItemID returns the value of the "item_id" field in the mutation.
+func (m *MaintenanceEntryMutation) ItemID() (r uuid.UUID, exists bool) {
+ v := m.item
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldItemID returns the old "item_id" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldItemID(ctx context.Context) (v uuid.UUID, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldItemID is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldItemID requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldItemID: %w", err)
+ }
+ return oldValue.ItemID, nil
+}
+
+// ResetItemID resets all changes to the "item_id" field.
+func (m *MaintenanceEntryMutation) ResetItemID() {
+ m.item = nil
+}
+
+// SetDate sets the "date" field.
+func (m *MaintenanceEntryMutation) SetDate(t time.Time) {
+ m.date = &t
+}
+
+// Date returns the value of the "date" field in the mutation.
+func (m *MaintenanceEntryMutation) Date() (r time.Time, exists bool) {
+ v := m.date
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldDate returns the old "date" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldDate(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldDate is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldDate requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldDate: %w", err)
+ }
+ return oldValue.Date, nil
+}
+
+// ClearDate clears the value of the "date" field.
+func (m *MaintenanceEntryMutation) ClearDate() {
+ m.date = nil
+ m.clearedFields[maintenanceentry.FieldDate] = struct{}{}
+}
+
+// DateCleared returns if the "date" field was cleared in this mutation.
+func (m *MaintenanceEntryMutation) DateCleared() bool {
+ _, ok := m.clearedFields[maintenanceentry.FieldDate]
+ return ok
+}
+
+// ResetDate resets all changes to the "date" field.
+func (m *MaintenanceEntryMutation) ResetDate() {
+ m.date = nil
+ delete(m.clearedFields, maintenanceentry.FieldDate)
+}
+
+// SetScheduledDate sets the "scheduled_date" field.
+func (m *MaintenanceEntryMutation) SetScheduledDate(t time.Time) {
+ m.scheduled_date = &t
+}
+
+// ScheduledDate returns the value of the "scheduled_date" field in the mutation.
+func (m *MaintenanceEntryMutation) ScheduledDate() (r time.Time, exists bool) {
+ v := m.scheduled_date
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldScheduledDate returns the old "scheduled_date" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldScheduledDate(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldScheduledDate is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldScheduledDate requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldScheduledDate: %w", err)
+ }
+ return oldValue.ScheduledDate, nil
+}
+
+// ClearScheduledDate clears the value of the "scheduled_date" field.
+func (m *MaintenanceEntryMutation) ClearScheduledDate() {
+ m.scheduled_date = nil
+ m.clearedFields[maintenanceentry.FieldScheduledDate] = struct{}{}
+}
+
+// ScheduledDateCleared returns if the "scheduled_date" field was cleared in this mutation.
+func (m *MaintenanceEntryMutation) ScheduledDateCleared() bool {
+ _, ok := m.clearedFields[maintenanceentry.FieldScheduledDate]
+ return ok
+}
+
+// ResetScheduledDate resets all changes to the "scheduled_date" field.
+func (m *MaintenanceEntryMutation) ResetScheduledDate() {
+ m.scheduled_date = nil
+ delete(m.clearedFields, maintenanceentry.FieldScheduledDate)
+}
+
+// SetName sets the "name" field.
+func (m *MaintenanceEntryMutation) SetName(s string) {
+ m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *MaintenanceEntryMutation) Name() (r string, exists bool) {
+ v := m.name
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldName returns the old "name" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldName(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldName is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldName requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldName: %w", err)
+ }
+ return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *MaintenanceEntryMutation) ResetName() {
+ m.name = nil
+}
+
+// SetDescription sets the "description" field.
+func (m *MaintenanceEntryMutation) SetDescription(s string) {
+ m.description = &s
+}
+
+// Description returns the value of the "description" field in the mutation.
+func (m *MaintenanceEntryMutation) Description() (r string, exists bool) {
+ v := m.description
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldDescription returns the old "description" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldDescription(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldDescription is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldDescription requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldDescription: %w", err)
+ }
+ return oldValue.Description, nil
+}
+
+// ClearDescription clears the value of the "description" field.
+func (m *MaintenanceEntryMutation) ClearDescription() {
+ m.description = nil
+ m.clearedFields[maintenanceentry.FieldDescription] = struct{}{}
+}
+
+// DescriptionCleared returns if the "description" field was cleared in this mutation.
+func (m *MaintenanceEntryMutation) DescriptionCleared() bool {
+ _, ok := m.clearedFields[maintenanceentry.FieldDescription]
+ return ok
+}
+
+// ResetDescription resets all changes to the "description" field.
+func (m *MaintenanceEntryMutation) ResetDescription() {
+ m.description = nil
+ delete(m.clearedFields, maintenanceentry.FieldDescription)
+}
+
+// SetCost sets the "cost" field.
+func (m *MaintenanceEntryMutation) SetCost(f float64) {
+ m.cost = &f
+ m.addcost = nil
+}
+
+// Cost returns the value of the "cost" field in the mutation.
+func (m *MaintenanceEntryMutation) Cost() (r float64, exists bool) {
+ v := m.cost
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCost returns the old "cost" field's value of the MaintenanceEntry entity.
+// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MaintenanceEntryMutation) OldCost(ctx context.Context) (v float64, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCost is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCost requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCost: %w", err)
+ }
+ return oldValue.Cost, nil
+}
+
+// AddCost adds f to the "cost" field.
+func (m *MaintenanceEntryMutation) AddCost(f float64) {
+ if m.addcost != nil {
+ *m.addcost += f
+ } else {
+ m.addcost = &f
+ }
+}
+
+// AddedCost returns the value that was added to the "cost" field in this mutation.
+func (m *MaintenanceEntryMutation) AddedCost() (r float64, exists bool) {
+ v := m.addcost
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetCost resets all changes to the "cost" field.
+func (m *MaintenanceEntryMutation) ResetCost() {
+ m.cost = nil
+ m.addcost = nil
+}
+
+// ClearItem clears the "item" edge to the Item entity.
+func (m *MaintenanceEntryMutation) ClearItem() {
+ m.cleareditem = true
+ m.clearedFields[maintenanceentry.FieldItemID] = struct{}{}
+}
+
+// ItemCleared reports if the "item" edge to the Item entity was cleared.
+func (m *MaintenanceEntryMutation) ItemCleared() bool {
+ return m.cleareditem
+}
+
+// ItemIDs returns the "item" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// ItemID instead. It exists only for internal usage by the builders.
+func (m *MaintenanceEntryMutation) ItemIDs() (ids []uuid.UUID) {
+ if id := m.item; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetItem resets all changes to the "item" edge.
+func (m *MaintenanceEntryMutation) ResetItem() {
+ m.item = nil
+ m.cleareditem = false
+}
+
+// Where appends a list predicates to the MaintenanceEntryMutation builder.
+func (m *MaintenanceEntryMutation) Where(ps ...predicate.MaintenanceEntry) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the MaintenanceEntryMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *MaintenanceEntryMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.MaintenanceEntry, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *MaintenanceEntryMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *MaintenanceEntryMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (MaintenanceEntry).
+func (m *MaintenanceEntryMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *MaintenanceEntryMutation) Fields() []string {
+ fields := make([]string, 0, 8)
+ if m.created_at != nil {
+ fields = append(fields, maintenanceentry.FieldCreatedAt)
+ }
+ if m.updated_at != nil {
+ fields = append(fields, maintenanceentry.FieldUpdatedAt)
+ }
+ if m.item != nil {
+ fields = append(fields, maintenanceentry.FieldItemID)
+ }
+ if m.date != nil {
+ fields = append(fields, maintenanceentry.FieldDate)
+ }
+ if m.scheduled_date != nil {
+ fields = append(fields, maintenanceentry.FieldScheduledDate)
+ }
+ if m.name != nil {
+ fields = append(fields, maintenanceentry.FieldName)
+ }
+ if m.description != nil {
+ fields = append(fields, maintenanceentry.FieldDescription)
+ }
+ if m.cost != nil {
+ fields = append(fields, maintenanceentry.FieldCost)
+ }
+ return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *MaintenanceEntryMutation) Field(name string) (ent.Value, bool) {
+ switch name {
+ case maintenanceentry.FieldCreatedAt:
+ return m.CreatedAt()
+ case maintenanceentry.FieldUpdatedAt:
+ return m.UpdatedAt()
+ case maintenanceentry.FieldItemID:
+ return m.ItemID()
+ case maintenanceentry.FieldDate:
+ return m.Date()
+ case maintenanceentry.FieldScheduledDate:
+ return m.ScheduledDate()
+ case maintenanceentry.FieldName:
+ return m.Name()
+ case maintenanceentry.FieldDescription:
+ return m.Description()
+ case maintenanceentry.FieldCost:
+ return m.Cost()
+ }
+ return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *MaintenanceEntryMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+ switch name {
+ case maintenanceentry.FieldCreatedAt:
+ return m.OldCreatedAt(ctx)
+ case maintenanceentry.FieldUpdatedAt:
+ return m.OldUpdatedAt(ctx)
+ case maintenanceentry.FieldItemID:
+ return m.OldItemID(ctx)
+ case maintenanceentry.FieldDate:
+ return m.OldDate(ctx)
+ case maintenanceentry.FieldScheduledDate:
+ return m.OldScheduledDate(ctx)
+ case maintenanceentry.FieldName:
+ return m.OldName(ctx)
+ case maintenanceentry.FieldDescription:
+ return m.OldDescription(ctx)
+ case maintenanceentry.FieldCost:
+ return m.OldCost(ctx)
+ }
+ return nil, fmt.Errorf("unknown MaintenanceEntry field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *MaintenanceEntryMutation) SetField(name string, value ent.Value) error {
+ switch name {
+ case maintenanceentry.FieldCreatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetCreatedAt(v)
+ return nil
+ case maintenanceentry.FieldUpdatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetUpdatedAt(v)
+ return nil
+ case maintenanceentry.FieldItemID:
+ v, ok := value.(uuid.UUID)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetItemID(v)
+ return nil
+ case maintenanceentry.FieldDate:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetDate(v)
+ return nil
+ case maintenanceentry.FieldScheduledDate:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetScheduledDate(v)
+ return nil
+ case maintenanceentry.FieldName:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetName(v)
+ return nil
+ case maintenanceentry.FieldDescription:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetDescription(v)
+ return nil
+ case maintenanceentry.FieldCost:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetCost(v)
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *MaintenanceEntryMutation) AddedFields() []string {
+ var fields []string
+ if m.addcost != nil {
+ fields = append(fields, maintenanceentry.FieldCost)
+ }
+ return fields
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *MaintenanceEntryMutation) AddedField(name string) (ent.Value, bool) {
+ switch name {
+ case maintenanceentry.FieldCost:
+ return m.AddedCost()
+ }
+ return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *MaintenanceEntryMutation) AddField(name string, value ent.Value) error {
+ switch name {
+ case maintenanceentry.FieldCost:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddCost(v)
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *MaintenanceEntryMutation) ClearedFields() []string {
+ var fields []string
+ if m.FieldCleared(maintenanceentry.FieldDate) {
+ fields = append(fields, maintenanceentry.FieldDate)
+ }
+ if m.FieldCleared(maintenanceentry.FieldScheduledDate) {
+ fields = append(fields, maintenanceentry.FieldScheduledDate)
+ }
+ if m.FieldCleared(maintenanceentry.FieldDescription) {
+ fields = append(fields, maintenanceentry.FieldDescription)
+ }
+ return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *MaintenanceEntryMutation) FieldCleared(name string) bool {
+ _, ok := m.clearedFields[name]
+ return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *MaintenanceEntryMutation) ClearField(name string) error {
+ switch name {
+ case maintenanceentry.FieldDate:
+ m.ClearDate()
+ return nil
+ case maintenanceentry.FieldScheduledDate:
+ m.ClearScheduledDate()
+ return nil
+ case maintenanceentry.FieldDescription:
+ m.ClearDescription()
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *MaintenanceEntryMutation) ResetField(name string) error {
+ switch name {
+ case maintenanceentry.FieldCreatedAt:
+ m.ResetCreatedAt()
+ return nil
+ case maintenanceentry.FieldUpdatedAt:
+ m.ResetUpdatedAt()
+ return nil
+ case maintenanceentry.FieldItemID:
+ m.ResetItemID()
+ return nil
+ case maintenanceentry.FieldDate:
+ m.ResetDate()
+ return nil
+ case maintenanceentry.FieldScheduledDate:
+ m.ResetScheduledDate()
+ return nil
+ case maintenanceentry.FieldName:
+ m.ResetName()
+ return nil
+ case maintenanceentry.FieldDescription:
+ m.ResetDescription()
+ return nil
+ case maintenanceentry.FieldCost:
+ m.ResetCost()
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *MaintenanceEntryMutation) AddedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.item != nil {
+ edges = append(edges, maintenanceentry.EdgeItem)
+ }
+ return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *MaintenanceEntryMutation) AddedIDs(name string) []ent.Value {
+ switch name {
+ case maintenanceentry.EdgeItem:
+ if id := m.item; id != nil {
+ return []ent.Value{*id}
+ }
+ }
+ return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *MaintenanceEntryMutation) RemovedEdges() []string {
+ edges := make([]string, 0, 1)
+ return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *MaintenanceEntryMutation) RemovedIDs(name string) []ent.Value {
+ return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *MaintenanceEntryMutation) ClearedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.cleareditem {
+ edges = append(edges, maintenanceentry.EdgeItem)
+ }
+ return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *MaintenanceEntryMutation) EdgeCleared(name string) bool {
+ switch name {
+ case maintenanceentry.EdgeItem:
+ return m.cleareditem
+ }
+ return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *MaintenanceEntryMutation) ClearEdge(name string) error {
+ switch name {
+ case maintenanceentry.EdgeItem:
+ m.ClearItem()
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *MaintenanceEntryMutation) ResetEdge(name string) error {
+ switch name {
+ case maintenanceentry.EdgeItem:
+ m.ResetItem()
+ return nil
+ }
+ return fmt.Errorf("unknown MaintenanceEntry edge %s", name)
+}
+
+// NotifierMutation represents an operation that mutates the Notifier nodes in the graph.
+type NotifierMutation struct {
+ config
+ op Op
+ typ string
+ id *uuid.UUID
+ created_at *time.Time
+ updated_at *time.Time
+ name *string
+ url *string
+ is_active *bool
+ clearedFields map[string]struct{}
+ group *uuid.UUID
+ clearedgroup bool
+ user *uuid.UUID
+ cleareduser bool
+ done bool
+ oldValue func(context.Context) (*Notifier, error)
+ predicates []predicate.Notifier
+}
+
+var _ ent.Mutation = (*NotifierMutation)(nil)
+
+// notifierOption allows management of the mutation configuration using functional options.
+type notifierOption func(*NotifierMutation)
+
+// newNotifierMutation creates new mutation for the Notifier entity.
+func newNotifierMutation(c config, op Op, opts ...notifierOption) *NotifierMutation {
+ m := &NotifierMutation{
+ config: c,
+ op: op,
+ typ: TypeNotifier,
+ clearedFields: make(map[string]struct{}),
+ }
+ for _, opt := range opts {
+ opt(m)
+ }
+ return m
+}
+
+// withNotifierID sets the ID field of the mutation.
+func withNotifierID(id uuid.UUID) notifierOption {
+ return func(m *NotifierMutation) {
+ var (
+ err error
+ once sync.Once
+ value *Notifier
+ )
+ m.oldValue = func(ctx context.Context) (*Notifier, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().Notifier.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withNotifier sets the old Notifier of the mutation.
+func withNotifier(node *Notifier) notifierOption {
+ return func(m *NotifierMutation) {
+ m.oldValue = func(context.Context) (*Notifier, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m NotifierMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m NotifierMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("ent: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Notifier entities.
+func (m *NotifierMutation) SetID(id uuid.UUID) {
+ m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *NotifierMutation) ID() (id uuid.UUID, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *NotifierMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []uuid.UUID{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().Notifier.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *NotifierMutation) SetCreatedAt(t time.Time) {
+ m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *NotifierMutation) CreatedAt() (r time.Time, exists bool) {
+ v := m.created_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+ }
+ return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *NotifierMutation) ResetCreatedAt() {
+ m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *NotifierMutation) SetUpdatedAt(t time.Time) {
+ m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *NotifierMutation) UpdatedAt() (r time.Time, exists bool) {
+ v := m.updated_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+ }
+ return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *NotifierMutation) ResetUpdatedAt() {
+ m.updated_at = nil
+}
+
+// SetGroupID sets the "group_id" field.
+func (m *NotifierMutation) SetGroupID(u uuid.UUID) {
+ m.group = &u
+}
+
+// GroupID returns the value of the "group_id" field in the mutation.
+func (m *NotifierMutation) GroupID() (r uuid.UUID, exists bool) {
+ v := m.group
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldGroupID returns the old "group_id" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldGroupID(ctx context.Context) (v uuid.UUID, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldGroupID is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldGroupID requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldGroupID: %w", err)
+ }
+ return oldValue.GroupID, nil
+}
+
+// ResetGroupID resets all changes to the "group_id" field.
+func (m *NotifierMutation) ResetGroupID() {
+ m.group = nil
+}
+
+// SetUserID sets the "user_id" field.
+func (m *NotifierMutation) SetUserID(u uuid.UUID) {
+ m.user = &u
+}
+
+// UserID returns the value of the "user_id" field in the mutation.
+func (m *NotifierMutation) UserID() (r uuid.UUID, exists bool) {
+ v := m.user
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldUserID returns the old "user_id" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldUserID(ctx context.Context) (v uuid.UUID, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldUserID is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldUserID requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldUserID: %w", err)
+ }
+ return oldValue.UserID, nil
+}
+
+// ResetUserID resets all changes to the "user_id" field.
+func (m *NotifierMutation) ResetUserID() {
+ m.user = nil
+}
+
+// SetName sets the "name" field.
+func (m *NotifierMutation) SetName(s string) {
+ m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *NotifierMutation) Name() (r string, exists bool) {
+ v := m.name
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldName returns the old "name" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldName(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldName is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldName requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldName: %w", err)
+ }
+ return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *NotifierMutation) ResetName() {
+ m.name = nil
+}
+
+// SetURL sets the "url" field.
+func (m *NotifierMutation) SetURL(s string) {
+ m.url = &s
+}
+
+// URL returns the value of the "url" field in the mutation.
+func (m *NotifierMutation) URL() (r string, exists bool) {
+ v := m.url
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldURL returns the old "url" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldURL(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldURL is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldURL requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldURL: %w", err)
+ }
+ return oldValue.URL, nil
+}
+
+// ResetURL resets all changes to the "url" field.
+func (m *NotifierMutation) ResetURL() {
+ m.url = nil
+}
+
+// SetIsActive sets the "is_active" field.
+func (m *NotifierMutation) SetIsActive(b bool) {
+ m.is_active = &b
+}
+
+// IsActive returns the value of the "is_active" field in the mutation.
+func (m *NotifierMutation) IsActive() (r bool, exists bool) {
+ v := m.is_active
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldIsActive returns the old "is_active" field's value of the Notifier entity.
+// If the Notifier object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *NotifierMutation) OldIsActive(ctx context.Context) (v bool, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldIsActive is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldIsActive requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldIsActive: %w", err)
+ }
+ return oldValue.IsActive, nil
+}
+
+// ResetIsActive resets all changes to the "is_active" field.
+func (m *NotifierMutation) ResetIsActive() {
+ m.is_active = nil
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *NotifierMutation) ClearGroup() {
+ m.clearedgroup = true
+ m.clearedFields[notifier.FieldGroupID] = struct{}{}
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *NotifierMutation) GroupCleared() bool {
+ return m.clearedgroup
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *NotifierMutation) GroupIDs() (ids []uuid.UUID) {
+ if id := m.group; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *NotifierMutation) ResetGroup() {
+ m.group = nil
+ m.clearedgroup = false
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (m *NotifierMutation) ClearUser() {
+ m.cleareduser = true
+ m.clearedFields[notifier.FieldUserID] = struct{}{}
+}
+
+// UserCleared reports if the "user" edge to the User entity was cleared.
+func (m *NotifierMutation) UserCleared() bool {
+ return m.cleareduser
+}
+
+// UserIDs returns the "user" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// UserID instead. It exists only for internal usage by the builders.
+func (m *NotifierMutation) UserIDs() (ids []uuid.UUID) {
+ if id := m.user; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetUser resets all changes to the "user" edge.
+func (m *NotifierMutation) ResetUser() {
+ m.user = nil
+ m.cleareduser = false
+}
+
+// Where appends a list predicates to the NotifierMutation builder.
+func (m *NotifierMutation) Where(ps ...predicate.Notifier) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the NotifierMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *NotifierMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Notifier, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *NotifierMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *NotifierMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (Notifier).
+func (m *NotifierMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *NotifierMutation) Fields() []string {
+ fields := make([]string, 0, 7)
+ if m.created_at != nil {
+ fields = append(fields, notifier.FieldCreatedAt)
+ }
+ if m.updated_at != nil {
+ fields = append(fields, notifier.FieldUpdatedAt)
+ }
+ if m.group != nil {
+ fields = append(fields, notifier.FieldGroupID)
+ }
+ if m.user != nil {
+ fields = append(fields, notifier.FieldUserID)
+ }
+ if m.name != nil {
+ fields = append(fields, notifier.FieldName)
+ }
+ if m.url != nil {
+ fields = append(fields, notifier.FieldURL)
+ }
+ if m.is_active != nil {
+ fields = append(fields, notifier.FieldIsActive)
+ }
+ return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *NotifierMutation) Field(name string) (ent.Value, bool) {
+ switch name {
+ case notifier.FieldCreatedAt:
+ return m.CreatedAt()
+ case notifier.FieldUpdatedAt:
+ return m.UpdatedAt()
+ case notifier.FieldGroupID:
+ return m.GroupID()
+ case notifier.FieldUserID:
+ return m.UserID()
+ case notifier.FieldName:
+ return m.Name()
+ case notifier.FieldURL:
+ return m.URL()
+ case notifier.FieldIsActive:
+ return m.IsActive()
+ }
+ return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *NotifierMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+ switch name {
+ case notifier.FieldCreatedAt:
+ return m.OldCreatedAt(ctx)
+ case notifier.FieldUpdatedAt:
+ return m.OldUpdatedAt(ctx)
+ case notifier.FieldGroupID:
+ return m.OldGroupID(ctx)
+ case notifier.FieldUserID:
+ return m.OldUserID(ctx)
+ case notifier.FieldName:
+ return m.OldName(ctx)
+ case notifier.FieldURL:
+ return m.OldURL(ctx)
+ case notifier.FieldIsActive:
+ return m.OldIsActive(ctx)
+ }
+ return nil, fmt.Errorf("unknown Notifier field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *NotifierMutation) SetField(name string, value ent.Value) error {
+ switch name {
+ case notifier.FieldCreatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetCreatedAt(v)
+ return nil
+ case notifier.FieldUpdatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetUpdatedAt(v)
+ return nil
+ case notifier.FieldGroupID:
+ v, ok := value.(uuid.UUID)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetGroupID(v)
+ return nil
+ case notifier.FieldUserID:
+ v, ok := value.(uuid.UUID)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetUserID(v)
+ return nil
+ case notifier.FieldName:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetName(v)
+ return nil
+ case notifier.FieldURL:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetURL(v)
+ return nil
+ case notifier.FieldIsActive:
+ v, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetIsActive(v)
+ return nil
+ }
+ return fmt.Errorf("unknown Notifier field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *NotifierMutation) AddedFields() []string {
+ return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *NotifierMutation) AddedField(name string) (ent.Value, bool) {
+ return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *NotifierMutation) AddField(name string, value ent.Value) error {
+ switch name {
+ }
+ return fmt.Errorf("unknown Notifier numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *NotifierMutation) ClearedFields() []string {
+ return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *NotifierMutation) FieldCleared(name string) bool {
+ _, ok := m.clearedFields[name]
+ return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *NotifierMutation) ClearField(name string) error {
+ return fmt.Errorf("unknown Notifier nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *NotifierMutation) ResetField(name string) error {
+ switch name {
+ case notifier.FieldCreatedAt:
+ m.ResetCreatedAt()
+ return nil
+ case notifier.FieldUpdatedAt:
+ m.ResetUpdatedAt()
+ return nil
+ case notifier.FieldGroupID:
+ m.ResetGroupID()
+ return nil
+ case notifier.FieldUserID:
+ m.ResetUserID()
+ return nil
+ case notifier.FieldName:
+ m.ResetName()
+ return nil
+ case notifier.FieldURL:
+ m.ResetURL()
+ return nil
+ case notifier.FieldIsActive:
+ m.ResetIsActive()
+ return nil
+ }
+ return fmt.Errorf("unknown Notifier field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *NotifierMutation) AddedEdges() []string {
+ edges := make([]string, 0, 2)
+ if m.group != nil {
+ edges = append(edges, notifier.EdgeGroup)
+ }
+ if m.user != nil {
+ edges = append(edges, notifier.EdgeUser)
+ }
+ return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *NotifierMutation) AddedIDs(name string) []ent.Value {
+ switch name {
+ case notifier.EdgeGroup:
+ if id := m.group; id != nil {
+ return []ent.Value{*id}
+ }
+ case notifier.EdgeUser:
+ if id := m.user; id != nil {
+ return []ent.Value{*id}
+ }
+ }
+ return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *NotifierMutation) RemovedEdges() []string {
+ edges := make([]string, 0, 2)
+ return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *NotifierMutation) RemovedIDs(name string) []ent.Value {
+ return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *NotifierMutation) ClearedEdges() []string {
+ edges := make([]string, 0, 2)
+ if m.clearedgroup {
+ edges = append(edges, notifier.EdgeGroup)
+ }
+ if m.cleareduser {
+ edges = append(edges, notifier.EdgeUser)
+ }
+ return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *NotifierMutation) EdgeCleared(name string) bool {
+ switch name {
+ case notifier.EdgeGroup:
+ return m.clearedgroup
+ case notifier.EdgeUser:
+ return m.cleareduser
+ }
+ return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *NotifierMutation) ClearEdge(name string) error {
+ switch name {
+ case notifier.EdgeGroup:
+ m.ClearGroup()
+ return nil
+ case notifier.EdgeUser:
+ m.ClearUser()
+ return nil
+ }
+ return fmt.Errorf("unknown Notifier unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *NotifierMutation) ResetEdge(name string) error {
+ switch name {
+ case notifier.EdgeGroup:
+ m.ResetGroup()
+ return nil
+ case notifier.EdgeUser:
+ m.ResetUser()
+ return nil
+ }
+ return fmt.Errorf("unknown Notifier edge %s", name)
+}
+
// UserMutation represents an operation that mutates the User nodes in the graph.
type UserMutation struct {
config
@@ -8886,8 +10681,8 @@ type UserMutation struct {
email *string
password *string
is_superuser *bool
- role *user.Role
superuser *bool
+ role *user.Role
activated_on *time.Time
clearedFields map[string]struct{}
group *uuid.UUID
@@ -8895,6 +10690,9 @@ type UserMutation struct {
auth_tokens map[uuid.UUID]struct{}
removedauth_tokens map[uuid.UUID]struct{}
clearedauth_tokens bool
+ notifiers map[uuid.UUID]struct{}
+ removednotifiers map[uuid.UUID]struct{}
+ clearednotifiers bool
done bool
oldValue func(context.Context) (*User, error)
predicates []predicate.User
@@ -9220,42 +11018,6 @@ func (m *UserMutation) ResetIsSuperuser() {
m.is_superuser = nil
}
-// SetRole sets the "role" field.
-func (m *UserMutation) SetRole(u user.Role) {
- m.role = &u
-}
-
-// Role returns the value of the "role" field in the mutation.
-func (m *UserMutation) Role() (r user.Role, exists bool) {
- v := m.role
- if v == nil {
- return
- }
- return *v, true
-}
-
-// OldRole returns the old "role" field's value of the User entity.
-// If the User object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *UserMutation) OldRole(ctx context.Context) (v user.Role, err error) {
- if !m.op.Is(OpUpdateOne) {
- return v, errors.New("OldRole is only allowed on UpdateOne operations")
- }
- if m.id == nil || m.oldValue == nil {
- return v, errors.New("OldRole requires an ID field in the mutation")
- }
- oldValue, err := m.oldValue(ctx)
- if err != nil {
- return v, fmt.Errorf("querying old value for OldRole: %w", err)
- }
- return oldValue.Role, nil
-}
-
-// ResetRole resets all changes to the "role" field.
-func (m *UserMutation) ResetRole() {
- m.role = nil
-}
-
// SetSuperuser sets the "superuser" field.
func (m *UserMutation) SetSuperuser(b bool) {
m.superuser = &b
@@ -9292,6 +11054,42 @@ func (m *UserMutation) ResetSuperuser() {
m.superuser = nil
}
+// SetRole sets the "role" field.
+func (m *UserMutation) SetRole(u user.Role) {
+ m.role = &u
+}
+
+// Role returns the value of the "role" field in the mutation.
+func (m *UserMutation) Role() (r user.Role, exists bool) {
+ v := m.role
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldRole returns the old "role" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldRole(ctx context.Context) (v user.Role, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldRole is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldRole requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldRole: %w", err)
+ }
+ return oldValue.Role, nil
+}
+
+// ResetRole resets all changes to the "role" field.
+func (m *UserMutation) ResetRole() {
+ m.role = nil
+}
+
// SetActivatedOn sets the "activated_on" field.
func (m *UserMutation) SetActivatedOn(t time.Time) {
m.activated_on = &t
@@ -9434,16 +11232,85 @@ func (m *UserMutation) ResetAuthTokens() {
m.removedauth_tokens = nil
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by ids.
+func (m *UserMutation) AddNotifierIDs(ids ...uuid.UUID) {
+ if m.notifiers == nil {
+ m.notifiers = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ m.notifiers[ids[i]] = struct{}{}
+ }
+}
+
+// ClearNotifiers clears the "notifiers" edge to the Notifier entity.
+func (m *UserMutation) ClearNotifiers() {
+ m.clearednotifiers = true
+}
+
+// NotifiersCleared reports if the "notifiers" edge to the Notifier entity was cleared.
+func (m *UserMutation) NotifiersCleared() bool {
+ return m.clearednotifiers
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to the Notifier entity by IDs.
+func (m *UserMutation) RemoveNotifierIDs(ids ...uuid.UUID) {
+ if m.removednotifiers == nil {
+ m.removednotifiers = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ delete(m.notifiers, ids[i])
+ m.removednotifiers[ids[i]] = struct{}{}
+ }
+}
+
+// RemovedNotifiers returns the removed IDs of the "notifiers" edge to the Notifier entity.
+func (m *UserMutation) RemovedNotifiersIDs() (ids []uuid.UUID) {
+ for id := range m.removednotifiers {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// NotifiersIDs returns the "notifiers" edge IDs in the mutation.
+func (m *UserMutation) NotifiersIDs() (ids []uuid.UUID) {
+ for id := range m.notifiers {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// ResetNotifiers resets all changes to the "notifiers" edge.
+func (m *UserMutation) ResetNotifiers() {
+ m.notifiers = nil
+ m.clearednotifiers = false
+ m.removednotifiers = nil
+}
+
// Where appends a list predicates to the UserMutation builder.
func (m *UserMutation) Where(ps ...predicate.User) {
m.predicates = append(m.predicates, ps...)
}
+// WhereP appends storage-level predicates to the UserMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.User, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
// Op returns the operation name.
func (m *UserMutation) Op() Op {
return m.op
}
+// SetOp allows setting the mutation operation.
+func (m *UserMutation) SetOp(op Op) {
+ m.op = op
+}
+
// Type returns the node type of this mutation (User).
func (m *UserMutation) Type() string {
return m.typ
@@ -9472,12 +11339,12 @@ func (m *UserMutation) Fields() []string {
if m.is_superuser != nil {
fields = append(fields, user.FieldIsSuperuser)
}
- if m.role != nil {
- fields = append(fields, user.FieldRole)
- }
if m.superuser != nil {
fields = append(fields, user.FieldSuperuser)
}
+ if m.role != nil {
+ fields = append(fields, user.FieldRole)
+ }
if m.activated_on != nil {
fields = append(fields, user.FieldActivatedOn)
}
@@ -9501,10 +11368,10 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) {
return m.Password()
case user.FieldIsSuperuser:
return m.IsSuperuser()
- case user.FieldRole:
- return m.Role()
case user.FieldSuperuser:
return m.Superuser()
+ case user.FieldRole:
+ return m.Role()
case user.FieldActivatedOn:
return m.ActivatedOn()
}
@@ -9528,10 +11395,10 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er
return m.OldPassword(ctx)
case user.FieldIsSuperuser:
return m.OldIsSuperuser(ctx)
- case user.FieldRole:
- return m.OldRole(ctx)
case user.FieldSuperuser:
return m.OldSuperuser(ctx)
+ case user.FieldRole:
+ return m.OldRole(ctx)
case user.FieldActivatedOn:
return m.OldActivatedOn(ctx)
}
@@ -9585,13 +11452,6 @@ func (m *UserMutation) SetField(name string, value ent.Value) error {
}
m.SetIsSuperuser(v)
return nil
- case user.FieldRole:
- v, ok := value.(user.Role)
- if !ok {
- return fmt.Errorf("unexpected type %T for field %s", value, name)
- }
- m.SetRole(v)
- return nil
case user.FieldSuperuser:
v, ok := value.(bool)
if !ok {
@@ -9599,6 +11459,13 @@ func (m *UserMutation) SetField(name string, value ent.Value) error {
}
m.SetSuperuser(v)
return nil
+ case user.FieldRole:
+ v, ok := value.(user.Role)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetRole(v)
+ return nil
case user.FieldActivatedOn:
v, ok := value.(time.Time)
if !ok {
@@ -9682,12 +11549,12 @@ func (m *UserMutation) ResetField(name string) error {
case user.FieldIsSuperuser:
m.ResetIsSuperuser()
return nil
- case user.FieldRole:
- m.ResetRole()
- return nil
case user.FieldSuperuser:
m.ResetSuperuser()
return nil
+ case user.FieldRole:
+ m.ResetRole()
+ return nil
case user.FieldActivatedOn:
m.ResetActivatedOn()
return nil
@@ -9697,13 +11564,16 @@ func (m *UserMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *UserMutation) AddedEdges() []string {
- edges := make([]string, 0, 2)
+ edges := make([]string, 0, 3)
if m.group != nil {
edges = append(edges, user.EdgeGroup)
}
if m.auth_tokens != nil {
edges = append(edges, user.EdgeAuthTokens)
}
+ if m.notifiers != nil {
+ edges = append(edges, user.EdgeNotifiers)
+ }
return edges
}
@@ -9721,16 +11591,25 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case user.EdgeNotifiers:
+ ids := make([]ent.Value, 0, len(m.notifiers))
+ for id := range m.notifiers {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *UserMutation) RemovedEdges() []string {
- edges := make([]string, 0, 2)
+ edges := make([]string, 0, 3)
if m.removedauth_tokens != nil {
edges = append(edges, user.EdgeAuthTokens)
}
+ if m.removednotifiers != nil {
+ edges = append(edges, user.EdgeNotifiers)
+ }
return edges
}
@@ -9744,19 +11623,28 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case user.EdgeNotifiers:
+ ids := make([]ent.Value, 0, len(m.removednotifiers))
+ for id := range m.removednotifiers {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *UserMutation) ClearedEdges() []string {
- edges := make([]string, 0, 2)
+ edges := make([]string, 0, 3)
if m.clearedgroup {
edges = append(edges, user.EdgeGroup)
}
if m.clearedauth_tokens {
edges = append(edges, user.EdgeAuthTokens)
}
+ if m.clearednotifiers {
+ edges = append(edges, user.EdgeNotifiers)
+ }
return edges
}
@@ -9768,6 +11656,8 @@ func (m *UserMutation) EdgeCleared(name string) bool {
return m.clearedgroup
case user.EdgeAuthTokens:
return m.clearedauth_tokens
+ case user.EdgeNotifiers:
+ return m.clearednotifiers
}
return false
}
@@ -9793,6 +11683,9 @@ func (m *UserMutation) ResetEdge(name string) error {
case user.EdgeAuthTokens:
m.ResetAuthTokens()
return nil
+ case user.EdgeNotifiers:
+ m.ResetNotifiers()
+ return nil
}
return fmt.Errorf("unknown User edge %s", name)
}
diff --git a/backend/internal/data/ent/notifier.go b/backend/internal/data/ent/notifier.go
new file mode 100644
index 0000000..05a267b
--- /dev/null
+++ b/backend/internal/data/ent/notifier.go
@@ -0,0 +1,226 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/user"
+)
+
+// Notifier is the model entity for the Notifier schema.
+type Notifier struct {
+ config `json:"-"`
+ // ID of the ent.
+ ID uuid.UUID `json:"id,omitempty"`
+ // CreatedAt holds the value of the "created_at" field.
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ // UpdatedAt holds the value of the "updated_at" field.
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ // GroupID holds the value of the "group_id" field.
+ GroupID uuid.UUID `json:"group_id,omitempty"`
+ // UserID holds the value of the "user_id" field.
+ UserID uuid.UUID `json:"user_id,omitempty"`
+ // Name holds the value of the "name" field.
+ Name string `json:"name,omitempty"`
+ // URL holds the value of the "url" field.
+ URL string `json:"-"`
+ // IsActive holds the value of the "is_active" field.
+ IsActive bool `json:"is_active,omitempty"`
+ // Edges holds the relations/edges for other nodes in the graph.
+ // The values are being populated by the NotifierQuery when eager-loading is set.
+ Edges NotifierEdges `json:"edges"`
+ selectValues sql.SelectValues
+}
+
+// NotifierEdges holds the relations/edges for other nodes in the graph.
+type NotifierEdges struct {
+ // Group holds the value of the group edge.
+ Group *Group `json:"group,omitempty"`
+ // User holds the value of the user edge.
+ User *User `json:"user,omitempty"`
+ // loadedTypes holds the information for reporting if a
+ // type was loaded (or requested) in eager-loading or not.
+ loadedTypes [2]bool
+}
+
+// GroupOrErr returns the Group value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e NotifierEdges) GroupOrErr() (*Group, error) {
+ if e.loadedTypes[0] {
+ if e.Group == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: group.Label}
+ }
+ return e.Group, nil
+ }
+ return nil, &NotLoadedError{edge: "group"}
+}
+
+// UserOrErr returns the User value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e NotifierEdges) UserOrErr() (*User, error) {
+ if e.loadedTypes[1] {
+ if e.User == nil {
+ // Edge was loaded but was not found.
+ return nil, &NotFoundError{label: user.Label}
+ }
+ return e.User, nil
+ }
+ return nil, &NotLoadedError{edge: "user"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Notifier) scanValues(columns []string) ([]any, error) {
+ values := make([]any, len(columns))
+ for i := range columns {
+ switch columns[i] {
+ case notifier.FieldIsActive:
+ values[i] = new(sql.NullBool)
+ case notifier.FieldName, notifier.FieldURL:
+ values[i] = new(sql.NullString)
+ case notifier.FieldCreatedAt, notifier.FieldUpdatedAt:
+ values[i] = new(sql.NullTime)
+ case notifier.FieldID, notifier.FieldGroupID, notifier.FieldUserID:
+ values[i] = new(uuid.UUID)
+ default:
+ values[i] = new(sql.UnknownType)
+ }
+ }
+ return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Notifier fields.
+func (n *Notifier) assignValues(columns []string, values []any) error {
+ if m, n := len(values), len(columns); m < n {
+ return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+ }
+ for i := range columns {
+ switch columns[i] {
+ case notifier.FieldID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field id", values[i])
+ } else if value != nil {
+ n.ID = *value
+ }
+ case notifier.FieldCreatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field created_at", values[i])
+ } else if value.Valid {
+ n.CreatedAt = value.Time
+ }
+ case notifier.FieldUpdatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+ } else if value.Valid {
+ n.UpdatedAt = value.Time
+ }
+ case notifier.FieldGroupID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field group_id", values[i])
+ } else if value != nil {
+ n.GroupID = *value
+ }
+ case notifier.FieldUserID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field user_id", values[i])
+ } else if value != nil {
+ n.UserID = *value
+ }
+ case notifier.FieldName:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field name", values[i])
+ } else if value.Valid {
+ n.Name = value.String
+ }
+ case notifier.FieldURL:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field url", values[i])
+ } else if value.Valid {
+ n.URL = value.String
+ }
+ case notifier.FieldIsActive:
+ if value, ok := values[i].(*sql.NullBool); !ok {
+ return fmt.Errorf("unexpected type %T for field is_active", values[i])
+ } else if value.Valid {
+ n.IsActive = value.Bool
+ }
+ default:
+ n.selectValues.Set(columns[i], values[i])
+ }
+ }
+ return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Notifier.
+// This includes values selected through modifiers, order, etc.
+func (n *Notifier) Value(name string) (ent.Value, error) {
+ return n.selectValues.Get(name)
+}
+
+// QueryGroup queries the "group" edge of the Notifier entity.
+func (n *Notifier) QueryGroup() *GroupQuery {
+ return NewNotifierClient(n.config).QueryGroup(n)
+}
+
+// QueryUser queries the "user" edge of the Notifier entity.
+func (n *Notifier) QueryUser() *UserQuery {
+ return NewNotifierClient(n.config).QueryUser(n)
+}
+
+// Update returns a builder for updating this Notifier.
+// Note that you need to call Notifier.Unwrap() before calling this method if this Notifier
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (n *Notifier) Update() *NotifierUpdateOne {
+ return NewNotifierClient(n.config).UpdateOne(n)
+}
+
+// Unwrap unwraps the Notifier entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (n *Notifier) Unwrap() *Notifier {
+ _tx, ok := n.config.driver.(*txDriver)
+ if !ok {
+ panic("ent: Notifier is not a transactional entity")
+ }
+ n.config.driver = _tx.drv
+ return n
+}
+
+// String implements the fmt.Stringer.
+func (n *Notifier) String() string {
+ var builder strings.Builder
+ builder.WriteString("Notifier(")
+ builder.WriteString(fmt.Sprintf("id=%v, ", n.ID))
+ builder.WriteString("created_at=")
+ builder.WriteString(n.CreatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("updated_at=")
+ builder.WriteString(n.UpdatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("group_id=")
+ builder.WriteString(fmt.Sprintf("%v", n.GroupID))
+ builder.WriteString(", ")
+ builder.WriteString("user_id=")
+ builder.WriteString(fmt.Sprintf("%v", n.UserID))
+ builder.WriteString(", ")
+ builder.WriteString("name=")
+ builder.WriteString(n.Name)
+ builder.WriteString(", ")
+ builder.WriteString("url=")
+ builder.WriteString(", ")
+ builder.WriteString("is_active=")
+ builder.WriteString(fmt.Sprintf("%v", n.IsActive))
+ builder.WriteByte(')')
+ return builder.String()
+}
+
+// Notifiers is a parsable slice of Notifier.
+type Notifiers []*Notifier
diff --git a/backend/internal/data/ent/notifier/notifier.go b/backend/internal/data/ent/notifier/notifier.go
new file mode 100644
index 0000000..d24b6bc
--- /dev/null
+++ b/backend/internal/data/ent/notifier/notifier.go
@@ -0,0 +1,162 @@
+// Code generated by ent, DO NOT EDIT.
+
+package notifier
+
+import (
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+)
+
+const (
+ // Label holds the string label denoting the notifier type in the database.
+ Label = "notifier"
+ // FieldID holds the string denoting the id field in the database.
+ FieldID = "id"
+ // FieldCreatedAt holds the string denoting the created_at field in the database.
+ FieldCreatedAt = "created_at"
+ // FieldUpdatedAt holds the string denoting the updated_at field in the database.
+ FieldUpdatedAt = "updated_at"
+ // FieldGroupID holds the string denoting the group_id field in the database.
+ FieldGroupID = "group_id"
+ // FieldUserID holds the string denoting the user_id field in the database.
+ FieldUserID = "user_id"
+ // FieldName holds the string denoting the name field in the database.
+ FieldName = "name"
+ // FieldURL holds the string denoting the url field in the database.
+ FieldURL = "url"
+ // FieldIsActive holds the string denoting the is_active field in the database.
+ FieldIsActive = "is_active"
+ // EdgeGroup holds the string denoting the group edge name in mutations.
+ EdgeGroup = "group"
+ // EdgeUser holds the string denoting the user edge name in mutations.
+ EdgeUser = "user"
+ // Table holds the table name of the notifier in the database.
+ Table = "notifiers"
+ // GroupTable is the table that holds the group relation/edge.
+ GroupTable = "notifiers"
+ // GroupInverseTable is the table name for the Group entity.
+ // It exists in this package in order to avoid circular dependency with the "group" package.
+ GroupInverseTable = "groups"
+ // GroupColumn is the table column denoting the group relation/edge.
+ GroupColumn = "group_id"
+ // UserTable is the table that holds the user relation/edge.
+ UserTable = "notifiers"
+ // UserInverseTable is the table name for the User entity.
+ // It exists in this package in order to avoid circular dependency with the "user" package.
+ UserInverseTable = "users"
+ // UserColumn is the table column denoting the user relation/edge.
+ UserColumn = "user_id"
+)
+
+// Columns holds all SQL columns for notifier fields.
+var Columns = []string{
+ FieldID,
+ FieldCreatedAt,
+ FieldUpdatedAt,
+ FieldGroupID,
+ FieldUserID,
+ FieldName,
+ FieldURL,
+ FieldIsActive,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+ for i := range Columns {
+ if column == Columns[i] {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ // DefaultCreatedAt holds the default value on creation for the "created_at" field.
+ DefaultCreatedAt func() time.Time
+ // DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+ DefaultUpdatedAt func() time.Time
+ // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+ UpdateDefaultUpdatedAt func() time.Time
+ // NameValidator is a validator for the "name" field. It is called by the builders before save.
+ NameValidator func(string) error
+ // URLValidator is a validator for the "url" field. It is called by the builders before save.
+ URLValidator func(string) error
+ // DefaultIsActive holds the default value on creation for the "is_active" field.
+ DefaultIsActive bool
+ // DefaultID holds the default value on creation for the "id" field.
+ DefaultID func() uuid.UUID
+)
+
+// OrderOption defines the ordering options for the Notifier queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByGroupID orders the results by the group_id field.
+func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldGroupID, opts...).ToFunc()
+}
+
+// ByUserID orders the results by the user_id field.
+func ByUserID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUserID, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByURL orders the results by the url field.
+func ByURL(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldURL, opts...).ToFunc()
+}
+
+// ByIsActive orders the results by the is_active field.
+func ByIsActive(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldIsActive, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByUserField orders the results by user field.
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newUserStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(UserInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+ )
+}
diff --git a/backend/internal/data/ent/notifier/where.go b/backend/internal/data/ent/notifier/where.go
new file mode 100644
index 0000000..fa9b3bc
--- /dev/null
+++ b/backend/internal/data/ent/notifier/where.go
@@ -0,0 +1,413 @@
+// Code generated by ent, DO NOT EDIT.
+
+package notifier
+
+import (
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLTE(FieldID, id))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
+func GroupID(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldGroupID, v))
+}
+
+// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
+func UserID(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldUserID, v))
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldName, v))
+}
+
+// URL applies equality check predicate on the "url" field. It's identical to URLEQ.
+func URL(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldURL, v))
+}
+
+// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ.
+func IsActive(v bool) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldIsActive, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// GroupIDEQ applies the EQ predicate on the "group_id" field.
+func GroupIDEQ(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldGroupID, v))
+}
+
+// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
+func GroupIDNEQ(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldGroupID, v))
+}
+
+// GroupIDIn applies the In predicate on the "group_id" field.
+func GroupIDIn(vs ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldGroupID, vs...))
+}
+
+// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
+func GroupIDNotIn(vs ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldGroupID, vs...))
+}
+
+// UserIDEQ applies the EQ predicate on the "user_id" field.
+func UserIDEQ(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldUserID, v))
+}
+
+// UserIDNEQ applies the NEQ predicate on the "user_id" field.
+func UserIDNEQ(v uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldUserID, v))
+}
+
+// UserIDIn applies the In predicate on the "user_id" field.
+func UserIDIn(vs ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldUserID, vs...))
+}
+
+// UserIDNotIn applies the NotIn predicate on the "user_id" field.
+func UserIDNotIn(vs ...uuid.UUID) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldUserID, vs...))
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldName, v))
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldName, v))
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldName, vs...))
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldName, vs...))
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGT(FieldName, v))
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGTE(FieldName, v))
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLT(FieldName, v))
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLTE(FieldName, v))
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldContains(FieldName, v))
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldHasPrefix(FieldName, v))
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldHasSuffix(FieldName, v))
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEqualFold(FieldName, v))
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldContainsFold(FieldName, v))
+}
+
+// URLEQ applies the EQ predicate on the "url" field.
+func URLEQ(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldURL, v))
+}
+
+// URLNEQ applies the NEQ predicate on the "url" field.
+func URLNEQ(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldURL, v))
+}
+
+// URLIn applies the In predicate on the "url" field.
+func URLIn(vs ...string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldIn(FieldURL, vs...))
+}
+
+// URLNotIn applies the NotIn predicate on the "url" field.
+func URLNotIn(vs ...string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNotIn(FieldURL, vs...))
+}
+
+// URLGT applies the GT predicate on the "url" field.
+func URLGT(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGT(FieldURL, v))
+}
+
+// URLGTE applies the GTE predicate on the "url" field.
+func URLGTE(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldGTE(FieldURL, v))
+}
+
+// URLLT applies the LT predicate on the "url" field.
+func URLLT(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLT(FieldURL, v))
+}
+
+// URLLTE applies the LTE predicate on the "url" field.
+func URLLTE(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldLTE(FieldURL, v))
+}
+
+// URLContains applies the Contains predicate on the "url" field.
+func URLContains(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldContains(FieldURL, v))
+}
+
+// URLHasPrefix applies the HasPrefix predicate on the "url" field.
+func URLHasPrefix(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldHasPrefix(FieldURL, v))
+}
+
+// URLHasSuffix applies the HasSuffix predicate on the "url" field.
+func URLHasSuffix(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldHasSuffix(FieldURL, v))
+}
+
+// URLEqualFold applies the EqualFold predicate on the "url" field.
+func URLEqualFold(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEqualFold(FieldURL, v))
+}
+
+// URLContainsFold applies the ContainsFold predicate on the "url" field.
+func URLContainsFold(v string) predicate.Notifier {
+ return predicate.Notifier(sql.FieldContainsFold(FieldURL, v))
+}
+
+// IsActiveEQ applies the EQ predicate on the "is_active" field.
+func IsActiveEQ(v bool) predicate.Notifier {
+ return predicate.Notifier(sql.FieldEQ(FieldIsActive, v))
+}
+
+// IsActiveNEQ applies the NEQ predicate on the "is_active" field.
+func IsActiveNEQ(v bool) predicate.Notifier {
+ return predicate.Notifier(sql.FieldNEQ(FieldIsActive, v))
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.Notifier {
+ return predicate.Notifier(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.Notifier {
+ return predicate.Notifier(func(s *sql.Selector) {
+ step := newGroupStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasUser applies the HasEdge predicate on the "user" edge.
+func HasUser() predicate.Notifier {
+ return predicate.Notifier(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.Notifier {
+ return predicate.Notifier(func(s *sql.Selector) {
+ step := newUserStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Notifier) predicate.Notifier {
+ return predicate.Notifier(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Notifier) predicate.Notifier {
+ return predicate.Notifier(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Notifier) predicate.Notifier {
+ return predicate.Notifier(sql.NotPredicates(p))
+}
diff --git a/backend/internal/data/ent/notifier_create.go b/backend/internal/data/ent/notifier_create.go
new file mode 100644
index 0000000..42265e2
--- /dev/null
+++ b/backend/internal/data/ent/notifier_create.go
@@ -0,0 +1,382 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/user"
+)
+
+// NotifierCreate is the builder for creating a Notifier entity.
+type NotifierCreate struct {
+ config
+ mutation *NotifierMutation
+ hooks []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (nc *NotifierCreate) SetCreatedAt(t time.Time) *NotifierCreate {
+ nc.mutation.SetCreatedAt(t)
+ return nc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (nc *NotifierCreate) SetNillableCreatedAt(t *time.Time) *NotifierCreate {
+ if t != nil {
+ nc.SetCreatedAt(*t)
+ }
+ return nc
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (nc *NotifierCreate) SetUpdatedAt(t time.Time) *NotifierCreate {
+ nc.mutation.SetUpdatedAt(t)
+ return nc
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (nc *NotifierCreate) SetNillableUpdatedAt(t *time.Time) *NotifierCreate {
+ if t != nil {
+ nc.SetUpdatedAt(*t)
+ }
+ return nc
+}
+
+// SetGroupID sets the "group_id" field.
+func (nc *NotifierCreate) SetGroupID(u uuid.UUID) *NotifierCreate {
+ nc.mutation.SetGroupID(u)
+ return nc
+}
+
+// SetUserID sets the "user_id" field.
+func (nc *NotifierCreate) SetUserID(u uuid.UUID) *NotifierCreate {
+ nc.mutation.SetUserID(u)
+ return nc
+}
+
+// SetName sets the "name" field.
+func (nc *NotifierCreate) SetName(s string) *NotifierCreate {
+ nc.mutation.SetName(s)
+ return nc
+}
+
+// SetURL sets the "url" field.
+func (nc *NotifierCreate) SetURL(s string) *NotifierCreate {
+ nc.mutation.SetURL(s)
+ return nc
+}
+
+// SetIsActive sets the "is_active" field.
+func (nc *NotifierCreate) SetIsActive(b bool) *NotifierCreate {
+ nc.mutation.SetIsActive(b)
+ return nc
+}
+
+// SetNillableIsActive sets the "is_active" field if the given value is not nil.
+func (nc *NotifierCreate) SetNillableIsActive(b *bool) *NotifierCreate {
+ if b != nil {
+ nc.SetIsActive(*b)
+ }
+ return nc
+}
+
+// SetID sets the "id" field.
+func (nc *NotifierCreate) SetID(u uuid.UUID) *NotifierCreate {
+ nc.mutation.SetID(u)
+ return nc
+}
+
+// SetNillableID sets the "id" field if the given value is not nil.
+func (nc *NotifierCreate) SetNillableID(u *uuid.UUID) *NotifierCreate {
+ if u != nil {
+ nc.SetID(*u)
+ }
+ return nc
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (nc *NotifierCreate) SetGroup(g *Group) *NotifierCreate {
+ return nc.SetGroupID(g.ID)
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (nc *NotifierCreate) SetUser(u *User) *NotifierCreate {
+ return nc.SetUserID(u.ID)
+}
+
+// Mutation returns the NotifierMutation object of the builder.
+func (nc *NotifierCreate) Mutation() *NotifierMutation {
+ return nc.mutation
+}
+
+// Save creates the Notifier in the database.
+func (nc *NotifierCreate) Save(ctx context.Context) (*Notifier, error) {
+ nc.defaults()
+ return withHooks(ctx, nc.sqlSave, nc.mutation, nc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (nc *NotifierCreate) SaveX(ctx context.Context) *Notifier {
+ v, err := nc.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (nc *NotifierCreate) Exec(ctx context.Context) error {
+ _, err := nc.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (nc *NotifierCreate) ExecX(ctx context.Context) {
+ if err := nc.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (nc *NotifierCreate) defaults() {
+ if _, ok := nc.mutation.CreatedAt(); !ok {
+ v := notifier.DefaultCreatedAt()
+ nc.mutation.SetCreatedAt(v)
+ }
+ if _, ok := nc.mutation.UpdatedAt(); !ok {
+ v := notifier.DefaultUpdatedAt()
+ nc.mutation.SetUpdatedAt(v)
+ }
+ if _, ok := nc.mutation.IsActive(); !ok {
+ v := notifier.DefaultIsActive
+ nc.mutation.SetIsActive(v)
+ }
+ if _, ok := nc.mutation.ID(); !ok {
+ v := notifier.DefaultID()
+ nc.mutation.SetID(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (nc *NotifierCreate) check() error {
+ if _, ok := nc.mutation.CreatedAt(); !ok {
+ return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Notifier.created_at"`)}
+ }
+ if _, ok := nc.mutation.UpdatedAt(); !ok {
+ return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Notifier.updated_at"`)}
+ }
+ if _, ok := nc.mutation.GroupID(); !ok {
+ return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "Notifier.group_id"`)}
+ }
+ if _, ok := nc.mutation.UserID(); !ok {
+ return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "Notifier.user_id"`)}
+ }
+ if _, ok := nc.mutation.Name(); !ok {
+ return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Notifier.name"`)}
+ }
+ if v, ok := nc.mutation.Name(); ok {
+ if err := notifier.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
+ }
+ }
+ if _, ok := nc.mutation.URL(); !ok {
+ return &ValidationError{Name: "url", err: errors.New(`ent: missing required field "Notifier.url"`)}
+ }
+ if v, ok := nc.mutation.URL(); ok {
+ if err := notifier.URLValidator(v); err != nil {
+ return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
+ }
+ }
+ if _, ok := nc.mutation.IsActive(); !ok {
+ return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "Notifier.is_active"`)}
+ }
+ if _, ok := nc.mutation.GroupID(); !ok {
+ return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Notifier.group"`)}
+ }
+ if _, ok := nc.mutation.UserID(); !ok {
+ return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "Notifier.user"`)}
+ }
+ return nil
+}
+
+func (nc *NotifierCreate) sqlSave(ctx context.Context) (*Notifier, error) {
+ if err := nc.check(); err != nil {
+ return nil, err
+ }
+ _node, _spec := nc.createSpec()
+ if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ if _spec.ID.Value != nil {
+ if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
+ _node.ID = *id
+ } else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
+ return nil, err
+ }
+ }
+ nc.mutation.id = &_node.ID
+ nc.mutation.done = true
+ return _node, nil
+}
+
+func (nc *NotifierCreate) createSpec() (*Notifier, *sqlgraph.CreateSpec) {
+ var (
+ _node = &Notifier{config: nc.config}
+ _spec = sqlgraph.NewCreateSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
+ )
+ if id, ok := nc.mutation.ID(); ok {
+ _node.ID = id
+ _spec.ID.Value = &id
+ }
+ if value, ok := nc.mutation.CreatedAt(); ok {
+ _spec.SetField(notifier.FieldCreatedAt, field.TypeTime, value)
+ _node.CreatedAt = value
+ }
+ if value, ok := nc.mutation.UpdatedAt(); ok {
+ _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
+ _node.UpdatedAt = value
+ }
+ if value, ok := nc.mutation.Name(); ok {
+ _spec.SetField(notifier.FieldName, field.TypeString, value)
+ _node.Name = value
+ }
+ if value, ok := nc.mutation.URL(); ok {
+ _spec.SetField(notifier.FieldURL, field.TypeString, value)
+ _node.URL = value
+ }
+ if value, ok := nc.mutation.IsActive(); ok {
+ _spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
+ _node.IsActive = value
+ }
+ if nodes := nc.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.GroupTable,
+ Columns: []string{notifier.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.GroupID = nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := nc.mutation.UserIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.UserTable,
+ Columns: []string{notifier.UserColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.UserID = nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// NotifierCreateBulk is the builder for creating many Notifier entities in bulk.
+type NotifierCreateBulk struct {
+ config
+ err error
+ builders []*NotifierCreate
+}
+
+// Save creates the Notifier entities in the database.
+func (ncb *NotifierCreateBulk) Save(ctx context.Context) ([]*Notifier, error) {
+ if ncb.err != nil {
+ return nil, ncb.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(ncb.builders))
+ nodes := make([]*Notifier, len(ncb.builders))
+ mutators := make([]Mutator, len(ncb.builders))
+ for i := range ncb.builders {
+ func(i int, root context.Context) {
+ builder := ncb.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*NotifierMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, ncb.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ncb *NotifierCreateBulk) SaveX(ctx context.Context) []*Notifier {
+ v, err := ncb.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (ncb *NotifierCreateBulk) Exec(ctx context.Context) error {
+ _, err := ncb.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ncb *NotifierCreateBulk) ExecX(ctx context.Context) {
+ if err := ncb.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/notifier_delete.go b/backend/internal/data/ent/notifier_delete.go
new file mode 100644
index 0000000..586b093
--- /dev/null
+++ b/backend/internal/data/ent/notifier_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+)
+
+// NotifierDelete is the builder for deleting a Notifier entity.
+type NotifierDelete struct {
+ config
+ hooks []Hook
+ mutation *NotifierMutation
+}
+
+// Where appends a list predicates to the NotifierDelete builder.
+func (nd *NotifierDelete) Where(ps ...predicate.Notifier) *NotifierDelete {
+ nd.mutation.Where(ps...)
+ return nd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (nd *NotifierDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, nd.sqlExec, nd.mutation, nd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (nd *NotifierDelete) ExecX(ctx context.Context) int {
+ n, err := nd.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (nd *NotifierDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
+ if ps := nd.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ nd.mutation.done = true
+ return affected, err
+}
+
+// NotifierDeleteOne is the builder for deleting a single Notifier entity.
+type NotifierDeleteOne struct {
+ nd *NotifierDelete
+}
+
+// Where appends a list predicates to the NotifierDelete builder.
+func (ndo *NotifierDeleteOne) Where(ps ...predicate.Notifier) *NotifierDeleteOne {
+ ndo.nd.mutation.Where(ps...)
+ return ndo
+}
+
+// Exec executes the deletion query.
+func (ndo *NotifierDeleteOne) Exec(ctx context.Context) error {
+ n, err := ndo.nd.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{notifier.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ndo *NotifierDeleteOne) ExecX(ctx context.Context) {
+ if err := ndo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/internal/data/ent/notifier_query.go b/backend/internal/data/ent/notifier_query.go
new file mode 100644
index 0000000..c88b4ef
--- /dev/null
+++ b/backend/internal/data/ent/notifier_query.go
@@ -0,0 +1,681 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/user"
+)
+
+// NotifierQuery is the builder for querying Notifier entities.
+type NotifierQuery struct {
+ config
+ ctx *QueryContext
+ order []notifier.OrderOption
+ inters []Interceptor
+ predicates []predicate.Notifier
+ withGroup *GroupQuery
+ withUser *UserQuery
+ // intermediate query (i.e. traversal path).
+ sql *sql.Selector
+ path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the NotifierQuery builder.
+func (nq *NotifierQuery) Where(ps ...predicate.Notifier) *NotifierQuery {
+ nq.predicates = append(nq.predicates, ps...)
+ return nq
+}
+
+// Limit the number of records to be returned by this query.
+func (nq *NotifierQuery) Limit(limit int) *NotifierQuery {
+ nq.ctx.Limit = &limit
+ return nq
+}
+
+// Offset to start from.
+func (nq *NotifierQuery) Offset(offset int) *NotifierQuery {
+ nq.ctx.Offset = &offset
+ return nq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (nq *NotifierQuery) Unique(unique bool) *NotifierQuery {
+ nq.ctx.Unique = &unique
+ return nq
+}
+
+// Order specifies how the records should be ordered.
+func (nq *NotifierQuery) Order(o ...notifier.OrderOption) *NotifierQuery {
+ nq.order = append(nq.order, o...)
+ return nq
+}
+
+// QueryGroup chains the current query on the "group" edge.
+func (nq *NotifierQuery) QueryGroup() *GroupQuery {
+ query := (&GroupClient{config: nq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := nq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := nq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(notifier.Table, notifier.FieldID, selector),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
+// QueryUser chains the current query on the "user" edge.
+func (nq *NotifierQuery) QueryUser() *UserQuery {
+ query := (&UserClient{config: nq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := nq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := nq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(notifier.Table, notifier.FieldID, selector),
+ sqlgraph.To(user.Table, user.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
+// First returns the first Notifier entity from the query.
+// Returns a *NotFoundError when no Notifier was found.
+func (nq *NotifierQuery) First(ctx context.Context) (*Notifier, error) {
+ nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First"))
+ if err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nil, &NotFoundError{notifier.Label}
+ }
+ return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (nq *NotifierQuery) FirstX(ctx context.Context) *Notifier {
+ node, err := nq.First(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return node
+}
+
+// FirstID returns the first Notifier ID from the query.
+// Returns a *NotFoundError when no Notifier ID was found.
+func (nq *NotifierQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil {
+ return
+ }
+ if len(ids) == 0 {
+ err = &NotFoundError{notifier.Label}
+ return
+ }
+ return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (nq *NotifierQuery) FirstIDX(ctx context.Context) uuid.UUID {
+ id, err := nq.FirstID(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return id
+}
+
+// Only returns a single Notifier entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one Notifier entity is found.
+// Returns a *NotFoundError when no Notifier entities are found.
+func (nq *NotifierQuery) Only(ctx context.Context) (*Notifier, error) {
+ nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only"))
+ if err != nil {
+ return nil, err
+ }
+ switch len(nodes) {
+ case 1:
+ return nodes[0], nil
+ case 0:
+ return nil, &NotFoundError{notifier.Label}
+ default:
+ return nil, &NotSingularError{notifier.Label}
+ }
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (nq *NotifierQuery) OnlyX(ctx context.Context) *Notifier {
+ node, err := nq.Only(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// OnlyID is like Only, but returns the only Notifier ID in the query.
+// Returns a *NotSingularError when more than one Notifier ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (nq *NotifierQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil {
+ return
+ }
+ switch len(ids) {
+ case 1:
+ id = ids[0]
+ case 0:
+ err = &NotFoundError{notifier.Label}
+ default:
+ err = &NotSingularError{notifier.Label}
+ }
+ return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (nq *NotifierQuery) OnlyIDX(ctx context.Context) uuid.UUID {
+ id, err := nq.OnlyID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// All executes the query and returns a list of Notifiers.
+func (nq *NotifierQuery) All(ctx context.Context) ([]*Notifier, error) {
+ ctx = setContextOp(ctx, nq.ctx, "All")
+ if err := nq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ qr := querierAll[[]*Notifier, *NotifierQuery]()
+ return withInterceptors[[]*Notifier](ctx, nq, qr, nq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (nq *NotifierQuery) AllX(ctx context.Context) []*Notifier {
+ nodes, err := nq.All(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return nodes
+}
+
+// IDs executes the query and returns a list of Notifier IDs.
+func (nq *NotifierQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if nq.ctx.Unique == nil && nq.path != nil {
+ nq.Unique(true)
+ }
+ ctx = setContextOp(ctx, nq.ctx, "IDs")
+ if err = nq.Select(notifier.FieldID).Scan(ctx, &ids); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (nq *NotifierQuery) IDsX(ctx context.Context) []uuid.UUID {
+ ids, err := nq.IDs(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return ids
+}
+
+// Count returns the count of the given query.
+func (nq *NotifierQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, nq.ctx, "Count")
+ if err := nq.prepareQuery(ctx); err != nil {
+ return 0, err
+ }
+ return withInterceptors[int](ctx, nq, querierCount[*NotifierQuery](), nq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (nq *NotifierQuery) CountX(ctx context.Context) int {
+ count, err := nq.Count(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (nq *NotifierQuery) Exist(ctx context.Context) (bool, error) {
+ ctx = setContextOp(ctx, nq.ctx, "Exist")
+ switch _, err := nq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
+ }
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (nq *NotifierQuery) ExistX(ctx context.Context) bool {
+ exist, err := nq.Exist(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return exist
+}
+
+// Clone returns a duplicate of the NotifierQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (nq *NotifierQuery) Clone() *NotifierQuery {
+ if nq == nil {
+ return nil
+ }
+ return &NotifierQuery{
+ config: nq.config,
+ ctx: nq.ctx.Clone(),
+ order: append([]notifier.OrderOption{}, nq.order...),
+ inters: append([]Interceptor{}, nq.inters...),
+ predicates: append([]predicate.Notifier{}, nq.predicates...),
+ withGroup: nq.withGroup.Clone(),
+ withUser: nq.withUser.Clone(),
+ // clone intermediate query.
+ sql: nq.sql.Clone(),
+ path: nq.path,
+ }
+}
+
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (nq *NotifierQuery) WithGroup(opts ...func(*GroupQuery)) *NotifierQuery {
+ query := (&GroupClient{config: nq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ nq.withGroup = query
+ return nq
+}
+
+// WithUser tells the query-builder to eager-load the nodes that are connected to
+// the "user" edge. The optional arguments are used to configure the query builder of the edge.
+func (nq *NotifierQuery) WithUser(opts ...func(*UserQuery)) *NotifierQuery {
+ query := (&UserClient{config: nq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ nq.withUser = query
+ return nq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.Notifier.Query().
+// GroupBy(notifier.FieldCreatedAt).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (nq *NotifierQuery) GroupBy(field string, fields ...string) *NotifierGroupBy {
+ nq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &NotifierGroupBy{build: nq}
+ grbuild.flds = &nq.ctx.Fields
+ grbuild.label = notifier.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// }
+//
+// client.Notifier.Query().
+// Select(notifier.FieldCreatedAt).
+// Scan(ctx, &v)
+func (nq *NotifierQuery) Select(fields ...string) *NotifierSelect {
+ nq.ctx.Fields = append(nq.ctx.Fields, fields...)
+ sbuild := &NotifierSelect{NotifierQuery: nq}
+ sbuild.label = notifier.Label
+ sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a NotifierSelect configured with the given aggregations.
+func (nq *NotifierQuery) Aggregate(fns ...AggregateFunc) *NotifierSelect {
+ return nq.Select().Aggregate(fns...)
+}
+
+func (nq *NotifierQuery) prepareQuery(ctx context.Context) error {
+ for _, inter := range nq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, nq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range nq.ctx.Fields {
+ if !notifier.ValidColumn(f) {
+ return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ }
+ if nq.path != nil {
+ prev, err := nq.path(ctx)
+ if err != nil {
+ return err
+ }
+ nq.sql = prev
+ }
+ return nil
+}
+
+func (nq *NotifierQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Notifier, error) {
+ var (
+ nodes = []*Notifier{}
+ _spec = nq.querySpec()
+ loadedTypes = [2]bool{
+ nq.withGroup != nil,
+ nq.withUser != nil,
+ }
+ )
+ _spec.ScanValues = func(columns []string) ([]any, error) {
+ return (*Notifier).scanValues(nil, columns)
+ }
+ _spec.Assign = func(columns []string, values []any) error {
+ node := &Notifier{config: nq.config}
+ nodes = append(nodes, node)
+ node.Edges.loadedTypes = loadedTypes
+ return node.assignValues(columns, values)
+ }
+ for i := range hooks {
+ hooks[i](ctx, _spec)
+ }
+ if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nodes, nil
+ }
+ if query := nq.withGroup; query != nil {
+ if err := nq.loadGroup(ctx, query, nodes, nil,
+ func(n *Notifier, e *Group) { n.Edges.Group = e }); err != nil {
+ return nil, err
+ }
+ }
+ if query := nq.withUser; query != nil {
+ if err := nq.loadUser(ctx, query, nodes, nil,
+ func(n *Notifier, e *User) { n.Edges.User = e }); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+func (nq *NotifierQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *Group)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*Notifier)
+ for i := range nodes {
+ fk := nodes[i].GroupID
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(group.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
+func (nq *NotifierQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *User)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*Notifier)
+ for i := range nodes {
+ fk := nodes[i].UserID
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(user.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
+
+func (nq *NotifierQuery) sqlCount(ctx context.Context) (int, error) {
+ _spec := nq.querySpec()
+ _spec.Node.Columns = nq.ctx.Fields
+ if len(nq.ctx.Fields) > 0 {
+ _spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique
+ }
+ return sqlgraph.CountNodes(ctx, nq.driver, _spec)
+}
+
+func (nq *NotifierQuery) querySpec() *sqlgraph.QuerySpec {
+ _spec := sqlgraph.NewQuerySpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
+ _spec.From = nq.sql
+ if unique := nq.ctx.Unique; unique != nil {
+ _spec.Unique = *unique
+ } else if nq.path != nil {
+ _spec.Unique = true
+ }
+ if fields := nq.ctx.Fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID)
+ for i := range fields {
+ if fields[i] != notifier.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+ }
+ }
+ if nq.withGroup != nil {
+ _spec.Node.AddColumnOnce(notifier.FieldGroupID)
+ }
+ if nq.withUser != nil {
+ _spec.Node.AddColumnOnce(notifier.FieldUserID)
+ }
+ }
+ if ps := nq.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if limit := nq.ctx.Limit; limit != nil {
+ _spec.Limit = *limit
+ }
+ if offset := nq.ctx.Offset; offset != nil {
+ _spec.Offset = *offset
+ }
+ if ps := nq.order; len(ps) > 0 {
+ _spec.Order = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ return _spec
+}
+
+func (nq *NotifierQuery) sqlQuery(ctx context.Context) *sql.Selector {
+ builder := sql.Dialect(nq.driver.Dialect())
+ t1 := builder.Table(notifier.Table)
+ columns := nq.ctx.Fields
+ if len(columns) == 0 {
+ columns = notifier.Columns
+ }
+ selector := builder.Select(t1.Columns(columns...)...).From(t1)
+ if nq.sql != nil {
+ selector = nq.sql
+ selector.Select(selector.Columns(columns...)...)
+ }
+ if nq.ctx.Unique != nil && *nq.ctx.Unique {
+ selector.Distinct()
+ }
+ for _, p := range nq.predicates {
+ p(selector)
+ }
+ for _, p := range nq.order {
+ p(selector)
+ }
+ if offset := nq.ctx.Offset; offset != nil {
+ // limit is mandatory for offset clause. We start
+ // with default value, and override it below if needed.
+ selector.Offset(*offset).Limit(math.MaxInt32)
+ }
+ if limit := nq.ctx.Limit; limit != nil {
+ selector.Limit(*limit)
+ }
+ return selector
+}
+
+// NotifierGroupBy is the group-by builder for Notifier entities.
+type NotifierGroupBy struct {
+ selector
+ build *NotifierQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (ngb *NotifierGroupBy) Aggregate(fns ...AggregateFunc) *NotifierGroupBy {
+ ngb.fns = append(ngb.fns, fns...)
+ return ngb
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (ngb *NotifierGroupBy) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy")
+ if err := ngb.build.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*NotifierQuery, *NotifierGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v)
+}
+
+func (ngb *NotifierGroupBy) sqlScan(ctx context.Context, root *NotifierQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
+ aggregation := make([]string, 0, len(ngb.fns))
+ for _, fn := range ngb.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ if len(selector.SelectedColumns()) == 0 {
+ columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns))
+ for _, f := range *ngb.flds {
+ columns = append(columns, selector.C(f))
+ }
+ columns = append(columns, aggregation...)
+ selector.Select(columns...)
+ }
+ selector.GroupBy(selector.Columns(*ngb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
+
+// NotifierSelect is the builder for selecting fields of Notifier entities.
+type NotifierSelect struct {
+ *NotifierQuery
+ selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ns *NotifierSelect) Aggregate(fns ...AggregateFunc) *NotifierSelect {
+ ns.fns = append(ns.fns, fns...)
+ return ns
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (ns *NotifierSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, ns.ctx, "Select")
+ if err := ns.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*NotifierQuery, *NotifierSelect](ctx, ns.NotifierQuery, ns, ns.inters, v)
+}
+
+func (ns *NotifierSelect) sqlScan(ctx context.Context, root *NotifierQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(ns.fns))
+ for _, fn := range ns.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*ns.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ns.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
diff --git a/backend/internal/data/ent/notifier_update.go b/backend/internal/data/ent/notifier_update.go
new file mode 100644
index 0000000..ea28f32
--- /dev/null
+++ b/backend/internal/data/ent/notifier_update.go
@@ -0,0 +1,581 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/user"
+)
+
+// NotifierUpdate is the builder for updating Notifier entities.
+type NotifierUpdate struct {
+ config
+ hooks []Hook
+ mutation *NotifierMutation
+}
+
+// Where appends a list predicates to the NotifierUpdate builder.
+func (nu *NotifierUpdate) Where(ps ...predicate.Notifier) *NotifierUpdate {
+ nu.mutation.Where(ps...)
+ return nu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (nu *NotifierUpdate) SetUpdatedAt(t time.Time) *NotifierUpdate {
+ nu.mutation.SetUpdatedAt(t)
+ return nu
+}
+
+// SetGroupID sets the "group_id" field.
+func (nu *NotifierUpdate) SetGroupID(u uuid.UUID) *NotifierUpdate {
+ nu.mutation.SetGroupID(u)
+ return nu
+}
+
+// SetNillableGroupID sets the "group_id" field if the given value is not nil.
+func (nu *NotifierUpdate) SetNillableGroupID(u *uuid.UUID) *NotifierUpdate {
+ if u != nil {
+ nu.SetGroupID(*u)
+ }
+ return nu
+}
+
+// SetUserID sets the "user_id" field.
+func (nu *NotifierUpdate) SetUserID(u uuid.UUID) *NotifierUpdate {
+ nu.mutation.SetUserID(u)
+ return nu
+}
+
+// SetNillableUserID sets the "user_id" field if the given value is not nil.
+func (nu *NotifierUpdate) SetNillableUserID(u *uuid.UUID) *NotifierUpdate {
+ if u != nil {
+ nu.SetUserID(*u)
+ }
+ return nu
+}
+
+// SetName sets the "name" field.
+func (nu *NotifierUpdate) SetName(s string) *NotifierUpdate {
+ nu.mutation.SetName(s)
+ return nu
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (nu *NotifierUpdate) SetNillableName(s *string) *NotifierUpdate {
+ if s != nil {
+ nu.SetName(*s)
+ }
+ return nu
+}
+
+// SetURL sets the "url" field.
+func (nu *NotifierUpdate) SetURL(s string) *NotifierUpdate {
+ nu.mutation.SetURL(s)
+ return nu
+}
+
+// SetNillableURL sets the "url" field if the given value is not nil.
+func (nu *NotifierUpdate) SetNillableURL(s *string) *NotifierUpdate {
+ if s != nil {
+ nu.SetURL(*s)
+ }
+ return nu
+}
+
+// SetIsActive sets the "is_active" field.
+func (nu *NotifierUpdate) SetIsActive(b bool) *NotifierUpdate {
+ nu.mutation.SetIsActive(b)
+ return nu
+}
+
+// SetNillableIsActive sets the "is_active" field if the given value is not nil.
+func (nu *NotifierUpdate) SetNillableIsActive(b *bool) *NotifierUpdate {
+ if b != nil {
+ nu.SetIsActive(*b)
+ }
+ return nu
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (nu *NotifierUpdate) SetGroup(g *Group) *NotifierUpdate {
+ return nu.SetGroupID(g.ID)
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (nu *NotifierUpdate) SetUser(u *User) *NotifierUpdate {
+ return nu.SetUserID(u.ID)
+}
+
+// Mutation returns the NotifierMutation object of the builder.
+func (nu *NotifierUpdate) Mutation() *NotifierMutation {
+ return nu.mutation
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (nu *NotifierUpdate) ClearGroup() *NotifierUpdate {
+ nu.mutation.ClearGroup()
+ return nu
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (nu *NotifierUpdate) ClearUser() *NotifierUpdate {
+ nu.mutation.ClearUser()
+ return nu
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (nu *NotifierUpdate) Save(ctx context.Context) (int, error) {
+ nu.defaults()
+ return withHooks(ctx, nu.sqlSave, nu.mutation, nu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (nu *NotifierUpdate) SaveX(ctx context.Context) int {
+ affected, err := nu.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return affected
+}
+
+// Exec executes the query.
+func (nu *NotifierUpdate) Exec(ctx context.Context) error {
+ _, err := nu.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (nu *NotifierUpdate) ExecX(ctx context.Context) {
+ if err := nu.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (nu *NotifierUpdate) defaults() {
+ if _, ok := nu.mutation.UpdatedAt(); !ok {
+ v := notifier.UpdateDefaultUpdatedAt()
+ nu.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (nu *NotifierUpdate) check() error {
+ if v, ok := nu.mutation.Name(); ok {
+ if err := notifier.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
+ }
+ }
+ if v, ok := nu.mutation.URL(); ok {
+ if err := notifier.URLValidator(v); err != nil {
+ return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
+ }
+ }
+ if _, ok := nu.mutation.GroupID(); nu.mutation.GroupCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "Notifier.group"`)
+ }
+ if _, ok := nu.mutation.UserID(); nu.mutation.UserCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "Notifier.user"`)
+ }
+ return nil
+}
+
+func (nu *NotifierUpdate) sqlSave(ctx context.Context) (n int, err error) {
+ if err := nu.check(); err != nil {
+ return n, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
+ if ps := nu.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := nu.mutation.UpdatedAt(); ok {
+ _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := nu.mutation.Name(); ok {
+ _spec.SetField(notifier.FieldName, field.TypeString, value)
+ }
+ if value, ok := nu.mutation.URL(); ok {
+ _spec.SetField(notifier.FieldURL, field.TypeString, value)
+ }
+ if value, ok := nu.mutation.IsActive(); ok {
+ _spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
+ }
+ if nu.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.GroupTable,
+ Columns: []string{notifier.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := nu.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.GroupTable,
+ Columns: []string{notifier.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if nu.mutation.UserCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.UserTable,
+ Columns: []string{notifier.UserColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := nu.mutation.UserIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.UserTable,
+ Columns: []string{notifier.UserColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if n, err = sqlgraph.UpdateNodes(ctx, nu.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{notifier.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return 0, err
+ }
+ nu.mutation.done = true
+ return n, nil
+}
+
+// NotifierUpdateOne is the builder for updating a single Notifier entity.
+type NotifierUpdateOne struct {
+ config
+ fields []string
+ hooks []Hook
+ mutation *NotifierMutation
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (nuo *NotifierUpdateOne) SetUpdatedAt(t time.Time) *NotifierUpdateOne {
+ nuo.mutation.SetUpdatedAt(t)
+ return nuo
+}
+
+// SetGroupID sets the "group_id" field.
+func (nuo *NotifierUpdateOne) SetGroupID(u uuid.UUID) *NotifierUpdateOne {
+ nuo.mutation.SetGroupID(u)
+ return nuo
+}
+
+// SetNillableGroupID sets the "group_id" field if the given value is not nil.
+func (nuo *NotifierUpdateOne) SetNillableGroupID(u *uuid.UUID) *NotifierUpdateOne {
+ if u != nil {
+ nuo.SetGroupID(*u)
+ }
+ return nuo
+}
+
+// SetUserID sets the "user_id" field.
+func (nuo *NotifierUpdateOne) SetUserID(u uuid.UUID) *NotifierUpdateOne {
+ nuo.mutation.SetUserID(u)
+ return nuo
+}
+
+// SetNillableUserID sets the "user_id" field if the given value is not nil.
+func (nuo *NotifierUpdateOne) SetNillableUserID(u *uuid.UUID) *NotifierUpdateOne {
+ if u != nil {
+ nuo.SetUserID(*u)
+ }
+ return nuo
+}
+
+// SetName sets the "name" field.
+func (nuo *NotifierUpdateOne) SetName(s string) *NotifierUpdateOne {
+ nuo.mutation.SetName(s)
+ return nuo
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (nuo *NotifierUpdateOne) SetNillableName(s *string) *NotifierUpdateOne {
+ if s != nil {
+ nuo.SetName(*s)
+ }
+ return nuo
+}
+
+// SetURL sets the "url" field.
+func (nuo *NotifierUpdateOne) SetURL(s string) *NotifierUpdateOne {
+ nuo.mutation.SetURL(s)
+ return nuo
+}
+
+// SetNillableURL sets the "url" field if the given value is not nil.
+func (nuo *NotifierUpdateOne) SetNillableURL(s *string) *NotifierUpdateOne {
+ if s != nil {
+ nuo.SetURL(*s)
+ }
+ return nuo
+}
+
+// SetIsActive sets the "is_active" field.
+func (nuo *NotifierUpdateOne) SetIsActive(b bool) *NotifierUpdateOne {
+ nuo.mutation.SetIsActive(b)
+ return nuo
+}
+
+// SetNillableIsActive sets the "is_active" field if the given value is not nil.
+func (nuo *NotifierUpdateOne) SetNillableIsActive(b *bool) *NotifierUpdateOne {
+ if b != nil {
+ nuo.SetIsActive(*b)
+ }
+ return nuo
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (nuo *NotifierUpdateOne) SetGroup(g *Group) *NotifierUpdateOne {
+ return nuo.SetGroupID(g.ID)
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (nuo *NotifierUpdateOne) SetUser(u *User) *NotifierUpdateOne {
+ return nuo.SetUserID(u.ID)
+}
+
+// Mutation returns the NotifierMutation object of the builder.
+func (nuo *NotifierUpdateOne) Mutation() *NotifierMutation {
+ return nuo.mutation
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (nuo *NotifierUpdateOne) ClearGroup() *NotifierUpdateOne {
+ nuo.mutation.ClearGroup()
+ return nuo
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (nuo *NotifierUpdateOne) ClearUser() *NotifierUpdateOne {
+ nuo.mutation.ClearUser()
+ return nuo
+}
+
+// Where appends a list predicates to the NotifierUpdate builder.
+func (nuo *NotifierUpdateOne) Where(ps ...predicate.Notifier) *NotifierUpdateOne {
+ nuo.mutation.Where(ps...)
+ return nuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (nuo *NotifierUpdateOne) Select(field string, fields ...string) *NotifierUpdateOne {
+ nuo.fields = append([]string{field}, fields...)
+ return nuo
+}
+
+// Save executes the query and returns the updated Notifier entity.
+func (nuo *NotifierUpdateOne) Save(ctx context.Context) (*Notifier, error) {
+ nuo.defaults()
+ return withHooks(ctx, nuo.sqlSave, nuo.mutation, nuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (nuo *NotifierUpdateOne) SaveX(ctx context.Context) *Notifier {
+ node, err := nuo.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// Exec executes the query on the entity.
+func (nuo *NotifierUpdateOne) Exec(ctx context.Context) error {
+ _, err := nuo.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (nuo *NotifierUpdateOne) ExecX(ctx context.Context) {
+ if err := nuo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (nuo *NotifierUpdateOne) defaults() {
+ if _, ok := nuo.mutation.UpdatedAt(); !ok {
+ v := notifier.UpdateDefaultUpdatedAt()
+ nuo.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (nuo *NotifierUpdateOne) check() error {
+ if v, ok := nuo.mutation.Name(); ok {
+ if err := notifier.NameValidator(v); err != nil {
+ return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
+ }
+ }
+ if v, ok := nuo.mutation.URL(); ok {
+ if err := notifier.URLValidator(v); err != nil {
+ return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
+ }
+ }
+ if _, ok := nuo.mutation.GroupID(); nuo.mutation.GroupCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "Notifier.group"`)
+ }
+ if _, ok := nuo.mutation.UserID(); nuo.mutation.UserCleared() && !ok {
+ return errors.New(`ent: clearing a required unique edge "Notifier.user"`)
+ }
+ return nil
+}
+
+func (nuo *NotifierUpdateOne) sqlSave(ctx context.Context) (_node *Notifier, err error) {
+ if err := nuo.check(); err != nil {
+ return _node, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
+ id, ok := nuo.mutation.ID()
+ if !ok {
+ return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Notifier.id" for update`)}
+ }
+ _spec.Node.ID.Value = id
+ if fields := nuo.fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID)
+ for _, f := range fields {
+ if !notifier.ValidColumn(f) {
+ return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ if f != notifier.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, f)
+ }
+ }
+ }
+ if ps := nuo.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := nuo.mutation.UpdatedAt(); ok {
+ _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := nuo.mutation.Name(); ok {
+ _spec.SetField(notifier.FieldName, field.TypeString, value)
+ }
+ if value, ok := nuo.mutation.URL(); ok {
+ _spec.SetField(notifier.FieldURL, field.TypeString, value)
+ }
+ if value, ok := nuo.mutation.IsActive(); ok {
+ _spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
+ }
+ if nuo.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.GroupTable,
+ Columns: []string{notifier.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := nuo.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.GroupTable,
+ Columns: []string{notifier.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if nuo.mutation.UserCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.UserTable,
+ Columns: []string{notifier.UserColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := nuo.mutation.UserIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: notifier.UserTable,
+ Columns: []string{notifier.UserColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ _node = &Notifier{config: nuo.config}
+ _spec.Assign = _node.assignValues
+ _spec.ScanValues = _node.scanValues
+ if err = sqlgraph.UpdateNode(ctx, nuo.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{notifier.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ nuo.mutation.done = true
+ return _node, nil
+}
diff --git a/backend/internal/data/ent/predicate/predicate.go b/backend/internal/data/ent/predicate/predicate.go
index 12b1f44..bd36616 100644
--- a/backend/internal/data/ent/predicate/predicate.go
+++ b/backend/internal/data/ent/predicate/predicate.go
@@ -9,15 +9,15 @@ import (
// Attachment is the predicate function for attachment builders.
type Attachment func(*sql.Selector)
+// AuthRoles is the predicate function for authroles builders.
+type AuthRoles func(*sql.Selector)
+
// AuthTokens is the predicate function for authtokens builders.
type AuthTokens func(*sql.Selector)
// Document is the predicate function for document builders.
type Document func(*sql.Selector)
-// DocumentToken is the predicate function for documenttoken builders.
-type DocumentToken func(*sql.Selector)
-
// Group is the predicate function for group builders.
type Group func(*sql.Selector)
@@ -36,5 +36,11 @@ type Label func(*sql.Selector)
// Location is the predicate function for location builders.
type Location func(*sql.Selector)
+// MaintenanceEntry is the predicate function for maintenanceentry builders.
+type MaintenanceEntry func(*sql.Selector)
+
+// Notifier is the predicate function for notifier builders.
+type Notifier func(*sql.Selector)
+
// User is the predicate function for user builders.
type User func(*sql.Selector)
diff --git a/backend/internal/data/ent/runtime.go b/backend/internal/data/ent/runtime.go
index af5dc22..c3aff00 100644
--- a/backend/internal/data/ent/runtime.go
+++ b/backend/internal/data/ent/runtime.go
@@ -9,13 +9,14 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/schema"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -39,10 +40,16 @@ func init() {
attachment.DefaultUpdatedAt = attachmentDescUpdatedAt.Default.(func() time.Time)
// attachment.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
attachment.UpdateDefaultUpdatedAt = attachmentDescUpdatedAt.UpdateDefault.(func() time.Time)
+ // attachmentDescPrimary is the schema descriptor for primary field.
+ attachmentDescPrimary := attachmentFields[1].Descriptor()
+ // attachment.DefaultPrimary holds the default value on creation for the primary field.
+ attachment.DefaultPrimary = attachmentDescPrimary.Default.(bool)
// attachmentDescID is the schema descriptor for id field.
attachmentDescID := attachmentMixinFields0[0].Descriptor()
// attachment.DefaultID holds the default value on creation for the id field.
attachment.DefaultID = attachmentDescID.Default.(func() uuid.UUID)
+ authrolesFields := schema.AuthRoles{}.Fields()
+ _ = authrolesFields
authtokensMixin := schema.AuthTokens{}.Mixin()
authtokensMixinFields0 := authtokensMixin[0].Fields()
_ = authtokensMixinFields0
@@ -121,37 +128,6 @@ func init() {
documentDescID := documentMixinFields0[0].Descriptor()
// document.DefaultID holds the default value on creation for the id field.
document.DefaultID = documentDescID.Default.(func() uuid.UUID)
- documenttokenMixin := schema.DocumentToken{}.Mixin()
- documenttokenMixinFields0 := documenttokenMixin[0].Fields()
- _ = documenttokenMixinFields0
- documenttokenFields := schema.DocumentToken{}.Fields()
- _ = documenttokenFields
- // documenttokenDescCreatedAt is the schema descriptor for created_at field.
- documenttokenDescCreatedAt := documenttokenMixinFields0[1].Descriptor()
- // documenttoken.DefaultCreatedAt holds the default value on creation for the created_at field.
- documenttoken.DefaultCreatedAt = documenttokenDescCreatedAt.Default.(func() time.Time)
- // documenttokenDescUpdatedAt is the schema descriptor for updated_at field.
- documenttokenDescUpdatedAt := documenttokenMixinFields0[2].Descriptor()
- // documenttoken.DefaultUpdatedAt holds the default value on creation for the updated_at field.
- documenttoken.DefaultUpdatedAt = documenttokenDescUpdatedAt.Default.(func() time.Time)
- // documenttoken.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
- documenttoken.UpdateDefaultUpdatedAt = documenttokenDescUpdatedAt.UpdateDefault.(func() time.Time)
- // documenttokenDescToken is the schema descriptor for token field.
- documenttokenDescToken := documenttokenFields[0].Descriptor()
- // documenttoken.TokenValidator is a validator for the "token" field. It is called by the builders before save.
- documenttoken.TokenValidator = documenttokenDescToken.Validators[0].(func([]byte) error)
- // documenttokenDescUses is the schema descriptor for uses field.
- documenttokenDescUses := documenttokenFields[1].Descriptor()
- // documenttoken.DefaultUses holds the default value on creation for the uses field.
- documenttoken.DefaultUses = documenttokenDescUses.Default.(int)
- // documenttokenDescExpiresAt is the schema descriptor for expires_at field.
- documenttokenDescExpiresAt := documenttokenFields[2].Descriptor()
- // documenttoken.DefaultExpiresAt holds the default value on creation for the expires_at field.
- documenttoken.DefaultExpiresAt = documenttokenDescExpiresAt.Default.(func() time.Time)
- // documenttokenDescID is the schema descriptor for id field.
- documenttokenDescID := documenttokenMixinFields0[0].Descriptor()
- // documenttoken.DefaultID holds the default value on creation for the id field.
- documenttoken.DefaultID = documenttokenDescID.Default.(func() uuid.UUID)
groupMixin := schema.Group{}.Mixin()
groupMixinFields0 := groupMixin[0].Fields()
_ = groupMixinFields0
@@ -185,6 +161,10 @@ func init() {
return nil
}
}()
+ // groupDescCurrency is the schema descriptor for currency field.
+ groupDescCurrency := groupFields[1].Descriptor()
+ // group.DefaultCurrency holds the default value on creation for the currency field.
+ group.DefaultCurrency = groupDescCurrency.Default.(string)
// groupDescID is the schema descriptor for id field.
groupDescID := groupMixinFields0[0].Descriptor()
// group.DefaultID holds the default value on creation for the id field.
@@ -275,36 +255,40 @@ func init() {
itemDescArchived := itemFields[4].Descriptor()
// item.DefaultArchived holds the default value on creation for the archived field.
item.DefaultArchived = itemDescArchived.Default.(bool)
+ // itemDescAssetID is the schema descriptor for asset_id field.
+ itemDescAssetID := itemFields[5].Descriptor()
+ // item.DefaultAssetID holds the default value on creation for the asset_id field.
+ item.DefaultAssetID = itemDescAssetID.Default.(int)
// itemDescSerialNumber is the schema descriptor for serial_number field.
- itemDescSerialNumber := itemFields[5].Descriptor()
+ itemDescSerialNumber := itemFields[6].Descriptor()
// item.SerialNumberValidator is a validator for the "serial_number" field. It is called by the builders before save.
item.SerialNumberValidator = itemDescSerialNumber.Validators[0].(func(string) error)
// itemDescModelNumber is the schema descriptor for model_number field.
- itemDescModelNumber := itemFields[6].Descriptor()
+ itemDescModelNumber := itemFields[7].Descriptor()
// item.ModelNumberValidator is a validator for the "model_number" field. It is called by the builders before save.
item.ModelNumberValidator = itemDescModelNumber.Validators[0].(func(string) error)
// itemDescManufacturer is the schema descriptor for manufacturer field.
- itemDescManufacturer := itemFields[7].Descriptor()
+ itemDescManufacturer := itemFields[8].Descriptor()
// item.ManufacturerValidator is a validator for the "manufacturer" field. It is called by the builders before save.
item.ManufacturerValidator = itemDescManufacturer.Validators[0].(func(string) error)
// itemDescLifetimeWarranty is the schema descriptor for lifetime_warranty field.
- itemDescLifetimeWarranty := itemFields[8].Descriptor()
+ itemDescLifetimeWarranty := itemFields[9].Descriptor()
// item.DefaultLifetimeWarranty holds the default value on creation for the lifetime_warranty field.
item.DefaultLifetimeWarranty = itemDescLifetimeWarranty.Default.(bool)
// itemDescWarrantyDetails is the schema descriptor for warranty_details field.
- itemDescWarrantyDetails := itemFields[10].Descriptor()
+ itemDescWarrantyDetails := itemFields[11].Descriptor()
// item.WarrantyDetailsValidator is a validator for the "warranty_details" field. It is called by the builders before save.
item.WarrantyDetailsValidator = itemDescWarrantyDetails.Validators[0].(func(string) error)
// itemDescPurchasePrice is the schema descriptor for purchase_price field.
- itemDescPurchasePrice := itemFields[13].Descriptor()
+ itemDescPurchasePrice := itemFields[14].Descriptor()
// item.DefaultPurchasePrice holds the default value on creation for the purchase_price field.
item.DefaultPurchasePrice = itemDescPurchasePrice.Default.(float64)
// itemDescSoldPrice is the schema descriptor for sold_price field.
- itemDescSoldPrice := itemFields[16].Descriptor()
+ itemDescSoldPrice := itemFields[17].Descriptor()
// item.DefaultSoldPrice holds the default value on creation for the sold_price field.
item.DefaultSoldPrice = itemDescSoldPrice.Default.(float64)
// itemDescSoldNotes is the schema descriptor for sold_notes field.
- itemDescSoldNotes := itemFields[17].Descriptor()
+ itemDescSoldNotes := itemFields[18].Descriptor()
// item.SoldNotesValidator is a validator for the "sold_notes" field. It is called by the builders before save.
item.SoldNotesValidator = itemDescSoldNotes.Validators[0].(func(string) error)
// itemDescID is the schema descriptor for id field.
@@ -456,6 +440,110 @@ func init() {
locationDescID := locationMixinFields0[0].Descriptor()
// location.DefaultID holds the default value on creation for the id field.
location.DefaultID = locationDescID.Default.(func() uuid.UUID)
+ maintenanceentryMixin := schema.MaintenanceEntry{}.Mixin()
+ maintenanceentryMixinFields0 := maintenanceentryMixin[0].Fields()
+ _ = maintenanceentryMixinFields0
+ maintenanceentryFields := schema.MaintenanceEntry{}.Fields()
+ _ = maintenanceentryFields
+ // maintenanceentryDescCreatedAt is the schema descriptor for created_at field.
+ maintenanceentryDescCreatedAt := maintenanceentryMixinFields0[1].Descriptor()
+ // maintenanceentry.DefaultCreatedAt holds the default value on creation for the created_at field.
+ maintenanceentry.DefaultCreatedAt = maintenanceentryDescCreatedAt.Default.(func() time.Time)
+ // maintenanceentryDescUpdatedAt is the schema descriptor for updated_at field.
+ maintenanceentryDescUpdatedAt := maintenanceentryMixinFields0[2].Descriptor()
+ // maintenanceentry.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+ maintenanceentry.DefaultUpdatedAt = maintenanceentryDescUpdatedAt.Default.(func() time.Time)
+ // maintenanceentry.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+ maintenanceentry.UpdateDefaultUpdatedAt = maintenanceentryDescUpdatedAt.UpdateDefault.(func() time.Time)
+ // maintenanceentryDescName is the schema descriptor for name field.
+ maintenanceentryDescName := maintenanceentryFields[3].Descriptor()
+ // maintenanceentry.NameValidator is a validator for the "name" field. It is called by the builders before save.
+ maintenanceentry.NameValidator = func() func(string) error {
+ validators := maintenanceentryDescName.Validators
+ fns := [...]func(string) error{
+ validators[0].(func(string) error),
+ validators[1].(func(string) error),
+ }
+ return func(name string) error {
+ for _, fn := range fns {
+ if err := fn(name); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }()
+ // maintenanceentryDescDescription is the schema descriptor for description field.
+ maintenanceentryDescDescription := maintenanceentryFields[4].Descriptor()
+ // maintenanceentry.DescriptionValidator is a validator for the "description" field. It is called by the builders before save.
+ maintenanceentry.DescriptionValidator = maintenanceentryDescDescription.Validators[0].(func(string) error)
+ // maintenanceentryDescCost is the schema descriptor for cost field.
+ maintenanceentryDescCost := maintenanceentryFields[5].Descriptor()
+ // maintenanceentry.DefaultCost holds the default value on creation for the cost field.
+ maintenanceentry.DefaultCost = maintenanceentryDescCost.Default.(float64)
+ // maintenanceentryDescID is the schema descriptor for id field.
+ maintenanceentryDescID := maintenanceentryMixinFields0[0].Descriptor()
+ // maintenanceentry.DefaultID holds the default value on creation for the id field.
+ maintenanceentry.DefaultID = maintenanceentryDescID.Default.(func() uuid.UUID)
+ notifierMixin := schema.Notifier{}.Mixin()
+ notifierMixinFields0 := notifierMixin[0].Fields()
+ _ = notifierMixinFields0
+ notifierFields := schema.Notifier{}.Fields()
+ _ = notifierFields
+ // notifierDescCreatedAt is the schema descriptor for created_at field.
+ notifierDescCreatedAt := notifierMixinFields0[1].Descriptor()
+ // notifier.DefaultCreatedAt holds the default value on creation for the created_at field.
+ notifier.DefaultCreatedAt = notifierDescCreatedAt.Default.(func() time.Time)
+ // notifierDescUpdatedAt is the schema descriptor for updated_at field.
+ notifierDescUpdatedAt := notifierMixinFields0[2].Descriptor()
+ // notifier.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+ notifier.DefaultUpdatedAt = notifierDescUpdatedAt.Default.(func() time.Time)
+ // notifier.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+ notifier.UpdateDefaultUpdatedAt = notifierDescUpdatedAt.UpdateDefault.(func() time.Time)
+ // notifierDescName is the schema descriptor for name field.
+ notifierDescName := notifierFields[0].Descriptor()
+ // notifier.NameValidator is a validator for the "name" field. It is called by the builders before save.
+ notifier.NameValidator = func() func(string) error {
+ validators := notifierDescName.Validators
+ fns := [...]func(string) error{
+ validators[0].(func(string) error),
+ validators[1].(func(string) error),
+ }
+ return func(name string) error {
+ for _, fn := range fns {
+ if err := fn(name); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }()
+ // notifierDescURL is the schema descriptor for url field.
+ notifierDescURL := notifierFields[1].Descriptor()
+ // notifier.URLValidator is a validator for the "url" field. It is called by the builders before save.
+ notifier.URLValidator = func() func(string) error {
+ validators := notifierDescURL.Validators
+ fns := [...]func(string) error{
+ validators[0].(func(string) error),
+ validators[1].(func(string) error),
+ }
+ return func(url string) error {
+ for _, fn := range fns {
+ if err := fn(url); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }()
+ // notifierDescIsActive is the schema descriptor for is_active field.
+ notifierDescIsActive := notifierFields[2].Descriptor()
+ // notifier.DefaultIsActive holds the default value on creation for the is_active field.
+ notifier.DefaultIsActive = notifierDescIsActive.Default.(bool)
+ // notifierDescID is the schema descriptor for id field.
+ notifierDescID := notifierMixinFields0[0].Descriptor()
+ // notifier.DefaultID holds the default value on creation for the id field.
+ notifier.DefaultID = notifierDescID.Default.(func() uuid.UUID)
userMixin := schema.User{}.Mixin()
userMixinFields0 := userMixin[0].Fields()
_ = userMixinFields0
@@ -530,7 +618,7 @@ func init() {
// user.DefaultIsSuperuser holds the default value on creation for the is_superuser field.
user.DefaultIsSuperuser = userDescIsSuperuser.Default.(bool)
// userDescSuperuser is the schema descriptor for superuser field.
- userDescSuperuser := userFields[5].Descriptor()
+ userDescSuperuser := userFields[4].Descriptor()
// user.DefaultSuperuser holds the default value on creation for the superuser field.
user.DefaultSuperuser = userDescSuperuser.Default.(bool)
// userDescID is the schema descriptor for id field.
diff --git a/backend/internal/data/ent/runtime/runtime.go b/backend/internal/data/ent/runtime/runtime.go
index 9be6acb..b5773b1 100644
--- a/backend/internal/data/ent/runtime/runtime.go
+++ b/backend/internal/data/ent/runtime/runtime.go
@@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/hay-kot/homebox/backend/internal/data/ent/runtime.go
const (
- Version = "v0.11.3" // Version of ent codegen.
- Sum = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen.
+ Version = "v0.12.5" // Version of ent codegen.
+ Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen.
)
diff --git a/backend/internal/data/ent/schema/attachment.go b/backend/internal/data/ent/schema/attachment.go
index 7f4673a..589b684 100644
--- a/backend/internal/data/ent/schema/attachment.go
+++ b/backend/internal/data/ent/schema/attachment.go
@@ -24,6 +24,8 @@ func (Attachment) Fields() []ent.Field {
field.Enum("type").
Values("photo", "manual", "warranty", "attachment", "receipt").
Default("attachment"),
+ field.Bool("primary").
+ Default(false),
}
}
diff --git a/backend/internal/data/ent/schema/auth_roles.go b/backend/internal/data/ent/schema/auth_roles.go
new file mode 100644
index 0000000..5333eb3
--- /dev/null
+++ b/backend/internal/data/ent/schema/auth_roles.go
@@ -0,0 +1,34 @@
+package schema
+
+import (
+ "entgo.io/ent"
+ "entgo.io/ent/schema/edge"
+ "entgo.io/ent/schema/field"
+)
+
+// AuthRoles holds the schema definition for the AuthRoles entity.
+type AuthRoles struct {
+ ent.Schema
+}
+
+// Fields of the AuthRoles.
+func (AuthRoles) Fields() []ent.Field {
+ return []ent.Field{
+ field.Enum("role").
+ Default("user").
+ Values(
+ "admin", // can do everything - currently unused
+ "user", // default login role
+ "attachments", // Read Attachments
+ ),
+ }
+}
+
+// Edges of the AuthRoles.
+func (AuthRoles) Edges() []ent.Edge {
+ return []ent.Edge{
+ edge.From("token", AuthTokens.Type).
+ Ref("roles").
+ Unique(),
+ }
+}
diff --git a/backend/internal/data/ent/schema/auth_tokens.go b/backend/internal/data/ent/schema/auth_tokens.go
index 0cfd4d1..71b22d7 100644
--- a/backend/internal/data/ent/schema/auth_tokens.go
+++ b/backend/internal/data/ent/schema/auth_tokens.go
@@ -4,6 +4,7 @@ import (
"time"
"entgo.io/ent"
+ "entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
@@ -37,6 +38,11 @@ func (AuthTokens) Edges() []ent.Edge {
edge.From("user", User.Type).
Ref("auth_tokens").
Unique(),
+ edge.To("roles", AuthRoles.Type).
+ Unique().
+ Annotations(entsql.Annotation{
+ OnDelete: entsql.Cascade,
+ }),
}
}
diff --git a/backend/internal/data/ent/schema/document.go b/backend/internal/data/ent/schema/document.go
index 2293c39..d814f60 100644
--- a/backend/internal/data/ent/schema/document.go
+++ b/backend/internal/data/ent/schema/document.go
@@ -16,6 +16,7 @@ type Document struct {
func (Document) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
+ GroupMixin{ref: "documents"},
}
}
@@ -34,14 +35,6 @@ func (Document) Fields() []ent.Field {
// Edges of the Document.
func (Document) Edges() []ent.Edge {
return []ent.Edge{
- edge.From("group", Group.Type).
- Ref("documents").
- Required().
- Unique(),
- edge.To("document_tokens", DocumentToken.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
edge.To("attachments", Attachment.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
diff --git a/backend/internal/data/ent/schema/document_token.go b/backend/internal/data/ent/schema/document_token.go
deleted file mode 100644
index c5ec72f..0000000
--- a/backend/internal/data/ent/schema/document_token.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package schema
-
-import (
- "time"
-
- "entgo.io/ent"
- "entgo.io/ent/schema/edge"
- "entgo.io/ent/schema/field"
- "entgo.io/ent/schema/index"
- "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
-)
-
-// DocumentToken holds the schema definition for the DocumentToken entity.
-type DocumentToken struct {
- ent.Schema
-}
-
-func (DocumentToken) Mixin() []ent.Mixin {
- return []ent.Mixin{
- mixins.BaseMixin{},
- }
-}
-
-// Fields of the DocumentToken.
-func (DocumentToken) Fields() []ent.Field {
- return []ent.Field{
- field.Bytes("token").
- NotEmpty().
- Unique(),
- field.Int("uses").
- Default(1),
- field.Time("expires_at").
- Default(func() time.Time { return time.Now().Add(time.Minute * 10) }),
- }
-}
-
-// Edges of the DocumentToken.
-func (DocumentToken) Edges() []ent.Edge {
- return []ent.Edge{
- edge.From("document", Document.Type).
- Ref("document_tokens").
- Unique(),
- }
-}
-
-func (DocumentToken) Indexes() []ent.Index {
- return []ent.Index{
- index.Fields("token"),
- }
-}
diff --git a/backend/internal/data/ent/schema/group.go b/backend/internal/data/ent/schema/group.go
index 49b358c..352ac0b 100644
--- a/backend/internal/data/ent/schema/group.go
+++ b/backend/internal/data/ent/schema/group.go
@@ -5,6 +5,8 @@ import (
"entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
+ "entgo.io/ent/schema/mixin"
+ "github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
)
@@ -25,38 +27,59 @@ func (Group) Fields() []ent.Field {
field.String("name").
MaxLen(255).
NotEmpty(),
- field.Enum("currency").
- Default("usd").
- Values("usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk"),
+ field.String("currency").
+ Default("usd"),
}
}
// Edges of the Home.
func (Group) Edges() []ent.Edge {
+ owned := func(name string, t any) ent.Edge {
+ return edge.To(name, t).
+ Annotations(entsql.Annotation{
+ OnDelete: entsql.Cascade,
+ })
+ }
+
return []ent.Edge{
- edge.To("users", User.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("locations", Location.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("items", Item.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("labels", Label.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("documents", Document.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("invitation_tokens", GroupInvitationToken.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
+ owned("users", User.Type),
+ owned("locations", Location.Type),
+ owned("items", Item.Type),
+ owned("labels", Label.Type),
+ owned("documents", Document.Type),
+ owned("invitation_tokens", GroupInvitationToken.Type),
+ owned("notifiers", Notifier.Type),
+ // $scaffold_edge
}
}
+
+// GroupMixin when embedded in an ent.Schema, adds a reference to
+// the Group entity.
+type GroupMixin struct {
+ ref string
+ field string
+ mixin.Schema
+}
+
+func (g GroupMixin) Fields() []ent.Field {
+ if g.field != "" {
+ return []ent.Field{
+ field.UUID(g.field, uuid.UUID{}),
+ }
+ }
+
+ return nil
+}
+
+func (g GroupMixin) Edges() []ent.Edge {
+ edge := edge.From("group", Group.Type).
+ Ref(g.ref).
+ Unique().
+ Required()
+
+ if g.field != "" {
+ edge = edge.Field(g.field)
+ }
+
+ return []ent.Edge{edge}
+}
diff --git a/backend/internal/data/ent/schema/item.go b/backend/internal/data/ent/schema/item.go
index f7799f4..344829f 100644
--- a/backend/internal/data/ent/schema/item.go
+++ b/backend/internal/data/ent/schema/item.go
@@ -18,6 +18,7 @@ func (Item) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
mixins.DetailsMixin{},
+ GroupMixin{ref: "items"},
}
}
@@ -29,6 +30,7 @@ func (Item) Indexes() []ent.Index {
index.Fields("model_number"),
index.Fields("serial_number"),
index.Fields("archived"),
+ index.Fields("asset_id"),
}
}
@@ -37,8 +39,7 @@ func (Item) Fields() []ent.Field {
return []ent.Field{
field.String("import_ref").
Optional().
- MaxLen(100).
- Immutable(),
+ MaxLen(100),
field.String("notes").
MaxLen(1000).
Optional(),
@@ -48,6 +49,8 @@ func (Item) Fields() []ent.Field {
Default(false),
field.Bool("archived").
Default(false),
+ field.Int("asset_id").
+ Default(0),
// ------------------------------------
// item identification
@@ -96,26 +99,24 @@ func (Item) Fields() []ent.Field {
// Edges of the Item.
func (Item) Edges() []ent.Edge {
+ owned := func(s string, t any) ent.Edge {
+ return edge.To(s, t).
+ Annotations(entsql.Annotation{
+ OnDelete: entsql.Cascade,
+ })
+ }
+
return []ent.Edge{
edge.To("children", Item.Type).
From("parent").
Unique(),
- edge.From("group", Group.Type).
- Ref("items").
- Required().
- Unique(),
edge.From("label", Label.Type).
Ref("items"),
edge.From("location", Location.Type).
Ref("items").
Unique(),
- edge.To("fields", ItemField.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
- edge.To("attachments", Attachment.Type).
- Annotations(entsql.Annotation{
- OnDelete: entsql.Cascade,
- }),
+ owned("fields", ItemField.Type),
+ owned("maintenance_entries", MaintenanceEntry.Type),
+ owned("attachments", Attachment.Type),
}
}
diff --git a/backend/internal/data/ent/schema/label.go b/backend/internal/data/ent/schema/label.go
index 72d6078..c54c713 100644
--- a/backend/internal/data/ent/schema/label.go
+++ b/backend/internal/data/ent/schema/label.go
@@ -16,6 +16,7 @@ func (Label) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
mixins.DetailsMixin{},
+ GroupMixin{ref: "labels"},
}
}
@@ -31,10 +32,6 @@ func (Label) Fields() []ent.Field {
// Edges of the Label.
func (Label) Edges() []ent.Edge {
return []ent.Edge{
- edge.From("group", Group.Type).
- Ref("labels").
- Required().
- Unique(),
edge.To("items", Item.Type),
}
}
diff --git a/backend/internal/data/ent/schema/location.go b/backend/internal/data/ent/schema/location.go
index b3142b4..b52cb7a 100644
--- a/backend/internal/data/ent/schema/location.go
+++ b/backend/internal/data/ent/schema/location.go
@@ -16,6 +16,7 @@ func (Location) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
mixins.DetailsMixin{},
+ GroupMixin{ref: "locations"},
}
}
@@ -30,10 +31,6 @@ func (Location) Edges() []ent.Edge {
edge.To("children", Location.Type).
From("parent").
Unique(),
- edge.From("group", Group.Type).
- Ref("locations").
- Unique().
- Required(),
edge.To("items", Item.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
diff --git a/backend/internal/data/ent/schema/maintenance_entry.go b/backend/internal/data/ent/schema/maintenance_entry.go
new file mode 100644
index 0000000..1c623cf
--- /dev/null
+++ b/backend/internal/data/ent/schema/maintenance_entry.go
@@ -0,0 +1,48 @@
+package schema
+
+import (
+ "entgo.io/ent"
+ "entgo.io/ent/schema/edge"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
+)
+
+type MaintenanceEntry struct {
+ ent.Schema
+}
+
+func (MaintenanceEntry) Mixin() []ent.Mixin {
+ return []ent.Mixin{
+ mixins.BaseMixin{},
+ }
+}
+
+func (MaintenanceEntry) Fields() []ent.Field {
+ return []ent.Field{
+ field.UUID("item_id", uuid.UUID{}),
+ field.Time("date").
+ Optional(),
+ field.Time("scheduled_date").
+ Optional(),
+ field.String("name").
+ MaxLen(255).
+ NotEmpty(),
+ field.String("description").
+ MaxLen(2500).
+ Optional(),
+ field.Float("cost").
+ Default(0.0),
+ }
+}
+
+// Edges of the MaintenanceEntry.
+func (MaintenanceEntry) Edges() []ent.Edge {
+ return []ent.Edge{
+ edge.From("item", Item.Type).
+ Field("item_id").
+ Ref("maintenance_entries").
+ Required().
+ Unique(),
+ }
+}
diff --git a/backend/internal/data/ent/schema/notifier.go b/backend/internal/data/ent/schema/notifier.go
new file mode 100755
index 0000000..c3561d0
--- /dev/null
+++ b/backend/internal/data/ent/schema/notifier.go
@@ -0,0 +1,51 @@
+package schema
+
+import (
+ "entgo.io/ent"
+ "entgo.io/ent/schema/field"
+ "entgo.io/ent/schema/index"
+
+ "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
+)
+
+type Notifier struct {
+ ent.Schema
+}
+
+func (Notifier) Mixin() []ent.Mixin {
+ return []ent.Mixin{
+ mixins.BaseMixin{},
+ GroupMixin{
+ ref: "notifiers",
+ field: "group_id",
+ },
+ UserMixin{
+ ref: "notifiers",
+ field: "user_id",
+ },
+ }
+}
+
+// Fields of the Notifier.
+func (Notifier) Fields() []ent.Field {
+ return []ent.Field{
+ field.String("name").
+ MaxLen(255).
+ NotEmpty(),
+ field.String("url").
+ Sensitive().
+ MaxLen(2083). // supposed max length of URL
+ NotEmpty(),
+ field.Bool("is_active").
+ Default(true),
+ }
+}
+
+func (Notifier) Indexes() []ent.Index {
+ return []ent.Index{
+ index.Fields("user_id"),
+ index.Fields("user_id", "is_active"),
+ index.Fields("group_id"),
+ index.Fields("group_id", "is_active"),
+ }
+}
diff --git a/backend/internal/data/ent/schema/templates/has_id.tmpl b/backend/internal/data/ent/schema/templates/has_id.tmpl
index 42b0cd8..d9134e9 100644
--- a/backend/internal/data/ent/schema/templates/has_id.tmpl
+++ b/backend/internal/data/ent/schema/templates/has_id.tmpl
@@ -9,10 +9,15 @@
import "github.com/google/uuid"
{{/* Loop over all nodes and implement the "HasID" interface */}}
{{ range $n := $.Nodes }}
+ {{ if not $n.ID }}
+ {{/* If the node doesn't have an ID field, we skip it. */}}
+ {{ continue }}
+ {{ end }}
+ {{/* The "HasID" interface is implemented by the "ID" method. */}}
{{ $receiver := $n.Receiver }}
- func ({{ $receiver }} *{{ $n.Name }}) GetID() uuid.UUID {
+ func ({{ $receiver }} *{{ $n.Name }}) GetID() {{ $n.ID.Type }} {
return {{ $receiver }}.ID
}
{{ end }}
-{{ end }}
\ No newline at end of file
+{{ end }}
diff --git a/backend/internal/data/ent/schema/user.go b/backend/internal/data/ent/schema/user.go
index b3342a8..10b0a8a 100644
--- a/backend/internal/data/ent/schema/user.go
+++ b/backend/internal/data/ent/schema/user.go
@@ -5,6 +5,8 @@ import (
"entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
+ "entgo.io/ent/schema/mixin"
+ "github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
)
@@ -16,6 +18,7 @@ type User struct {
func (User) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
+ GroupMixin{ref: "users"},
}
}
@@ -35,11 +38,11 @@ func (User) Fields() []ent.Field {
Sensitive(),
field.Bool("is_superuser").
Default(false),
+ field.Bool("superuser").
+ Default(false),
field.Enum("role").
Default("user").
Values("user", "owner"),
- field.Bool("superuser").
- Default(false),
field.Time("activated_on").
Optional(),
}
@@ -48,13 +51,44 @@ func (User) Fields() []ent.Field {
// Edges of the User.
func (User) Edges() []ent.Edge {
return []ent.Edge{
- edge.From("group", Group.Type).
- Ref("users").
- Required().
- Unique(),
edge.To("auth_tokens", AuthTokens.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
+ edge.To("notifiers", Notifier.Type).
+ Annotations(entsql.Annotation{
+ OnDelete: entsql.Cascade,
+ }),
}
}
+
+// UserMixin when embedded in an ent.Schema, adds a reference to
+// the User entity.
+type UserMixin struct {
+ ref string
+ field string
+ mixin.Schema
+}
+
+func (g UserMixin) Fields() []ent.Field {
+ if g.field != "" {
+ return []ent.Field{
+ field.UUID(g.field, uuid.UUID{}),
+ }
+ }
+
+ return nil
+}
+
+func (g UserMixin) Edges() []ent.Edge {
+ edge := edge.From("user", User.Type).
+ Ref(g.ref).
+ Unique().
+ Required()
+
+ if g.field != "" {
+ edge = edge.Field(g.field)
+ }
+
+ return []ent.Edge{edge}
+}
diff --git a/backend/internal/data/ent/tx.go b/backend/internal/data/ent/tx.go
index 12e5e9f..f51f2ac 100644
--- a/backend/internal/data/ent/tx.go
+++ b/backend/internal/data/ent/tx.go
@@ -14,12 +14,12 @@ type Tx struct {
config
// Attachment is the client for interacting with the Attachment builders.
Attachment *AttachmentClient
+ // AuthRoles is the client for interacting with the AuthRoles builders.
+ AuthRoles *AuthRolesClient
// AuthTokens is the client for interacting with the AuthTokens builders.
AuthTokens *AuthTokensClient
// Document is the client for interacting with the Document builders.
Document *DocumentClient
- // DocumentToken is the client for interacting with the DocumentToken builders.
- DocumentToken *DocumentTokenClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// GroupInvitationToken is the client for interacting with the GroupInvitationToken builders.
@@ -32,18 +32,16 @@ type Tx struct {
Label *LabelClient
// Location is the client for interacting with the Location builders.
Location *LocationClient
+ // MaintenanceEntry is the client for interacting with the MaintenanceEntry builders.
+ MaintenanceEntry *MaintenanceEntryClient
+ // Notifier is the client for interacting with the Notifier builders.
+ Notifier *NotifierClient
// User is the client for interacting with the User builders.
User *UserClient
// lazily loaded.
client *Client
clientOnce sync.Once
-
- // completion callbacks.
- mu sync.Mutex
- onCommit []CommitHook
- onRollback []RollbackHook
-
// ctx lives for the life of the transaction. It is
// the same context used by the underlying connection.
ctx context.Context
@@ -88,9 +86,9 @@ func (tx *Tx) Commit() error {
var fn Committer = CommitFunc(func(context.Context, *Tx) error {
return txDriver.tx.Commit()
})
- tx.mu.Lock()
- hooks := append([]CommitHook(nil), tx.onCommit...)
- tx.mu.Unlock()
+ txDriver.mu.Lock()
+ hooks := append([]CommitHook(nil), txDriver.onCommit...)
+ txDriver.mu.Unlock()
for i := len(hooks) - 1; i >= 0; i-- {
fn = hooks[i](fn)
}
@@ -99,9 +97,10 @@ func (tx *Tx) Commit() error {
// OnCommit adds a hook to call on commit.
func (tx *Tx) OnCommit(f CommitHook) {
- tx.mu.Lock()
- defer tx.mu.Unlock()
- tx.onCommit = append(tx.onCommit, f)
+ txDriver := tx.config.driver.(*txDriver)
+ txDriver.mu.Lock()
+ txDriver.onCommit = append(txDriver.onCommit, f)
+ txDriver.mu.Unlock()
}
type (
@@ -143,9 +142,9 @@ func (tx *Tx) Rollback() error {
var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
return txDriver.tx.Rollback()
})
- tx.mu.Lock()
- hooks := append([]RollbackHook(nil), tx.onRollback...)
- tx.mu.Unlock()
+ txDriver.mu.Lock()
+ hooks := append([]RollbackHook(nil), txDriver.onRollback...)
+ txDriver.mu.Unlock()
for i := len(hooks) - 1; i >= 0; i-- {
fn = hooks[i](fn)
}
@@ -154,9 +153,10 @@ func (tx *Tx) Rollback() error {
// OnRollback adds a hook to call on rollback.
func (tx *Tx) OnRollback(f RollbackHook) {
- tx.mu.Lock()
- defer tx.mu.Unlock()
- tx.onRollback = append(tx.onRollback, f)
+ txDriver := tx.config.driver.(*txDriver)
+ txDriver.mu.Lock()
+ txDriver.onRollback = append(txDriver.onRollback, f)
+ txDriver.mu.Unlock()
}
// Client returns a Client that binds to current transaction.
@@ -170,15 +170,17 @@ func (tx *Tx) Client() *Client {
func (tx *Tx) init() {
tx.Attachment = NewAttachmentClient(tx.config)
+ tx.AuthRoles = NewAuthRolesClient(tx.config)
tx.AuthTokens = NewAuthTokensClient(tx.config)
tx.Document = NewDocumentClient(tx.config)
- tx.DocumentToken = NewDocumentTokenClient(tx.config)
tx.Group = NewGroupClient(tx.config)
tx.GroupInvitationToken = NewGroupInvitationTokenClient(tx.config)
tx.Item = NewItemClient(tx.config)
tx.ItemField = NewItemFieldClient(tx.config)
tx.Label = NewLabelClient(tx.config)
tx.Location = NewLocationClient(tx.config)
+ tx.MaintenanceEntry = NewMaintenanceEntryClient(tx.config)
+ tx.Notifier = NewNotifierClient(tx.config)
tx.User = NewUserClient(tx.config)
}
@@ -198,6 +200,10 @@ type txDriver struct {
drv dialect.Driver
// tx is the underlying transaction.
tx dialect.Tx
+ // completion hooks.
+ mu sync.Mutex
+ onCommit []CommitHook
+ onRollback []RollbackHook
}
// newTx creates a new transactional driver.
diff --git a/backend/internal/data/ent/user.go b/backend/internal/data/ent/user.go
index 48dbdcb..3331de7 100644
--- a/backend/internal/data/ent/user.go
+++ b/backend/internal/data/ent/user.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -30,16 +31,17 @@ type User struct {
Password string `json:"-"`
// IsSuperuser holds the value of the "is_superuser" field.
IsSuperuser bool `json:"is_superuser,omitempty"`
- // Role holds the value of the "role" field.
- Role user.Role `json:"role,omitempty"`
// Superuser holds the value of the "superuser" field.
Superuser bool `json:"superuser,omitempty"`
+ // Role holds the value of the "role" field.
+ Role user.Role `json:"role,omitempty"`
// ActivatedOn holds the value of the "activated_on" field.
ActivatedOn time.Time `json:"activated_on,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the UserQuery when eager-loading is set.
- Edges UserEdges `json:"edges"`
- group_users *uuid.UUID
+ Edges UserEdges `json:"edges"`
+ group_users *uuid.UUID
+ selectValues sql.SelectValues
}
// UserEdges holds the relations/edges for other nodes in the graph.
@@ -48,9 +50,11 @@ type UserEdges struct {
Group *Group `json:"group,omitempty"`
// AuthTokens holds the value of the auth_tokens edge.
AuthTokens []*AuthTokens `json:"auth_tokens,omitempty"`
+ // Notifiers holds the value of the notifiers edge.
+ Notifiers []*Notifier `json:"notifiers,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [2]bool
+ loadedTypes [3]bool
}
// GroupOrErr returns the Group value or an error if the edge
@@ -75,6 +79,15 @@ func (e UserEdges) AuthTokensOrErr() ([]*AuthTokens, error) {
return nil, &NotLoadedError{edge: "auth_tokens"}
}
+// NotifiersOrErr returns the Notifiers value or an error if the edge
+// was not loaded in eager-loading.
+func (e UserEdges) NotifiersOrErr() ([]*Notifier, error) {
+ if e.loadedTypes[2] {
+ return e.Notifiers, nil
+ }
+ return nil, &NotLoadedError{edge: "notifiers"}
+}
+
// scanValues returns the types for scanning values from sql.Rows.
func (*User) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
@@ -91,7 +104,7 @@ func (*User) scanValues(columns []string) ([]any, error) {
case user.ForeignKeys[0]: // group_users
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
- return nil, fmt.Errorf("unexpected column %q for type User", columns[i])
+ values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -147,18 +160,18 @@ func (u *User) assignValues(columns []string, values []any) error {
} else if value.Valid {
u.IsSuperuser = value.Bool
}
- case user.FieldRole:
- if value, ok := values[i].(*sql.NullString); !ok {
- return fmt.Errorf("unexpected type %T for field role", values[i])
- } else if value.Valid {
- u.Role = user.Role(value.String)
- }
case user.FieldSuperuser:
if value, ok := values[i].(*sql.NullBool); !ok {
return fmt.Errorf("unexpected type %T for field superuser", values[i])
} else if value.Valid {
u.Superuser = value.Bool
}
+ case user.FieldRole:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field role", values[i])
+ } else if value.Valid {
+ u.Role = user.Role(value.String)
+ }
case user.FieldActivatedOn:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field activated_on", values[i])
@@ -172,26 +185,39 @@ func (u *User) assignValues(columns []string, values []any) error {
u.group_users = new(uuid.UUID)
*u.group_users = *value.S.(*uuid.UUID)
}
+ default:
+ u.selectValues.Set(columns[i], values[i])
}
}
return nil
}
+// Value returns the ent.Value that was dynamically selected and assigned to the User.
+// This includes values selected through modifiers, order, etc.
+func (u *User) Value(name string) (ent.Value, error) {
+ return u.selectValues.Get(name)
+}
+
// QueryGroup queries the "group" edge of the User entity.
func (u *User) QueryGroup() *GroupQuery {
- return (&UserClient{config: u.config}).QueryGroup(u)
+ return NewUserClient(u.config).QueryGroup(u)
}
// QueryAuthTokens queries the "auth_tokens" edge of the User entity.
func (u *User) QueryAuthTokens() *AuthTokensQuery {
- return (&UserClient{config: u.config}).QueryAuthTokens(u)
+ return NewUserClient(u.config).QueryAuthTokens(u)
+}
+
+// QueryNotifiers queries the "notifiers" edge of the User entity.
+func (u *User) QueryNotifiers() *NotifierQuery {
+ return NewUserClient(u.config).QueryNotifiers(u)
}
// Update returns a builder for updating this User.
// Note that you need to call User.Unwrap() before calling this method if this User
// was returned from a transaction, and the transaction was committed or rolled back.
func (u *User) Update() *UserUpdateOne {
- return (&UserClient{config: u.config}).UpdateOne(u)
+ return NewUserClient(u.config).UpdateOne(u)
}
// Unwrap unwraps the User entity that was returned from a transaction after it was closed,
@@ -227,12 +253,12 @@ func (u *User) String() string {
builder.WriteString("is_superuser=")
builder.WriteString(fmt.Sprintf("%v", u.IsSuperuser))
builder.WriteString(", ")
- builder.WriteString("role=")
- builder.WriteString(fmt.Sprintf("%v", u.Role))
- builder.WriteString(", ")
builder.WriteString("superuser=")
builder.WriteString(fmt.Sprintf("%v", u.Superuser))
builder.WriteString(", ")
+ builder.WriteString("role=")
+ builder.WriteString(fmt.Sprintf("%v", u.Role))
+ builder.WriteString(", ")
builder.WriteString("activated_on=")
builder.WriteString(u.ActivatedOn.Format(time.ANSIC))
builder.WriteByte(')')
@@ -241,9 +267,3 @@ func (u *User) String() string {
// Users is a parsable slice of User.
type Users []*User
-
-func (u Users) config(cfg config) {
- for _i := range u {
- u[_i].config = cfg
- }
-}
diff --git a/backend/internal/data/ent/user/user.go b/backend/internal/data/ent/user/user.go
index c8b61c2..33b657b 100644
--- a/backend/internal/data/ent/user/user.go
+++ b/backend/internal/data/ent/user/user.go
@@ -6,6 +6,8 @@ import (
"fmt"
"time"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -26,16 +28,18 @@ const (
FieldPassword = "password"
// FieldIsSuperuser holds the string denoting the is_superuser field in the database.
FieldIsSuperuser = "is_superuser"
- // FieldRole holds the string denoting the role field in the database.
- FieldRole = "role"
// FieldSuperuser holds the string denoting the superuser field in the database.
FieldSuperuser = "superuser"
+ // FieldRole holds the string denoting the role field in the database.
+ FieldRole = "role"
// FieldActivatedOn holds the string denoting the activated_on field in the database.
FieldActivatedOn = "activated_on"
// EdgeGroup holds the string denoting the group edge name in mutations.
EdgeGroup = "group"
// EdgeAuthTokens holds the string denoting the auth_tokens edge name in mutations.
EdgeAuthTokens = "auth_tokens"
+ // EdgeNotifiers holds the string denoting the notifiers edge name in mutations.
+ EdgeNotifiers = "notifiers"
// Table holds the table name of the user in the database.
Table = "users"
// GroupTable is the table that holds the group relation/edge.
@@ -52,6 +56,13 @@ const (
AuthTokensInverseTable = "auth_tokens"
// AuthTokensColumn is the table column denoting the auth_tokens relation/edge.
AuthTokensColumn = "user_auth_tokens"
+ // NotifiersTable is the table that holds the notifiers relation/edge.
+ NotifiersTable = "notifiers"
+ // NotifiersInverseTable is the table name for the Notifier entity.
+ // It exists in this package in order to avoid circular dependency with the "notifier" package.
+ NotifiersInverseTable = "notifiers"
+ // NotifiersColumn is the table column denoting the notifiers relation/edge.
+ NotifiersColumn = "user_id"
)
// Columns holds all SQL columns for user fields.
@@ -63,8 +74,8 @@ var Columns = []string{
FieldEmail,
FieldPassword,
FieldIsSuperuser,
- FieldRole,
FieldSuperuser,
+ FieldRole,
FieldActivatedOn,
}
@@ -135,3 +146,112 @@ func RoleValidator(r Role) error {
return fmt.Errorf("user: invalid enum value for role field: %q", r)
}
}
+
+// OrderOption defines the ordering options for the User queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByEmail orders the results by the email field.
+func ByEmail(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldEmail, opts...).ToFunc()
+}
+
+// ByPassword orders the results by the password field.
+func ByPassword(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldPassword, opts...).ToFunc()
+}
+
+// ByIsSuperuser orders the results by the is_superuser field.
+func ByIsSuperuser(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldIsSuperuser, opts...).ToFunc()
+}
+
+// BySuperuser orders the results by the superuser field.
+func BySuperuser(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSuperuser, opts...).ToFunc()
+}
+
+// ByRole orders the results by the role field.
+func ByRole(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldRole, opts...).ToFunc()
+}
+
+// ByActivatedOn orders the results by the activated_on field.
+func ByActivatedOn(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldActivatedOn, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+
+// ByAuthTokensCount orders the results by auth_tokens count.
+func ByAuthTokensCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newAuthTokensStep(), opts...)
+ }
+}
+
+// ByAuthTokens orders the results by auth_tokens terms.
+func ByAuthTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newAuthTokensStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
+// ByNotifiersCount orders the results by notifiers count.
+func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...)
+ }
+}
+
+// ByNotifiers orders the results by notifiers terms.
+func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
+func newAuthTokensStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(AuthTokensInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, AuthTokensTable, AuthTokensColumn),
+ )
+}
+func newNotifiersStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(NotifiersInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
+ )
+}
diff --git a/backend/internal/data/ent/user/where.go b/backend/internal/data/ent/user/where.go
index 567187e..8686e73 100644
--- a/backend/internal/data/ent/user/where.go
+++ b/backend/internal/data/ent/user/where.go
@@ -13,696 +13,452 @@ import (
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.In(s.C(FieldID), v...))
- })
+ return predicate.User(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- v := make([]any, len(ids))
- for i := range v {
- v[i] = ids[i]
- }
- s.Where(sql.NotIn(s.C(FieldID), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldID), id))
- })
+ return predicate.User(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldEQ(FieldName, v))
}
// Email applies equality check predicate on the "email" field. It's identical to EmailEQ.
func Email(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldEQ(FieldEmail, v))
}
// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
func Password(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldEQ(FieldPassword, v))
}
// IsSuperuser applies equality check predicate on the "is_superuser" field. It's identical to IsSuperuserEQ.
func IsSuperuser(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldIsSuperuser), v))
- })
+ return predicate.User(sql.FieldEQ(FieldIsSuperuser, v))
}
// Superuser applies equality check predicate on the "superuser" field. It's identical to SuperuserEQ.
func Superuser(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSuperuser), v))
- })
+ return predicate.User(sql.FieldEQ(FieldSuperuser, v))
}
// ActivatedOn applies equality check predicate on the "activated_on" field. It's identical to ActivatedOnEQ.
func ActivatedOn(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldEQ(FieldActivatedOn, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldCreatedAt), v...))
- })
+ return predicate.User(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldCreatedAt), v))
- })
+ return predicate.User(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
- })
+ return predicate.User(sql.FieldLTE(FieldUpdatedAt, v))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldName), v...))
- })
+ return predicate.User(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldName), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldName), v))
- })
+ return predicate.User(sql.FieldContainsFold(FieldName, v))
}
// EmailEQ applies the EQ predicate on the "email" field.
func EmailEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldEQ(FieldEmail, v))
}
// EmailNEQ applies the NEQ predicate on the "email" field.
func EmailNEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldEmail, v))
}
// EmailIn applies the In predicate on the "email" field.
func EmailIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldEmail), v...))
- })
+ return predicate.User(sql.FieldIn(FieldEmail, vs...))
}
// EmailNotIn applies the NotIn predicate on the "email" field.
func EmailNotIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldEmail), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldEmail, vs...))
}
// EmailGT applies the GT predicate on the "email" field.
func EmailGT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldGT(FieldEmail, v))
}
// EmailGTE applies the GTE predicate on the "email" field.
func EmailGTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldGTE(FieldEmail, v))
}
// EmailLT applies the LT predicate on the "email" field.
func EmailLT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldLT(FieldEmail, v))
}
// EmailLTE applies the LTE predicate on the "email" field.
func EmailLTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldLTE(FieldEmail, v))
}
// EmailContains applies the Contains predicate on the "email" field.
func EmailContains(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldContains(FieldEmail, v))
}
// EmailHasPrefix applies the HasPrefix predicate on the "email" field.
func EmailHasPrefix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldHasPrefix(FieldEmail, v))
}
// EmailHasSuffix applies the HasSuffix predicate on the "email" field.
func EmailHasSuffix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldHasSuffix(FieldEmail, v))
}
// EmailEqualFold applies the EqualFold predicate on the "email" field.
func EmailEqualFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldEqualFold(FieldEmail, v))
}
// EmailContainsFold applies the ContainsFold predicate on the "email" field.
func EmailContainsFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldEmail), v))
- })
+ return predicate.User(sql.FieldContainsFold(FieldEmail, v))
}
// PasswordEQ applies the EQ predicate on the "password" field.
func PasswordEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldEQ(FieldPassword, v))
}
// PasswordNEQ applies the NEQ predicate on the "password" field.
func PasswordNEQ(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldPassword, v))
}
// PasswordIn applies the In predicate on the "password" field.
func PasswordIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldPassword), v...))
- })
+ return predicate.User(sql.FieldIn(FieldPassword, vs...))
}
// PasswordNotIn applies the NotIn predicate on the "password" field.
func PasswordNotIn(vs ...string) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldPassword), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldPassword, vs...))
}
// PasswordGT applies the GT predicate on the "password" field.
func PasswordGT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldGT(FieldPassword, v))
}
// PasswordGTE applies the GTE predicate on the "password" field.
func PasswordGTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldGTE(FieldPassword, v))
}
// PasswordLT applies the LT predicate on the "password" field.
func PasswordLT(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldLT(FieldPassword, v))
}
// PasswordLTE applies the LTE predicate on the "password" field.
func PasswordLTE(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldLTE(FieldPassword, v))
}
// PasswordContains applies the Contains predicate on the "password" field.
func PasswordContains(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.Contains(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldContains(FieldPassword, v))
}
// PasswordHasPrefix applies the HasPrefix predicate on the "password" field.
func PasswordHasPrefix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasPrefix(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldHasPrefix(FieldPassword, v))
}
// PasswordHasSuffix applies the HasSuffix predicate on the "password" field.
func PasswordHasSuffix(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.HasSuffix(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldHasSuffix(FieldPassword, v))
}
// PasswordEqualFold applies the EqualFold predicate on the "password" field.
func PasswordEqualFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EqualFold(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldEqualFold(FieldPassword, v))
}
// PasswordContainsFold applies the ContainsFold predicate on the "password" field.
func PasswordContainsFold(v string) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.ContainsFold(s.C(FieldPassword), v))
- })
+ return predicate.User(sql.FieldContainsFold(FieldPassword, v))
}
// IsSuperuserEQ applies the EQ predicate on the "is_superuser" field.
func IsSuperuserEQ(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldIsSuperuser), v))
- })
+ return predicate.User(sql.FieldEQ(FieldIsSuperuser, v))
}
// IsSuperuserNEQ applies the NEQ predicate on the "is_superuser" field.
func IsSuperuserNEQ(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldIsSuperuser), v))
- })
-}
-
-// RoleEQ applies the EQ predicate on the "role" field.
-func RoleEQ(v Role) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldRole), v))
- })
-}
-
-// RoleNEQ applies the NEQ predicate on the "role" field.
-func RoleNEQ(v Role) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldRole), v))
- })
-}
-
-// RoleIn applies the In predicate on the "role" field.
-func RoleIn(vs ...Role) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldRole), v...))
- })
-}
-
-// RoleNotIn applies the NotIn predicate on the "role" field.
-func RoleNotIn(vs ...Role) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldRole), v...))
- })
+ return predicate.User(sql.FieldNEQ(FieldIsSuperuser, v))
}
// SuperuserEQ applies the EQ predicate on the "superuser" field.
func SuperuserEQ(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldSuperuser), v))
- })
+ return predicate.User(sql.FieldEQ(FieldSuperuser, v))
}
// SuperuserNEQ applies the NEQ predicate on the "superuser" field.
func SuperuserNEQ(v bool) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldSuperuser), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldSuperuser, v))
+}
+
+// RoleEQ applies the EQ predicate on the "role" field.
+func RoleEQ(v Role) predicate.User {
+ return predicate.User(sql.FieldEQ(FieldRole, v))
+}
+
+// RoleNEQ applies the NEQ predicate on the "role" field.
+func RoleNEQ(v Role) predicate.User {
+ return predicate.User(sql.FieldNEQ(FieldRole, v))
+}
+
+// RoleIn applies the In predicate on the "role" field.
+func RoleIn(vs ...Role) predicate.User {
+ return predicate.User(sql.FieldIn(FieldRole, vs...))
+}
+
+// RoleNotIn applies the NotIn predicate on the "role" field.
+func RoleNotIn(vs ...Role) predicate.User {
+ return predicate.User(sql.FieldNotIn(FieldRole, vs...))
}
// ActivatedOnEQ applies the EQ predicate on the "activated_on" field.
func ActivatedOnEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.EQ(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldEQ(FieldActivatedOn, v))
}
// ActivatedOnNEQ applies the NEQ predicate on the "activated_on" field.
func ActivatedOnNEQ(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NEQ(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldNEQ(FieldActivatedOn, v))
}
// ActivatedOnIn applies the In predicate on the "activated_on" field.
func ActivatedOnIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.In(s.C(FieldActivatedOn), v...))
- })
+ return predicate.User(sql.FieldIn(FieldActivatedOn, vs...))
}
// ActivatedOnNotIn applies the NotIn predicate on the "activated_on" field.
func ActivatedOnNotIn(vs ...time.Time) predicate.User {
- v := make([]any, len(vs))
- for i := range v {
- v[i] = vs[i]
- }
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotIn(s.C(FieldActivatedOn), v...))
- })
+ return predicate.User(sql.FieldNotIn(FieldActivatedOn, vs...))
}
// ActivatedOnGT applies the GT predicate on the "activated_on" field.
func ActivatedOnGT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GT(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldGT(FieldActivatedOn, v))
}
// ActivatedOnGTE applies the GTE predicate on the "activated_on" field.
func ActivatedOnGTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.GTE(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldGTE(FieldActivatedOn, v))
}
// ActivatedOnLT applies the LT predicate on the "activated_on" field.
func ActivatedOnLT(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LT(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldLT(FieldActivatedOn, v))
}
// ActivatedOnLTE applies the LTE predicate on the "activated_on" field.
func ActivatedOnLTE(v time.Time) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.LTE(s.C(FieldActivatedOn), v))
- })
+ return predicate.User(sql.FieldLTE(FieldActivatedOn, v))
}
// ActivatedOnIsNil applies the IsNil predicate on the "activated_on" field.
func ActivatedOnIsNil() predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.IsNull(s.C(FieldActivatedOn)))
- })
+ return predicate.User(sql.FieldIsNull(FieldActivatedOn))
}
// ActivatedOnNotNil applies the NotNil predicate on the "activated_on" field.
func ActivatedOnNotNil() predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s.Where(sql.NotNull(s.C(FieldActivatedOn)))
- })
+ return predicate.User(sql.FieldNotNull(FieldActivatedOn))
}
// HasGroup applies the HasEdge predicate on the "group" edge.
@@ -710,7 +466,6 @@ func HasGroup() predicate.User {
return predicate.User(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -720,11 +475,7 @@ func HasGroup() predicate.User {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.User {
return predicate.User(func(s *sql.Selector) {
- step := sqlgraph.NewStep(
- sqlgraph.From(Table, FieldID),
- sqlgraph.To(GroupInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
- )
+ step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -738,7 +489,6 @@ func HasAuthTokens() predicate.User {
return predicate.User(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(AuthTokensTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AuthTokensTable, AuthTokensColumn),
)
sqlgraph.HasNeighbors(s, step)
@@ -747,12 +497,31 @@ func HasAuthTokens() predicate.User {
// HasAuthTokensWith applies the HasEdge predicate on the "auth_tokens" edge with a given conditions (other predicates).
func HasAuthTokensWith(preds ...predicate.AuthTokens) predicate.User {
+ return predicate.User(func(s *sql.Selector) {
+ step := newAuthTokensStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasNotifiers applies the HasEdge predicate on the "notifiers" edge.
+func HasNotifiers() predicate.User {
return predicate.User(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
- sqlgraph.To(AuthTokensInverseTable, FieldID),
- sqlgraph.Edge(sqlgraph.O2M, false, AuthTokensTable, AuthTokensColumn),
+ sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
)
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates).
+func HasNotifiersWith(preds ...predicate.Notifier) predicate.User {
+ return predicate.User(func(s *sql.Selector) {
+ step := newNotifiersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -763,32 +532,15 @@ func HasAuthTokensWith(preds ...predicate.AuthTokens) predicate.User {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.User) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for _, p := range predicates {
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.User(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.User) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- s1 := s.Clone().SetP(nil)
- for i, p := range predicates {
- if i > 0 {
- s1.Or()
- }
- p(s1)
- }
- s.Where(s1.P())
- })
+ return predicate.User(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.User) predicate.User {
- return predicate.User(func(s *sql.Selector) {
- p(s.Not())
- })
+ return predicate.User(sql.NotPredicates(p))
}
diff --git a/backend/internal/data/ent/user_create.go b/backend/internal/data/ent/user_create.go
index 317b43a..2cfe2d1 100644
--- a/backend/internal/data/ent/user_create.go
+++ b/backend/internal/data/ent/user_create.go
@@ -13,6 +13,7 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -83,20 +84,6 @@ func (uc *UserCreate) SetNillableIsSuperuser(b *bool) *UserCreate {
return uc
}
-// SetRole sets the "role" field.
-func (uc *UserCreate) SetRole(u user.Role) *UserCreate {
- uc.mutation.SetRole(u)
- return uc
-}
-
-// SetNillableRole sets the "role" field if the given value is not nil.
-func (uc *UserCreate) SetNillableRole(u *user.Role) *UserCreate {
- if u != nil {
- uc.SetRole(*u)
- }
- return uc
-}
-
// SetSuperuser sets the "superuser" field.
func (uc *UserCreate) SetSuperuser(b bool) *UserCreate {
uc.mutation.SetSuperuser(b)
@@ -111,6 +98,20 @@ func (uc *UserCreate) SetNillableSuperuser(b *bool) *UserCreate {
return uc
}
+// SetRole sets the "role" field.
+func (uc *UserCreate) SetRole(u user.Role) *UserCreate {
+ uc.mutation.SetRole(u)
+ return uc
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (uc *UserCreate) SetNillableRole(u *user.Role) *UserCreate {
+ if u != nil {
+ uc.SetRole(*u)
+ }
+ return uc
+}
+
// SetActivatedOn sets the "activated_on" field.
func (uc *UserCreate) SetActivatedOn(t time.Time) *UserCreate {
uc.mutation.SetActivatedOn(t)
@@ -165,6 +166,21 @@ func (uc *UserCreate) AddAuthTokens(a ...*AuthTokens) *UserCreate {
return uc.AddAuthTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (uc *UserCreate) AddNotifierIDs(ids ...uuid.UUID) *UserCreate {
+ uc.mutation.AddNotifierIDs(ids...)
+ return uc
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (uc *UserCreate) AddNotifiers(n ...*Notifier) *UserCreate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return uc.AddNotifierIDs(ids...)
+}
+
// Mutation returns the UserMutation object of the builder.
func (uc *UserCreate) Mutation() *UserMutation {
return uc.mutation
@@ -172,50 +188,8 @@ func (uc *UserCreate) Mutation() *UserMutation {
// Save creates the User in the database.
func (uc *UserCreate) Save(ctx context.Context) (*User, error) {
- var (
- err error
- node *User
- )
uc.defaults()
- if len(uc.hooks) == 0 {
- if err = uc.check(); err != nil {
- return nil, err
- }
- node, err = uc.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*UserMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = uc.check(); err != nil {
- return nil, err
- }
- uc.mutation = mutation
- if node, err = uc.sqlSave(ctx); err != nil {
- return nil, err
- }
- mutation.id = &node.ID
- mutation.done = true
- return node, err
- })
- for i := len(uc.hooks) - 1; i >= 0; i-- {
- if uc.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = uc.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, uc.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*User)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from UserMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -254,14 +228,14 @@ func (uc *UserCreate) defaults() {
v := user.DefaultIsSuperuser
uc.mutation.SetIsSuperuser(v)
}
- if _, ok := uc.mutation.Role(); !ok {
- v := user.DefaultRole
- uc.mutation.SetRole(v)
- }
if _, ok := uc.mutation.Superuser(); !ok {
v := user.DefaultSuperuser
uc.mutation.SetSuperuser(v)
}
+ if _, ok := uc.mutation.Role(); !ok {
+ v := user.DefaultRole
+ uc.mutation.SetRole(v)
+ }
if _, ok := uc.mutation.ID(); !ok {
v := user.DefaultID()
uc.mutation.SetID(v)
@@ -303,6 +277,9 @@ func (uc *UserCreate) check() error {
if _, ok := uc.mutation.IsSuperuser(); !ok {
return &ValidationError{Name: "is_superuser", err: errors.New(`ent: missing required field "User.is_superuser"`)}
}
+ if _, ok := uc.mutation.Superuser(); !ok {
+ return &ValidationError{Name: "superuser", err: errors.New(`ent: missing required field "User.superuser"`)}
+ }
if _, ok := uc.mutation.Role(); !ok {
return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "User.role"`)}
}
@@ -311,9 +288,6 @@ func (uc *UserCreate) check() error {
return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)}
}
}
- if _, ok := uc.mutation.Superuser(); !ok {
- return &ValidationError{Name: "superuser", err: errors.New(`ent: missing required field "User.superuser"`)}
- }
if _, ok := uc.mutation.GroupID(); !ok {
return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "User.group"`)}
}
@@ -321,6 +295,9 @@ func (uc *UserCreate) check() error {
}
func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) {
+ if err := uc.check(); err != nil {
+ return nil, err
+ }
_node, _spec := uc.createSpec()
if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@@ -335,94 +312,54 @@ func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) {
return nil, err
}
}
+ uc.mutation.id = &_node.ID
+ uc.mutation.done = true
return _node, nil
}
func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
var (
_node = &User{config: uc.config}
- _spec = &sqlgraph.CreateSpec{
- Table: user.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
- }
+ _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID))
)
if id, ok := uc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := uc.mutation.CreatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldCreatedAt,
- })
+ _spec.SetField(user.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = value
}
if value, ok := uc.mutation.UpdatedAt(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldUpdatedAt,
- })
+ _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = value
}
if value, ok := uc.mutation.Name(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldName,
- })
+ _spec.SetField(user.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := uc.mutation.Email(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldEmail,
- })
+ _spec.SetField(user.FieldEmail, field.TypeString, value)
_node.Email = value
}
if value, ok := uc.mutation.Password(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldPassword,
- })
+ _spec.SetField(user.FieldPassword, field.TypeString, value)
_node.Password = value
}
if value, ok := uc.mutation.IsSuperuser(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldIsSuperuser,
- })
+ _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value)
_node.IsSuperuser = value
}
- if value, ok := uc.mutation.Role(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: user.FieldRole,
- })
- _node.Role = value
- }
if value, ok := uc.mutation.Superuser(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldSuperuser,
- })
+ _spec.SetField(user.FieldSuperuser, field.TypeBool, value)
_node.Superuser = value
}
+ if value, ok := uc.mutation.Role(); ok {
+ _spec.SetField(user.FieldRole, field.TypeEnum, value)
+ _node.Role = value
+ }
if value, ok := uc.mutation.ActivatedOn(); ok {
- _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldActivatedOn,
- })
+ _spec.SetField(user.FieldActivatedOn, field.TypeTime, value)
_node.ActivatedOn = value
}
if nodes := uc.mutation.GroupIDs(); len(nodes) > 0 {
@@ -433,10 +370,7 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
Columns: []string{user.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -453,10 +387,23 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := uc.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -470,11 +417,15 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
// UserCreateBulk is the builder for creating many User entities in bulk.
type UserCreateBulk struct {
config
+ err error
builders []*UserCreate
}
// Save creates the User entities in the database.
func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) {
+ if ucb.err != nil {
+ return nil, ucb.err
+ }
specs := make([]*sqlgraph.CreateSpec, len(ucb.builders))
nodes := make([]*User, len(ucb.builders))
mutators := make([]Mutator, len(ucb.builders))
@@ -491,8 +442,8 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) {
return nil, err
}
builder.mutation = mutation
- nodes[i], specs[i] = builder.createSpec()
var err error
+ nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation)
} else {
diff --git a/backend/internal/data/ent/user_delete.go b/backend/internal/data/ent/user_delete.go
index 9013f6f..08fd3ef 100644
--- a/backend/internal/data/ent/user_delete.go
+++ b/backend/internal/data/ent/user_delete.go
@@ -4,7 +4,6 @@ package ent
import (
"context"
- "fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@@ -28,34 +27,7 @@ func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ud *UserDelete) Exec(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
- if len(ud.hooks) == 0 {
- affected, err = ud.sqlExec(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*UserMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- ud.mutation = mutation
- affected, err = ud.sqlExec(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(ud.hooks) - 1; i >= 0; i-- {
- if ud.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = ud.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, ud.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@@ -68,15 +40,7 @@ func (ud *UserDelete) ExecX(ctx context.Context) int {
}
func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) {
- _spec := &sqlgraph.DeleteSpec{
- Node: &sqlgraph.NodeSpec{
- Table: user.Table,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
- },
- }
+ _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID))
if ps := ud.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -88,6 +52,7 @@ func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
+ ud.mutation.done = true
return affected, err
}
@@ -96,6 +61,12 @@ type UserDeleteOne struct {
ud *UserDelete
}
+// Where appends a list predicates to the UserDelete builder.
+func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne {
+ udo.ud.mutation.Where(ps...)
+ return udo
+}
+
// Exec executes the deletion query.
func (udo *UserDeleteOne) Exec(ctx context.Context) error {
n, err := udo.ud.Exec(ctx)
@@ -111,5 +82,7 @@ func (udo *UserDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (udo *UserDeleteOne) ExecX(ctx context.Context) {
- udo.ud.ExecX(ctx)
+ if err := udo.Exec(ctx); err != nil {
+ panic(err)
+ }
}
diff --git a/backend/internal/data/ent/user_query.go b/backend/internal/data/ent/user_query.go
index 2178bd3..7205e9b 100644
--- a/backend/internal/data/ent/user_query.go
+++ b/backend/internal/data/ent/user_query.go
@@ -14,6 +14,7 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -21,14 +22,13 @@ import (
// UserQuery is the builder for querying User entities.
type UserQuery struct {
config
- limit *int
- offset *int
- unique *bool
- order []OrderFunc
- fields []string
+ ctx *QueryContext
+ order []user.OrderOption
+ inters []Interceptor
predicates []predicate.User
withGroup *GroupQuery
withAuthTokens *AuthTokensQuery
+ withNotifiers *NotifierQuery
withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
@@ -41,34 +41,34 @@ func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery {
return uq
}
-// Limit adds a limit step to the query.
+// Limit the number of records to be returned by this query.
func (uq *UserQuery) Limit(limit int) *UserQuery {
- uq.limit = &limit
+ uq.ctx.Limit = &limit
return uq
}
-// Offset adds an offset step to the query.
+// Offset to start from.
func (uq *UserQuery) Offset(offset int) *UserQuery {
- uq.offset = &offset
+ uq.ctx.Offset = &offset
return uq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (uq *UserQuery) Unique(unique bool) *UserQuery {
- uq.unique = &unique
+ uq.ctx.Unique = &unique
return uq
}
-// Order adds an order step to the query.
-func (uq *UserQuery) Order(o ...OrderFunc) *UserQuery {
+// Order specifies how the records should be ordered.
+func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery {
uq.order = append(uq.order, o...)
return uq
}
// QueryGroup chains the current query on the "group" edge.
func (uq *UserQuery) QueryGroup() *GroupQuery {
- query := &GroupQuery{config: uq.config}
+ query := (&GroupClient{config: uq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
@@ -90,7 +90,7 @@ func (uq *UserQuery) QueryGroup() *GroupQuery {
// QueryAuthTokens chains the current query on the "auth_tokens" edge.
func (uq *UserQuery) QueryAuthTokens() *AuthTokensQuery {
- query := &AuthTokensQuery{config: uq.config}
+ query := (&AuthTokensClient{config: uq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
@@ -110,10 +110,32 @@ func (uq *UserQuery) QueryAuthTokens() *AuthTokensQuery {
return query
}
+// QueryNotifiers chains the current query on the "notifiers" edge.
+func (uq *UserQuery) QueryNotifiers() *NotifierQuery {
+ query := (&NotifierClient{config: uq.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := uq.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := uq.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(user.Table, user.FieldID, selector),
+ sqlgraph.To(notifier.Table, notifier.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, user.NotifiersTable, user.NotifiersColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// First returns the first User entity from the query.
// Returns a *NotFoundError when no User was found.
func (uq *UserQuery) First(ctx context.Context) (*User, error) {
- nodes, err := uq.Limit(1).All(ctx)
+ nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First"))
if err != nil {
return nil, err
}
@@ -136,7 +158,7 @@ func (uq *UserQuery) FirstX(ctx context.Context) *User {
// Returns a *NotFoundError when no User ID was found.
func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = uq.Limit(1).IDs(ctx); err != nil {
+ if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@@ -159,7 +181,7 @@ func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one User entity is found.
// Returns a *NotFoundError when no User entities are found.
func (uq *UserQuery) Only(ctx context.Context) (*User, error) {
- nodes, err := uq.Limit(2).All(ctx)
+ nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only"))
if err != nil {
return nil, err
}
@@ -187,7 +209,7 @@ func (uq *UserQuery) OnlyX(ctx context.Context) *User {
// Returns a *NotFoundError when no entities are found.
func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
- if ids, err = uq.Limit(2).IDs(ctx); err != nil {
+ if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@@ -212,10 +234,12 @@ func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Users.
func (uq *UserQuery) All(ctx context.Context) ([]*User, error) {
+ ctx = setContextOp(ctx, uq.ctx, "All")
if err := uq.prepareQuery(ctx); err != nil {
return nil, err
}
- return uq.sqlAll(ctx)
+ qr := querierAll[[]*User, *UserQuery]()
+ return withInterceptors[[]*User](ctx, uq, qr, uq.inters)
}
// AllX is like All, but panics if an error occurs.
@@ -228,9 +252,12 @@ func (uq *UserQuery) AllX(ctx context.Context) []*User {
}
// IDs executes the query and returns a list of User IDs.
-func (uq *UserQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
- var ids []uuid.UUID
- if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil {
+func (uq *UserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if uq.ctx.Unique == nil && uq.path != nil {
+ uq.Unique(true)
+ }
+ ctx = setContextOp(ctx, uq.ctx, "IDs")
+ if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@@ -247,10 +274,11 @@ func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query.
func (uq *UserQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, uq.ctx, "Count")
if err := uq.prepareQuery(ctx); err != nil {
return 0, err
}
- return uq.sqlCount(ctx)
+ return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters)
}
// CountX is like Count, but panics if an error occurs.
@@ -264,10 +292,15 @@ func (uq *UserQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (uq *UserQuery) Exist(ctx context.Context) (bool, error) {
- if err := uq.prepareQuery(ctx); err != nil {
- return false, err
+ ctx = setContextOp(ctx, uq.ctx, "Exist")
+ switch _, err := uq.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
}
- return uq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@@ -287,23 +320,23 @@ func (uq *UserQuery) Clone() *UserQuery {
}
return &UserQuery{
config: uq.config,
- limit: uq.limit,
- offset: uq.offset,
- order: append([]OrderFunc{}, uq.order...),
+ ctx: uq.ctx.Clone(),
+ order: append([]user.OrderOption{}, uq.order...),
+ inters: append([]Interceptor{}, uq.inters...),
predicates: append([]predicate.User{}, uq.predicates...),
withGroup: uq.withGroup.Clone(),
withAuthTokens: uq.withAuthTokens.Clone(),
+ withNotifiers: uq.withNotifiers.Clone(),
// clone intermediate query.
- sql: uq.sql.Clone(),
- path: uq.path,
- unique: uq.unique,
+ sql: uq.sql.Clone(),
+ path: uq.path,
}
}
// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithGroup(opts ...func(*GroupQuery)) *UserQuery {
- query := &GroupQuery{config: uq.config}
+ query := (&GroupClient{config: uq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -314,7 +347,7 @@ func (uq *UserQuery) WithGroup(opts ...func(*GroupQuery)) *UserQuery {
// WithAuthTokens tells the query-builder to eager-load the nodes that are connected to
// the "auth_tokens" edge. The optional arguments are used to configure the query builder of the edge.
func (uq *UserQuery) WithAuthTokens(opts ...func(*AuthTokensQuery)) *UserQuery {
- query := &AuthTokensQuery{config: uq.config}
+ query := (&AuthTokensClient{config: uq.config}).Query()
for _, opt := range opts {
opt(query)
}
@@ -322,6 +355,17 @@ func (uq *UserQuery) WithAuthTokens(opts ...func(*AuthTokensQuery)) *UserQuery {
return uq
}
+// WithNotifiers tells the query-builder to eager-load the nodes that are connected to
+// the "notifiers" edge. The optional arguments are used to configure the query builder of the edge.
+func (uq *UserQuery) WithNotifiers(opts ...func(*NotifierQuery)) *UserQuery {
+ query := (&NotifierClient{config: uq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ uq.withNotifiers = query
+ return uq
+}
+
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
@@ -337,16 +381,11 @@ func (uq *UserQuery) WithAuthTokens(opts ...func(*AuthTokensQuery)) *UserQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
- grbuild := &UserGroupBy{config: uq.config}
- grbuild.fields = append([]string{field}, fields...)
- grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
- if err := uq.prepareQuery(ctx); err != nil {
- return nil, err
- }
- return uq.sqlQuery(ctx), nil
- }
+ uq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &UserGroupBy{build: uq}
+ grbuild.flds = &uq.ctx.Fields
grbuild.label = user.Label
- grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+ grbuild.scan = grbuild.Scan
return grbuild
}
@@ -363,15 +402,30 @@ func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
// Select(user.FieldCreatedAt).
// Scan(ctx, &v)
func (uq *UserQuery) Select(fields ...string) *UserSelect {
- uq.fields = append(uq.fields, fields...)
- selbuild := &UserSelect{UserQuery: uq}
- selbuild.label = user.Label
- selbuild.flds, selbuild.scan = &uq.fields, selbuild.Scan
- return selbuild
+ uq.ctx.Fields = append(uq.ctx.Fields, fields...)
+ sbuild := &UserSelect{UserQuery: uq}
+ sbuild.label = user.Label
+ sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a UserSelect configured with the given aggregations.
+func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect {
+ return uq.Select().Aggregate(fns...)
}
func (uq *UserQuery) prepareQuery(ctx context.Context) error {
- for _, f := range uq.fields {
+ for _, inter := range uq.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, uq); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range uq.ctx.Fields {
if !user.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@@ -391,9 +445,10 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
nodes = []*User{}
withFKs = uq.withFKs
_spec = uq.querySpec()
- loadedTypes = [2]bool{
+ loadedTypes = [3]bool{
uq.withGroup != nil,
uq.withAuthTokens != nil,
+ uq.withNotifiers != nil,
}
)
if uq.withGroup != nil {
@@ -433,6 +488,13 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
return nil, err
}
}
+ if query := uq.withNotifiers; query != nil {
+ if err := uq.loadNotifiers(ctx, query, nodes,
+ func(n *User) { n.Edges.Notifiers = []*Notifier{} },
+ func(n *User, e *Notifier) { n.Edges.Notifiers = append(n.Edges.Notifiers, e) }); err != nil {
+ return nil, err
+ }
+ }
return nodes, nil
}
@@ -449,6 +511,9 @@ func (uq *UserQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
+ if len(ids) == 0 {
+ return nil
+ }
query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@@ -477,7 +542,7 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery,
}
query.withFKs = true
query.Where(predicate.AuthTokens(func(s *sql.Selector) {
- s.Where(sql.InValues(user.AuthTokensColumn, fks...))
+ s.Where(sql.InValues(s.C(user.AuthTokensColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -490,7 +555,37 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery,
}
node, ok := nodeids[*fk]
if !ok {
- return fmt.Errorf(`unexpected foreign-key "user_auth_tokens" returned %v for node %v`, *fk, n.ID)
+ return fmt.Errorf(`unexpected referenced foreign-key "user_auth_tokens" returned %v for node %v`, *fk, n.ID)
+ }
+ assign(node, n)
+ }
+ return nil
+}
+func (uq *UserQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, nodes []*User, init func(*User), assign func(*User, *Notifier)) error {
+ fks := make([]driver.Value, 0, len(nodes))
+ nodeids := make(map[uuid.UUID]*User)
+ for i := range nodes {
+ fks = append(fks, nodes[i].ID)
+ nodeids[nodes[i].ID] = nodes[i]
+ if init != nil {
+ init(nodes[i])
+ }
+ }
+ if len(query.ctx.Fields) > 0 {
+ query.ctx.AppendFieldOnce(notifier.FieldUserID)
+ }
+ query.Where(predicate.Notifier(func(s *sql.Selector) {
+ s.Where(sql.InValues(s.C(user.NotifiersColumn), fks...))
+ }))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ fk := n.UserID
+ node, ok := nodeids[fk]
+ if !ok {
+ return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@@ -499,41 +594,22 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery,
func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) {
_spec := uq.querySpec()
- _spec.Node.Columns = uq.fields
- if len(uq.fields) > 0 {
- _spec.Unique = uq.unique != nil && *uq.unique
+ _spec.Node.Columns = uq.ctx.Fields
+ if len(uq.ctx.Fields) > 0 {
+ _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, uq.driver, _spec)
}
-func (uq *UserQuery) sqlExist(ctx context.Context) (bool, error) {
- switch _, err := uq.FirstID(ctx); {
- case IsNotFound(err):
- return false, nil
- case err != nil:
- return false, fmt.Errorf("ent: check existence: %w", err)
- default:
- return true, nil
- }
-}
-
func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
- _spec := &sqlgraph.QuerySpec{
- Node: &sqlgraph.NodeSpec{
- Table: user.Table,
- Columns: user.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
- },
- From: uq.sql,
- Unique: true,
- }
- if unique := uq.unique; unique != nil {
+ _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID))
+ _spec.From = uq.sql
+ if unique := uq.ctx.Unique; unique != nil {
_spec.Unique = *unique
+ } else if uq.path != nil {
+ _spec.Unique = true
}
- if fields := uq.fields; len(fields) > 0 {
+ if fields := uq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, user.FieldID)
for i := range fields {
@@ -549,10 +625,10 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
- if limit := uq.limit; limit != nil {
+ if limit := uq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
- if offset := uq.offset; offset != nil {
+ if offset := uq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := uq.order; len(ps) > 0 {
@@ -568,7 +644,7 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(uq.driver.Dialect())
t1 := builder.Table(user.Table)
- columns := uq.fields
+ columns := uq.ctx.Fields
if len(columns) == 0 {
columns = user.Columns
}
@@ -577,7 +653,7 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = uq.sql
selector.Select(selector.Columns(columns...)...)
}
- if uq.unique != nil && *uq.unique {
+ if uq.ctx.Unique != nil && *uq.ctx.Unique {
selector.Distinct()
}
for _, p := range uq.predicates {
@@ -586,12 +662,12 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range uq.order {
p(selector)
}
- if offset := uq.offset; offset != nil {
+ if offset := uq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
- if limit := uq.limit; limit != nil {
+ if limit := uq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@@ -599,13 +675,8 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
// UserGroupBy is the group-by builder for User entities.
type UserGroupBy struct {
- config
selector
- fields []string
- fns []AggregateFunc
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
- path func(context.Context) (*sql.Selector, error)
+ build *UserQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@@ -614,74 +685,77 @@ func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy {
return ugb
}
-// Scan applies the group-by query and scans the result into the given value.
+// Scan applies the selector query and scans the result into the given value.
func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error {
- query, err := ugb.path(ctx)
- if err != nil {
+ ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy")
+ if err := ugb.build.prepareQuery(ctx); err != nil {
return err
}
- ugb.sql = query
- return ugb.sqlScan(ctx, v)
+ return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v)
}
-func (ugb *UserGroupBy) sqlScan(ctx context.Context, v any) error {
- for _, f := range ugb.fields {
- if !user.ValidColumn(f) {
- return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
- }
- }
- selector := ugb.sqlQuery()
- if err := selector.Err(); err != nil {
- return err
- }
- rows := &sql.Rows{}
- query, args := selector.Query()
- if err := ugb.driver.Query(ctx, query, args, rows); err != nil {
- return err
- }
- defer rows.Close()
- return sql.ScanSlice(rows, v)
-}
-
-func (ugb *UserGroupBy) sqlQuery() *sql.Selector {
- selector := ugb.sql.Select()
+func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(ugb.fns))
for _, fn := range ugb.fns {
aggregation = append(aggregation, fn(selector))
}
- // If no columns were selected in a custom aggregation function, the default
- // selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
- columns := make([]string, 0, len(ugb.fields)+len(ugb.fns))
- for _, f := range ugb.fields {
+ columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns))
+ for _, f := range *ugb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
- return selector.GroupBy(selector.Columns(ugb.fields...)...)
+ selector.GroupBy(selector.Columns(*ugb.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
}
// UserSelect is the builder for selecting fields of User entities.
type UserSelect struct {
*UserQuery
selector
- // intermediate query (i.e. traversal path).
- sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect {
+ us.fns = append(us.fns, fns...)
+ return us
}
// Scan applies the selector query and scans the result into the given value.
func (us *UserSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, us.ctx, "Select")
if err := us.prepareQuery(ctx); err != nil {
return err
}
- us.sql = us.UserQuery.sqlQuery(ctx)
- return us.sqlScan(ctx, v)
+ return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v)
}
-func (us *UserSelect) sqlScan(ctx context.Context, v any) error {
+func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(us.fns))
+ for _, fn := range us.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*us.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
rows := &sql.Rows{}
- query, args := us.sql.Query()
+ query, args := selector.Query()
if err := us.driver.Query(ctx, query, args, rows); err != nil {
return err
}
diff --git a/backend/internal/data/ent/user_update.go b/backend/internal/data/ent/user_update.go
index bfe8d3b..0e4c01a 100644
--- a/backend/internal/data/ent/user_update.go
+++ b/backend/internal/data/ent/user_update.go
@@ -14,6 +14,7 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -43,18 +44,42 @@ func (uu *UserUpdate) SetName(s string) *UserUpdate {
return uu
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (uu *UserUpdate) SetNillableName(s *string) *UserUpdate {
+ if s != nil {
+ uu.SetName(*s)
+ }
+ return uu
+}
+
// SetEmail sets the "email" field.
func (uu *UserUpdate) SetEmail(s string) *UserUpdate {
uu.mutation.SetEmail(s)
return uu
}
+// SetNillableEmail sets the "email" field if the given value is not nil.
+func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate {
+ if s != nil {
+ uu.SetEmail(*s)
+ }
+ return uu
+}
+
// SetPassword sets the "password" field.
func (uu *UserUpdate) SetPassword(s string) *UserUpdate {
uu.mutation.SetPassword(s)
return uu
}
+// SetNillablePassword sets the "password" field if the given value is not nil.
+func (uu *UserUpdate) SetNillablePassword(s *string) *UserUpdate {
+ if s != nil {
+ uu.SetPassword(*s)
+ }
+ return uu
+}
+
// SetIsSuperuser sets the "is_superuser" field.
func (uu *UserUpdate) SetIsSuperuser(b bool) *UserUpdate {
uu.mutation.SetIsSuperuser(b)
@@ -69,20 +94,6 @@ func (uu *UserUpdate) SetNillableIsSuperuser(b *bool) *UserUpdate {
return uu
}
-// SetRole sets the "role" field.
-func (uu *UserUpdate) SetRole(u user.Role) *UserUpdate {
- uu.mutation.SetRole(u)
- return uu
-}
-
-// SetNillableRole sets the "role" field if the given value is not nil.
-func (uu *UserUpdate) SetNillableRole(u *user.Role) *UserUpdate {
- if u != nil {
- uu.SetRole(*u)
- }
- return uu
-}
-
// SetSuperuser sets the "superuser" field.
func (uu *UserUpdate) SetSuperuser(b bool) *UserUpdate {
uu.mutation.SetSuperuser(b)
@@ -97,6 +108,20 @@ func (uu *UserUpdate) SetNillableSuperuser(b *bool) *UserUpdate {
return uu
}
+// SetRole sets the "role" field.
+func (uu *UserUpdate) SetRole(u user.Role) *UserUpdate {
+ uu.mutation.SetRole(u)
+ return uu
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (uu *UserUpdate) SetNillableRole(u *user.Role) *UserUpdate {
+ if u != nil {
+ uu.SetRole(*u)
+ }
+ return uu
+}
+
// SetActivatedOn sets the "activated_on" field.
func (uu *UserUpdate) SetActivatedOn(t time.Time) *UserUpdate {
uu.mutation.SetActivatedOn(t)
@@ -143,6 +168,21 @@ func (uu *UserUpdate) AddAuthTokens(a ...*AuthTokens) *UserUpdate {
return uu.AddAuthTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (uu *UserUpdate) AddNotifierIDs(ids ...uuid.UUID) *UserUpdate {
+ uu.mutation.AddNotifierIDs(ids...)
+ return uu
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (uu *UserUpdate) AddNotifiers(n ...*Notifier) *UserUpdate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return uu.AddNotifierIDs(ids...)
+}
+
// Mutation returns the UserMutation object of the builder.
func (uu *UserUpdate) Mutation() *UserMutation {
return uu.mutation
@@ -175,43 +215,31 @@ func (uu *UserUpdate) RemoveAuthTokens(a ...*AuthTokens) *UserUpdate {
return uu.RemoveAuthTokenIDs(ids...)
}
+// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
+func (uu *UserUpdate) ClearNotifiers() *UserUpdate {
+ uu.mutation.ClearNotifiers()
+ return uu
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
+func (uu *UserUpdate) RemoveNotifierIDs(ids ...uuid.UUID) *UserUpdate {
+ uu.mutation.RemoveNotifierIDs(ids...)
+ return uu
+}
+
+// RemoveNotifiers removes "notifiers" edges to Notifier entities.
+func (uu *UserUpdate) RemoveNotifiers(n ...*Notifier) *UserUpdate {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return uu.RemoveNotifierIDs(ids...)
+}
+
// Save executes the query and returns the number of nodes affected by the update operation.
func (uu *UserUpdate) Save(ctx context.Context) (int, error) {
- var (
- err error
- affected int
- )
uu.defaults()
- if len(uu.hooks) == 0 {
- if err = uu.check(); err != nil {
- return 0, err
- }
- affected, err = uu.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*UserMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = uu.check(); err != nil {
- return 0, err
- }
- uu.mutation = mutation
- affected, err = uu.sqlSave(ctx)
- mutation.done = true
- return affected, err
- })
- for i := len(uu.hooks) - 1; i >= 0; i-- {
- if uu.hooks[i] == nil {
- return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = uu.hooks[i](mut)
- }
- if _, err := mut.Mutate(ctx, uu.mutation); err != nil {
- return 0, err
- }
- }
- return affected, err
+ return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -273,16 +301,10 @@ func (uu *UserUpdate) check() error {
}
func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: user.Table,
- Columns: user.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
- },
+ if err := uu.check(); err != nil {
+ return n, err
}
+ _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID))
if ps := uu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@@ -291,66 +313,31 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := uu.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldUpdatedAt,
- })
+ _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := uu.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldName,
- })
+ _spec.SetField(user.FieldName, field.TypeString, value)
}
if value, ok := uu.mutation.Email(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldEmail,
- })
+ _spec.SetField(user.FieldEmail, field.TypeString, value)
}
if value, ok := uu.mutation.Password(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldPassword,
- })
+ _spec.SetField(user.FieldPassword, field.TypeString, value)
}
if value, ok := uu.mutation.IsSuperuser(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldIsSuperuser,
- })
- }
- if value, ok := uu.mutation.Role(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: user.FieldRole,
- })
+ _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value)
}
if value, ok := uu.mutation.Superuser(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldSuperuser,
- })
+ _spec.SetField(user.FieldSuperuser, field.TypeBool, value)
+ }
+ if value, ok := uu.mutation.Role(); ok {
+ _spec.SetField(user.FieldRole, field.TypeEnum, value)
}
if value, ok := uu.mutation.ActivatedOn(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldActivatedOn,
- })
+ _spec.SetField(user.FieldActivatedOn, field.TypeTime, value)
}
if uu.mutation.ActivatedOnCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: user.FieldActivatedOn,
- })
+ _spec.ClearField(user.FieldActivatedOn, field.TypeTime)
}
if uu.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -360,10 +347,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{user.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -376,10 +360,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{user.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -395,10 +376,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -411,10 +389,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -430,10 +405,52 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if uu.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := uu.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !uu.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := uu.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -449,6 +466,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
+ uu.mutation.done = true
return n, nil
}
@@ -472,18 +490,42 @@ func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne {
return uuo
}
+// SetNillableName sets the "name" field if the given value is not nil.
+func (uuo *UserUpdateOne) SetNillableName(s *string) *UserUpdateOne {
+ if s != nil {
+ uuo.SetName(*s)
+ }
+ return uuo
+}
+
// SetEmail sets the "email" field.
func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne {
uuo.mutation.SetEmail(s)
return uuo
}
+// SetNillableEmail sets the "email" field if the given value is not nil.
+func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne {
+ if s != nil {
+ uuo.SetEmail(*s)
+ }
+ return uuo
+}
+
// SetPassword sets the "password" field.
func (uuo *UserUpdateOne) SetPassword(s string) *UserUpdateOne {
uuo.mutation.SetPassword(s)
return uuo
}
+// SetNillablePassword sets the "password" field if the given value is not nil.
+func (uuo *UserUpdateOne) SetNillablePassword(s *string) *UserUpdateOne {
+ if s != nil {
+ uuo.SetPassword(*s)
+ }
+ return uuo
+}
+
// SetIsSuperuser sets the "is_superuser" field.
func (uuo *UserUpdateOne) SetIsSuperuser(b bool) *UserUpdateOne {
uuo.mutation.SetIsSuperuser(b)
@@ -498,20 +540,6 @@ func (uuo *UserUpdateOne) SetNillableIsSuperuser(b *bool) *UserUpdateOne {
return uuo
}
-// SetRole sets the "role" field.
-func (uuo *UserUpdateOne) SetRole(u user.Role) *UserUpdateOne {
- uuo.mutation.SetRole(u)
- return uuo
-}
-
-// SetNillableRole sets the "role" field if the given value is not nil.
-func (uuo *UserUpdateOne) SetNillableRole(u *user.Role) *UserUpdateOne {
- if u != nil {
- uuo.SetRole(*u)
- }
- return uuo
-}
-
// SetSuperuser sets the "superuser" field.
func (uuo *UserUpdateOne) SetSuperuser(b bool) *UserUpdateOne {
uuo.mutation.SetSuperuser(b)
@@ -526,6 +554,20 @@ func (uuo *UserUpdateOne) SetNillableSuperuser(b *bool) *UserUpdateOne {
return uuo
}
+// SetRole sets the "role" field.
+func (uuo *UserUpdateOne) SetRole(u user.Role) *UserUpdateOne {
+ uuo.mutation.SetRole(u)
+ return uuo
+}
+
+// SetNillableRole sets the "role" field if the given value is not nil.
+func (uuo *UserUpdateOne) SetNillableRole(u *user.Role) *UserUpdateOne {
+ if u != nil {
+ uuo.SetRole(*u)
+ }
+ return uuo
+}
+
// SetActivatedOn sets the "activated_on" field.
func (uuo *UserUpdateOne) SetActivatedOn(t time.Time) *UserUpdateOne {
uuo.mutation.SetActivatedOn(t)
@@ -572,6 +614,21 @@ func (uuo *UserUpdateOne) AddAuthTokens(a ...*AuthTokens) *UserUpdateOne {
return uuo.AddAuthTokenIDs(ids...)
}
+// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
+func (uuo *UserUpdateOne) AddNotifierIDs(ids ...uuid.UUID) *UserUpdateOne {
+ uuo.mutation.AddNotifierIDs(ids...)
+ return uuo
+}
+
+// AddNotifiers adds the "notifiers" edges to the Notifier entity.
+func (uuo *UserUpdateOne) AddNotifiers(n ...*Notifier) *UserUpdateOne {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return uuo.AddNotifierIDs(ids...)
+}
+
// Mutation returns the UserMutation object of the builder.
func (uuo *UserUpdateOne) Mutation() *UserMutation {
return uuo.mutation
@@ -604,6 +661,33 @@ func (uuo *UserUpdateOne) RemoveAuthTokens(a ...*AuthTokens) *UserUpdateOne {
return uuo.RemoveAuthTokenIDs(ids...)
}
+// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
+func (uuo *UserUpdateOne) ClearNotifiers() *UserUpdateOne {
+ uuo.mutation.ClearNotifiers()
+ return uuo
+}
+
+// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
+func (uuo *UserUpdateOne) RemoveNotifierIDs(ids ...uuid.UUID) *UserUpdateOne {
+ uuo.mutation.RemoveNotifierIDs(ids...)
+ return uuo
+}
+
+// RemoveNotifiers removes "notifiers" edges to Notifier entities.
+func (uuo *UserUpdateOne) RemoveNotifiers(n ...*Notifier) *UserUpdateOne {
+ ids := make([]uuid.UUID, len(n))
+ for i := range n {
+ ids[i] = n[i].ID
+ }
+ return uuo.RemoveNotifierIDs(ids...)
+}
+
+// Where appends a list predicates to the UserUpdate builder.
+func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne {
+ uuo.mutation.Where(ps...)
+ return uuo
+}
+
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne {
@@ -613,47 +697,8 @@ func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne
// Save executes the query and returns the updated User entity.
func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) {
- var (
- err error
- node *User
- )
uuo.defaults()
- if len(uuo.hooks) == 0 {
- if err = uuo.check(); err != nil {
- return nil, err
- }
- node, err = uuo.sqlSave(ctx)
- } else {
- var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
- mutation, ok := m.(*UserMutation)
- if !ok {
- return nil, fmt.Errorf("unexpected mutation type %T", m)
- }
- if err = uuo.check(); err != nil {
- return nil, err
- }
- uuo.mutation = mutation
- node, err = uuo.sqlSave(ctx)
- mutation.done = true
- return node, err
- })
- for i := len(uuo.hooks) - 1; i >= 0; i-- {
- if uuo.hooks[i] == nil {
- return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
- }
- mut = uuo.hooks[i](mut)
- }
- v, err := mut.Mutate(ctx, uuo.mutation)
- if err != nil {
- return nil, err
- }
- nv, ok := v.(*User)
- if !ok {
- return nil, fmt.Errorf("unexpected node type %T returned from UserMutation", v)
- }
- node = nv
- }
- return node, err
+ return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -715,16 +760,10 @@ func (uuo *UserUpdateOne) check() error {
}
func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) {
- _spec := &sqlgraph.UpdateSpec{
- Node: &sqlgraph.NodeSpec{
- Table: user.Table,
- Columns: user.Columns,
- ID: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: user.FieldID,
- },
- },
+ if err := uuo.check(); err != nil {
+ return _node, err
}
+ _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID))
id, ok := uuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)}
@@ -750,66 +789,31 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
}
}
if value, ok := uuo.mutation.UpdatedAt(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldUpdatedAt,
- })
+ _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value)
}
if value, ok := uuo.mutation.Name(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldName,
- })
+ _spec.SetField(user.FieldName, field.TypeString, value)
}
if value, ok := uuo.mutation.Email(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldEmail,
- })
+ _spec.SetField(user.FieldEmail, field.TypeString, value)
}
if value, ok := uuo.mutation.Password(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeString,
- Value: value,
- Column: user.FieldPassword,
- })
+ _spec.SetField(user.FieldPassword, field.TypeString, value)
}
if value, ok := uuo.mutation.IsSuperuser(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldIsSuperuser,
- })
- }
- if value, ok := uuo.mutation.Role(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeEnum,
- Value: value,
- Column: user.FieldRole,
- })
+ _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value)
}
if value, ok := uuo.mutation.Superuser(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeBool,
- Value: value,
- Column: user.FieldSuperuser,
- })
+ _spec.SetField(user.FieldSuperuser, field.TypeBool, value)
+ }
+ if value, ok := uuo.mutation.Role(); ok {
+ _spec.SetField(user.FieldRole, field.TypeEnum, value)
}
if value, ok := uuo.mutation.ActivatedOn(); ok {
- _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Value: value,
- Column: user.FieldActivatedOn,
- })
+ _spec.SetField(user.FieldActivatedOn, field.TypeTime, value)
}
if uuo.mutation.ActivatedOnCleared() {
- _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
- Type: field.TypeTime,
- Column: user.FieldActivatedOn,
- })
+ _spec.ClearField(user.FieldActivatedOn, field.TypeTime)
}
if uuo.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
@@ -819,10 +823,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
Columns: []string{user.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -835,10 +836,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
Columns: []string{user.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: group.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -854,10 +852,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -870,10 +865,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -889,10 +881,52 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
Columns: []string{user.AuthTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
- IDSpec: &sqlgraph.FieldSpec{
- Type: field.TypeUUID,
- Column: authtokens.FieldID,
- },
+ IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if uuo.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := uuo.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !uuo.mutation.NotifiersCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := uuo.mutation.NotifiersIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.NotifiersTable,
+ Columns: []string{user.NotifiersColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -911,5 +945,6 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error)
}
return nil, err
}
+ uuo.mutation.done = true
return _node, nil
}
diff --git a/backend/internal/data/migrations/migrations.go b/backend/internal/data/migrations/migrations.go
index 83354aa..a2afdc8 100644
--- a/backend/internal/data/migrations/migrations.go
+++ b/backend/internal/data/migrations/migrations.go
@@ -1,25 +1,25 @@
+// Package migrations provides a way to embed the migrations into the binary.
package migrations
import (
"embed"
"os"
- "path/filepath"
+ "path"
)
-// go:embed all:migrations
+//go:embed all:migrations
var Files embed.FS
// Write writes the embedded migrations to a temporary directory.
// It returns an error and a cleanup function. The cleanup function
// should be called when the migrations are no longer needed.
func Write(temp string) error {
- err := os.MkdirAll(temp, 0755)
-
+ err := os.MkdirAll(temp, 0o755)
if err != nil {
return err
}
- fsDir, err := Files.ReadDir(".")
+ fsDir, err := Files.ReadDir("migrations")
if err != nil {
return err
}
@@ -29,12 +29,12 @@ func Write(temp string) error {
continue
}
- b, err := Files.ReadFile(filepath.Join("migrations", f.Name()))
+ b, err := Files.ReadFile(path.Join("migrations", f.Name()))
if err != nil {
return err
}
- err = os.WriteFile(filepath.Join(temp, f.Name()), b, 0644)
+ err = os.WriteFile(path.Join(temp, f.Name()), b, 0o644)
if err != nil {
return err
}
diff --git a/backend/internal/data/migrations/migrations/20221113012312_add_asset_id_field.sql b/backend/internal/data/migrations/migrations/20221113012312_add_asset_id_field.sql
new file mode 100644
index 0000000..5bcf3ad
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20221113012312_add_asset_id_field.sql
@@ -0,0 +1,24 @@
+-- disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+-- create "new_items" table
+CREATE TABLE `new_items` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `name` text NOT NULL, `description` text NULL, `import_ref` text NULL, `notes` text NULL, `quantity` integer NOT NULL DEFAULT 1, `insured` bool NOT NULL DEFAULT false, `archived` bool NOT NULL DEFAULT false, `asset_id` integer NOT NULL DEFAULT 0, `serial_number` text NULL, `model_number` text NULL, `manufacturer` text NULL, `lifetime_warranty` bool NOT NULL DEFAULT false, `warranty_expires` datetime NULL, `warranty_details` text NULL, `purchase_time` datetime NULL, `purchase_from` text NULL, `purchase_price` real NOT NULL DEFAULT 0, `sold_time` datetime NULL, `sold_to` text NULL, `sold_price` real NOT NULL DEFAULT 0, `sold_notes` text NULL, `group_items` uuid NOT NULL, `item_children` uuid NULL, `location_items` uuid NULL, PRIMARY KEY (`id`), CONSTRAINT `items_groups_items` FOREIGN KEY (`group_items`) REFERENCES `groups` (`id`) ON DELETE CASCADE, CONSTRAINT `items_items_children` FOREIGN KEY (`item_children`) REFERENCES `items` (`id`) ON DELETE SET NULL, CONSTRAINT `items_locations_items` FOREIGN KEY (`location_items`) REFERENCES `locations` (`id`) ON DELETE CASCADE);
+-- copy rows from old table "items" to new temporary table "new_items"
+INSERT INTO `new_items` (`id`, `created_at`, `updated_at`, `name`, `description`, `import_ref`, `notes`, `quantity`, `insured`, `archived`, `serial_number`, `model_number`, `manufacturer`, `lifetime_warranty`, `warranty_expires`, `warranty_details`, `purchase_time`, `purchase_from`, `purchase_price`, `sold_time`, `sold_to`, `sold_price`, `sold_notes`, `group_items`, `item_children`, `location_items`) SELECT `id`, `created_at`, `updated_at`, `name`, `description`, `import_ref`, `notes`, `quantity`, `insured`, `archived`, `serial_number`, `model_number`, `manufacturer`, `lifetime_warranty`, `warranty_expires`, `warranty_details`, `purchase_time`, `purchase_from`, `purchase_price`, `sold_time`, `sold_to`, `sold_price`, `sold_notes`, `group_items`, `item_children`, `location_items` FROM `items`;
+-- drop "items" table after copying rows
+DROP TABLE `items`;
+-- rename temporary table "new_items" to "items"
+ALTER TABLE `new_items` RENAME TO `items`;
+-- create index "item_name" to table: "items"
+CREATE INDEX `item_name` ON `items` (`name`);
+-- create index "item_manufacturer" to table: "items"
+CREATE INDEX `item_manufacturer` ON `items` (`manufacturer`);
+-- create index "item_model_number" to table: "items"
+CREATE INDEX `item_model_number` ON `items` (`model_number`);
+-- create index "item_serial_number" to table: "items"
+CREATE INDEX `item_serial_number` ON `items` (`serial_number`);
+-- create index "item_archived" to table: "items"
+CREATE INDEX `item_archived` ON `items` (`archived`);
+-- create index "item_asset_id" to table: "items"
+CREATE INDEX `item_asset_id` ON `items` (`asset_id`);
+-- enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
diff --git a/backend/internal/data/migrations/migrations/20221203053132_add_token_roles.sql b/backend/internal/data/migrations/migrations/20221203053132_add_token_roles.sql
new file mode 100644
index 0000000..6f6d00a
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20221203053132_add_token_roles.sql
@@ -0,0 +1,4 @@
+-- create "auth_roles" table
+CREATE TABLE `auth_roles` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `role` text NOT NULL DEFAULT 'user', `auth_tokens_roles` uuid NULL, CONSTRAINT `auth_roles_auth_tokens_roles` FOREIGN KEY (`auth_tokens_roles`) REFERENCES `auth_tokens` (`id`) ON DELETE SET NULL);
+-- create index "auth_roles_auth_tokens_roles_key" to table: "auth_roles"
+CREATE UNIQUE INDEX `auth_roles_auth_tokens_roles_key` ON `auth_roles` (`auth_tokens_roles`);
diff --git a/backend/internal/data/migrations/migrations/20221205230404_drop_document_tokens.sql b/backend/internal/data/migrations/migrations/20221205230404_drop_document_tokens.sql
new file mode 100644
index 0000000..e130abe
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20221205230404_drop_document_tokens.sql
@@ -0,0 +1,5 @@
+-- disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+DROP TABLE `document_tokens`;
+-- enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
\ No newline at end of file
diff --git a/backend/internal/data/migrations/migrations/20221205234214_add_maintenance_entries.sql b/backend/internal/data/migrations/migrations/20221205234214_add_maintenance_entries.sql
new file mode 100644
index 0000000..2491ec4
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20221205234214_add_maintenance_entries.sql
@@ -0,0 +1,2 @@
+-- create "maintenance_entries" table
+CREATE TABLE `maintenance_entries` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `date` datetime NOT NULL, `name` text NOT NULL, `description` text NULL, `cost` real NOT NULL DEFAULT 0, `item_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `maintenance_entries_items_maintenance_entries` FOREIGN KEY (`item_id`) REFERENCES `items` (`id`) ON DELETE CASCADE);
diff --git a/backend/internal/data/migrations/migrations/20221205234812_cascade_delete_roles.sql b/backend/internal/data/migrations/migrations/20221205234812_cascade_delete_roles.sql
new file mode 100644
index 0000000..8a37c11
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20221205234812_cascade_delete_roles.sql
@@ -0,0 +1,16 @@
+-- disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+-- create "new_auth_roles" table
+CREATE TABLE `new_auth_roles` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `role` text NOT NULL DEFAULT 'user', `auth_tokens_roles` uuid NULL, CONSTRAINT `auth_roles_auth_tokens_roles` FOREIGN KEY (`auth_tokens_roles`) REFERENCES `auth_tokens` (`id`) ON DELETE CASCADE);
+-- copy rows from old table "auth_roles" to new temporary table "new_auth_roles"
+INSERT INTO `new_auth_roles` (`id`, `role`, `auth_tokens_roles`) SELECT `id`, `role`, `auth_tokens_roles` FROM `auth_roles`;
+-- drop "auth_roles" table after copying rows
+DROP TABLE `auth_roles`;
+-- rename temporary table "new_auth_roles" to "auth_roles"
+ALTER TABLE `new_auth_roles` RENAME TO `auth_roles`;
+-- create index "auth_roles_auth_tokens_roles_key" to table: "auth_roles"
+CREATE UNIQUE INDEX `auth_roles_auth_tokens_roles_key` ON `auth_roles` (`auth_tokens_roles`);
+-- delete where tokens is null
+DELETE FROM `auth_roles` WHERE `auth_tokens_roles` IS NULL;
+-- enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
diff --git a/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql b/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql
new file mode 100644
index 0000000..a43ecfb
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql
@@ -0,0 +1,12 @@
+-- disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+-- create "new_maintenance_entries" table
+CREATE TABLE `new_maintenance_entries` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `date` datetime NULL, `scheduled_date` datetime NULL, `name` text NOT NULL, `description` text NULL, `cost` real NOT NULL DEFAULT 0, `item_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `maintenance_entries_items_maintenance_entries` FOREIGN KEY (`item_id`) REFERENCES `items` (`id`) ON DELETE CASCADE);
+-- copy rows from old table "maintenance_entries" to new temporary table "new_maintenance_entries"
+INSERT INTO `new_maintenance_entries` (`id`, `created_at`, `updated_at`, `date`, `name`, `description`, `cost`, `item_id`) SELECT `id`, `created_at`, `updated_at`, `date`, `name`, `description`, `cost`, `item_id` FROM `maintenance_entries`;
+-- drop "maintenance_entries" table after copying rows
+DROP TABLE `maintenance_entries`;
+-- rename temporary table "new_maintenance_entries" to "maintenance_entries"
+ALTER TABLE `new_maintenance_entries` RENAME TO `maintenance_entries`;
+-- enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
diff --git a/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql b/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql
new file mode 100644
index 0000000..09b1824
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql
@@ -0,0 +1,6 @@
+-- create "notifiers" table
+CREATE TABLE `notifiers` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `name` text NOT NULL, `url` text NOT NULL, `is_active` bool NOT NULL DEFAULT true, `user_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `notifiers_users_notifiers` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE);
+-- create index "notifier_user_id" to table: "notifiers"
+CREATE INDEX `notifier_user_id` ON `notifiers` (`user_id`);
+-- create index "notifier_user_id_is_active" to table: "notifiers"
+CREATE INDEX `notifier_user_id_is_active` ON `notifiers` (`user_id`, `is_active`);
diff --git a/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql b/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql
new file mode 100644
index 0000000..5f0f16d
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql
@@ -0,0 +1,20 @@
+-- disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+-- create "new_notifiers" table
+CREATE TABLE `new_notifiers` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `name` text NOT NULL, `url` text NOT NULL, `is_active` bool NOT NULL DEFAULT true, `group_id` uuid NOT NULL, `user_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `notifiers_groups_notifiers` FOREIGN KEY (`group_id`) REFERENCES `groups` (`id`) ON DELETE CASCADE, CONSTRAINT `notifiers_users_notifiers` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE);
+-- copy rows from old table "notifiers" to new temporary table "new_notifiers"
+INSERT INTO `new_notifiers` (`id`, `created_at`, `updated_at`, `name`, `url`, `is_active`, `user_id`) SELECT `id`, `created_at`, `updated_at`, `name`, `url`, `is_active`, `user_id` FROM `notifiers`;
+-- drop "notifiers" table after copying rows
+DROP TABLE `notifiers`;
+-- rename temporary table "new_notifiers" to "notifiers"
+ALTER TABLE `new_notifiers` RENAME TO `notifiers`;
+-- create index "notifier_user_id" to table: "notifiers"
+CREATE INDEX `notifier_user_id` ON `notifiers` (`user_id`);
+-- create index "notifier_user_id_is_active" to table: "notifiers"
+CREATE INDEX `notifier_user_id_is_active` ON `notifiers` (`user_id`, `is_active`);
+-- create index "notifier_group_id" to table: "notifiers"
+CREATE INDEX `notifier_group_id` ON `notifiers` (`group_id`);
+-- create index "notifier_group_id_is_active" to table: "notifiers"
+CREATE INDEX `notifier_group_id_is_active` ON `notifiers` (`group_id`, `is_active`);
+-- enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
diff --git a/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql b/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql
new file mode 100644
index 0000000..b7506c1
--- /dev/null
+++ b/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql
@@ -0,0 +1,12 @@
+-- Disable the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = off;
+-- Create "new_attachments" table
+CREATE TABLE `new_attachments` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `type` text NOT NULL DEFAULT 'attachment', `primary` bool NOT NULL DEFAULT false, `document_attachments` uuid NOT NULL, `item_attachments` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `attachments_documents_attachments` FOREIGN KEY (`document_attachments`) REFERENCES `documents` (`id`) ON DELETE CASCADE, CONSTRAINT `attachments_items_attachments` FOREIGN KEY (`item_attachments`) REFERENCES `items` (`id`) ON DELETE CASCADE);
+-- Copy rows from old table "attachments" to new temporary table "new_attachments"
+INSERT INTO `new_attachments` (`id`, `created_at`, `updated_at`, `type`, `document_attachments`, `item_attachments`) SELECT `id`, `created_at`, `updated_at`, `type`, `document_attachments`, `item_attachments` FROM `attachments`;
+-- Drop "attachments" table after copying rows
+DROP TABLE `attachments`;
+-- Rename temporary table "new_attachments" to "attachments"
+ALTER TABLE `new_attachments` RENAME TO `attachments`;
+-- Enable back the enforcement of foreign-keys constraints
+PRAGMA foreign_keys = on;
diff --git a/backend/internal/data/migrations/migrations/atlas.sum b/backend/internal/data/migrations/migrations/atlas.sum
index 2916627..e8d99a6 100644
--- a/backend/internal/data/migrations/migrations/atlas.sum
+++ b/backend/internal/data/migrations/migrations/atlas.sum
@@ -1,6 +1,15 @@
-h1:i76VRMDIPdcmQtXTe9bzrgITAzLGjjVy9y8XaXIchAs=
+h1:sjJCTAqc9FG8BKBIzh5ZynYD/Ilz6vnLqM4XX83WQ4M=
20220929052825_init.sql h1:ZlCqm1wzjDmofeAcSX3jE4h4VcdTNGpRg2eabztDy9Q=
20221001210956_group_invitations.sql h1:YQKJFtE39wFOcRNbZQ/d+ZlHwrcfcsZlcv/pLEYdpjw=
20221009173029_add_user_roles.sql h1:vWmzAfgEWQeGk0Vn70zfVPCcfEZth3E0JcvyKTjpYyU=
20221020043305_allow_nesting_types.sql h1:4AyJpZ7l7SSJtJAQETYY802FHJ64ufYPJTqvwdiGn3M=
20221101041931_add_archived_field.sql h1:L2WxiOh1svRn817cNURgqnEQg6DIcodZ1twK4tvxW94=
+20221113012312_add_asset_id_field.sql h1:DjD7e1PS8OfxGBWic8h0nO/X6CNnHEMqQjDCaaQ3M3Q=
+20221203053132_add_token_roles.sql h1:wFTIh+KBoHfLfy/L0ZmJz4cNXKHdACG9ZK/yvVKjF0M=
+20221205230404_drop_document_tokens.sql h1:9dCbNFcjtsT6lEhkxCn/vYaGRmQrl1LefdEJgvkfhGg=
+20221205234214_add_maintenance_entries.sql h1:B56VzCuDsed1k3/sYUoKlOkP90DcdLufxFK0qYvoafU=
+20221205234812_cascade_delete_roles.sql h1:VIiaImR48nCHF3uFbOYOX1E79Ta5HsUBetGaSAbh9Gk=
+20230227024134_add_scheduled_date.sql h1:8qO5OBZ0AzsfYEQOAQQrYIjyhSwM+v1A+/ylLSoiyoc=
+20230305065819_add_notifier_types.sql h1:r5xrgCKYQ2o9byBqYeAX1zdp94BLdaxf4vq9OmGHNl0=
+20230305071524_add_group_id_to_notifiers.sql h1:xDShqbyClcFhvJbwclOHdczgXbdffkxXNWjV61hL/t4=
+20231006213457_add_primary_attachment_flag.sql h1:J4tMSJQFa7vaj0jpnh8YKTssdyIjRyq6RXDXZIzDDu4=
diff --git a/backend/internal/data/repo/asset_id_type.go b/backend/internal/data/repo/asset_id_type.go
new file mode 100644
index 0000000..0a53a4a
--- /dev/null
+++ b/backend/internal/data/repo/asset_id_type.go
@@ -0,0 +1,73 @@
+package repo
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+type AssetID int
+
+func (aid AssetID) Nil() bool {
+ return aid.Int() <= 0
+}
+
+func (aid AssetID) Int() int {
+ return int(aid)
+}
+
+func ParseAssetIDBytes(d []byte) (AID AssetID, ok bool) {
+ d = bytes.Replace(d, []byte(`"`), []byte(``), -1)
+ d = bytes.Replace(d, []byte(`-`), []byte(``), -1)
+
+ aidInt, err := strconv.Atoi(string(d))
+ if err != nil {
+ return AssetID(-1), false
+ }
+
+ return AssetID(aidInt), true
+}
+
+func ParseAssetID(s string) (AID AssetID, ok bool) {
+ return ParseAssetIDBytes([]byte(s))
+}
+
+func (aid AssetID) String() string {
+ if aid.Nil() {
+ return ""
+ }
+
+ aidStr := fmt.Sprintf("%06d", aid)
+ aidStr = fmt.Sprintf("%s-%s", aidStr[:3], aidStr[3:])
+ return aidStr
+}
+
+func (aid AssetID) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + aid.String() + `"`), nil
+}
+
+func (aid *AssetID) UnmarshalJSON(d []byte) error {
+ if len(d) == 0 || bytes.Equal(d, []byte(`""`)) {
+ *aid = -1
+ return nil
+ }
+
+ d = bytes.Replace(d, []byte(`"`), []byte(``), -1)
+ d = bytes.Replace(d, []byte(`-`), []byte(``), -1)
+
+ aidInt, err := strconv.Atoi(string(d))
+ if err != nil {
+ return err
+ }
+
+ *aid = AssetID(aidInt)
+ return nil
+}
+
+func (aid AssetID) MarshalCSV() (string, error) {
+ return aid.String(), nil
+}
+
+func (aid *AssetID) UnmarshalCSV(d string) error {
+ return aid.UnmarshalJSON([]byte(d))
+}
diff --git a/backend/internal/data/repo/asset_id_type_test.go b/backend/internal/data/repo/asset_id_type_test.go
new file mode 100644
index 0000000..6aa7b99
--- /dev/null
+++ b/backend/internal/data/repo/asset_id_type_test.go
@@ -0,0 +1,115 @@
+package repo
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestAssetID_MarshalJSON(t *testing.T) {
+ tests := []struct {
+ name string
+ aid AssetID
+ want []byte
+ wantErr bool
+ }{
+ {
+ name: "basic test",
+ aid: 123,
+ want: []byte(`"000-123"`),
+ },
+ {
+ name: "zero test",
+ aid: 0,
+ want: []byte(`""`),
+ },
+ {
+ name: "large int",
+ aid: 123456789,
+ want: []byte(`"123-456789"`),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.aid.MarshalJSON()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("AssetID.MarshalJSON() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("AssetID.MarshalJSON() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestAssetID_UnmarshalJSON(t *testing.T) {
+ type args struct {
+ data []byte
+ }
+ tests := []struct {
+ name string
+ aid *AssetID
+ args args
+ want AssetID
+ wantErr bool
+ }{
+ {
+ name: "basic test",
+ aid: new(AssetID),
+ want: 123,
+ args: args{
+ data: []byte(`{"AssetID":"000123"}`),
+ },
+ },
+ {
+ name: "dashed format",
+ aid: new(AssetID),
+ want: 123,
+ args: args{
+ data: []byte(`{"AssetID":"000-123"}`),
+ },
+ },
+ {
+ name: "no leading zeros",
+ aid: new(AssetID),
+ want: 123,
+ args: args{
+ data: []byte(`{"AssetID":"123"}`),
+ },
+ },
+ {
+ name: "trailing zeros",
+ aid: new(AssetID),
+ want: 123000,
+ args: args{
+ data: []byte(`{"AssetID":"000123000"}`),
+ },
+ },
+ {
+ name: "large int",
+ aid: new(AssetID),
+ want: 123456789,
+ args: args{
+ data: []byte(`{"AssetID":"123456789"}`),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ st := struct {
+ AssetID AssetID `json:"AssetID"`
+ }{}
+
+ err := json.Unmarshal(tt.args.data, &st)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("AssetID.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ if st.AssetID != tt.want {
+ t.Errorf("AssetID.UnmarshalJSON() = %v, want %v", st.AssetID, tt.want)
+ }
+ })
+ }
+}
diff --git a/backend/internal/data/repo/automappers.go b/backend/internal/data/repo/automappers.go
new file mode 100644
index 0000000..279164b
--- /dev/null
+++ b/backend/internal/data/repo/automappers.go
@@ -0,0 +1,32 @@
+package repo
+
+type MapFunc[T any, U any] func(T) U
+
+func (a MapFunc[T, U]) Map(v T) U {
+ return a(v)
+}
+
+func (a MapFunc[T, U]) MapEach(v []T) []U {
+ result := make([]U, len(v))
+ for i, item := range v {
+ result[i] = a(item)
+ }
+ return result
+}
+
+func (a MapFunc[T, U]) MapErr(v T, err error) (U, error) {
+ if err != nil {
+ var zero U
+ return zero, err
+ }
+
+ return a(v), nil
+}
+
+func (a MapFunc[T, U]) MapEachErr(v []T, err error) ([]U, error) {
+ if err != nil {
+ return nil, err
+ }
+
+ return a.MapEach(v), nil
+}
diff --git a/backend/internal/data/repo/main_test.go b/backend/internal/data/repo/main_test.go
index 221fbd5..47e5ec0 100644
--- a/backend/internal/data/repo/main_test.go
+++ b/backend/internal/data/repo/main_test.go
@@ -3,18 +3,18 @@ package repo
import (
"context"
"log"
- "math/rand"
"os"
"testing"
- "time"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/pkgs/faker"
_ "github.com/mattn/go-sqlite3"
)
var (
- fk = faker.NewFaker()
+ fk = faker.NewFaker()
+ tbus = eventbus.New()
tClient *ent.Client
tRepos *AllRepos
@@ -40,21 +40,23 @@ func bootstrap() {
}
func TestMain(m *testing.M) {
- rand.Seed(int64(time.Now().Unix()))
-
client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
if err != nil {
log.Fatalf("failed opening connection to sqlite: %v", err)
}
+ go func() {
+ _ = tbus.Run(context.Background())
+ }()
+
err = client.Schema.Create(context.Background())
if err != nil {
log.Fatalf("failed creating schema resources: %v", err)
}
tClient = client
- tRepos = New(tClient, os.TempDir())
- defer client.Close()
+ tRepos = New(tClient, tbus, os.TempDir())
+ defer func() { _ = client.Close() }()
bootstrap()
diff --git a/backend/internal/data/repo/map_helpers.go b/backend/internal/data/repo/map_helpers.go
index a9c0bca..9404cb0 100644
--- a/backend/internal/data/repo/map_helpers.go
+++ b/backend/internal/data/repo/map_helpers.go
@@ -16,17 +16,16 @@ func mapTErrFunc[T any, Y any](fn func(T) Y) func(T, error) (Y, error) {
}
}
-// TODO: Future Usage
-// func mapEachFunc[T any, Y any](fn func(T) Y) func([]T) []Y {
-// return func(items []T) []Y {
-// result := make([]Y, len(items))
-// for i, item := range items {
-// result[i] = fn(item)
-// }
+func mapTEachFunc[T any, Y any](fn func(T) Y) func([]T) []Y {
+ return func(items []T) []Y {
+ result := make([]Y, len(items))
+ for i, item := range items {
+ result[i] = fn(item)
+ }
-// return result
-// }
-// }
+ return result
+ }
+}
func mapTEachErrFunc[T any, Y any](fn func(T) Y) func([]T, error) ([]Y, error) {
return func(items []T, err error) ([]Y, error) {
diff --git a/backend/internal/data/repo/query_helpers.go b/backend/internal/data/repo/query_helpers.go
new file mode 100644
index 0000000..2205d81
--- /dev/null
+++ b/backend/internal/data/repo/query_helpers.go
@@ -0,0 +1,18 @@
+package repo
+
+import "time"
+
+func sqliteDateFormat(t time.Time) string {
+ return t.Format("2006-01-02 15:04:05")
+}
+
+// orDefault returns the value of the pointer if it is not nil, otherwise it returns the default value
+//
+// This is used for nullable or potentially nullable fields (or aggregates) in the database when running
+// queries. If the field is null, the pointer will be nil, so we return the default value instead.
+func orDefault[T any](v *T, def T) T {
+ if v == nil {
+ return def
+ }
+ return *v
+}
diff --git a/backend/internal/data/repo/repo_document_tokens.go b/backend/internal/data/repo/repo_document_tokens.go
deleted file mode 100644
index 018ea61..0000000
--- a/backend/internal/data/repo/repo_document_tokens.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package repo
-
-import (
- "context"
- "time"
-
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
-)
-
-// DocumentTokensRepository is a repository for Document entity
-type DocumentTokensRepository struct {
- db *ent.Client
-}
-
-type (
- DocumentToken struct {
- ID uuid.UUID `json:"-"`
- TokenHash []byte `json:"tokenHash"`
- ExpiresAt time.Time `json:"expiresAt"`
- DocumentID uuid.UUID `json:"documentId"`
- }
-
- DocumentTokenCreate struct {
- TokenHash []byte `json:"tokenHash"`
- DocumentID uuid.UUID `json:"documentId"`
- ExpiresAt time.Time `json:"expiresAt"`
- }
-)
-
-var (
- mapDocumentTokenErr = mapTErrFunc(mapDocumentToken)
-)
-
-func mapDocumentToken(e *ent.DocumentToken) DocumentToken {
- return DocumentToken{
- ID: e.ID,
- TokenHash: e.Token,
- ExpiresAt: e.ExpiresAt,
- DocumentID: e.Edges.Document.ID,
- }
-}
-
-func (r *DocumentTokensRepository) Create(ctx context.Context, data DocumentTokenCreate) (DocumentToken, error) {
- result, err := r.db.DocumentToken.Create().
- SetDocumentID(data.DocumentID).
- SetToken(data.TokenHash).
- SetExpiresAt(data.ExpiresAt).
- Save(ctx)
-
- if err != nil {
- return DocumentToken{}, err
- }
-
- return mapDocumentTokenErr(r.db.DocumentToken.Query().
- Where(documenttoken.ID(result.ID)).
- WithDocument().
- Only(ctx))
-}
-
-func (r *DocumentTokensRepository) PurgeExpiredTokens(ctx context.Context) (int, error) {
- return r.db.DocumentToken.Delete().Where(documenttoken.ExpiresAtLT(time.Now())).Exec(ctx)
-}
-
-func (r *DocumentTokensRepository) Delete(ctx context.Context, id uuid.UUID) error {
- return r.db.DocumentToken.DeleteOneID(id).Exec(ctx)
-}
diff --git a/backend/internal/data/repo/repo_document_tokens_test.go b/backend/internal/data/repo/repo_document_tokens_test.go
deleted file mode 100644
index 6646eca..0000000
--- a/backend/internal/data/repo/repo_document_tokens_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package repo
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/google/uuid"
- "github.com/hay-kot/homebox/backend/internal/data/ent"
- "github.com/hay-kot/homebox/backend/internal/data/ent/documenttoken"
- "github.com/stretchr/testify/assert"
-)
-
-func TestDocumentTokensRepository_Create(t *testing.T) {
- entities := useDocs(t, 1)
- doc := entities[0]
- expires := fk.Time()
-
- type args struct {
- ctx context.Context
- data DocumentTokenCreate
- }
- tests := []struct {
- name string
- args args
- want *ent.DocumentToken
- wantErr bool
- }{
- {
- name: "create document token",
- args: args{
- ctx: context.Background(),
- data: DocumentTokenCreate{
- DocumentID: doc.ID,
- TokenHash: []byte("token"),
- ExpiresAt: expires,
- },
- },
- want: &ent.DocumentToken{
- Edges: ent.DocumentTokenEdges{
- Document: &ent.Document{
- ID: doc.ID,
- },
- },
- Token: []byte("token"),
- ExpiresAt: expires,
- },
- wantErr: false,
- },
- {
- name: "create document token with empty token",
- args: args{
- ctx: context.Background(),
- data: DocumentTokenCreate{
- DocumentID: doc.ID,
- TokenHash: []byte(""),
- ExpiresAt: expires,
- },
- },
- want: nil,
- wantErr: true,
- },
- {
- name: "create document token with empty document id",
- args: args{
- ctx: context.Background(),
- data: DocumentTokenCreate{
- DocumentID: uuid.Nil,
- TokenHash: []byte("token"),
- ExpiresAt: expires,
- },
- },
- want: nil,
- wantErr: true,
- },
- }
-
- ids := make([]uuid.UUID, 0, len(tests))
-
- t.Cleanup(func() {
- for _, id := range ids {
- _ = tRepos.DocTokens.Delete(context.Background(), id)
- }
- })
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
-
- got, err := tRepos.DocTokens.Create(tt.args.ctx, tt.args.data)
- if (err != nil) != tt.wantErr {
- t.Errorf("DocumentTokensRepository.Create() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if tt.wantErr {
- return
- }
-
- assert.Equal(t, tt.want.Token, got.TokenHash)
- assert.WithinDuration(t, tt.want.ExpiresAt, got.ExpiresAt, time.Duration(1)*time.Second)
- assert.Equal(t, tt.want.Edges.Document.ID, got.DocumentID)
- })
-
- }
-}
-
-func useDocTokens(t *testing.T, num int) []DocumentToken {
- entity := useDocs(t, 1)[0]
-
- results := make([]DocumentToken, 0, num)
-
- ids := make([]uuid.UUID, 0, num)
- t.Cleanup(func() {
- for _, id := range ids {
- _ = tRepos.DocTokens.Delete(context.Background(), id)
- }
- })
-
- for i := 0; i < num; i++ {
- e, err := tRepos.DocTokens.Create(context.Background(), DocumentTokenCreate{
- DocumentID: entity.ID,
- TokenHash: []byte(fk.Str(10)),
- ExpiresAt: fk.Time(),
- })
-
- assert.NoError(t, err)
- results = append(results, e)
- ids = append(ids, e.ID)
- }
-
- return results
-}
-
-func TestDocumentTokensRepository_PurgeExpiredTokens(t *testing.T) {
- entities := useDocTokens(t, 2)
-
- // set expired token
- tRepos.DocTokens.db.DocumentToken.Update().
- Where(documenttoken.ID(entities[0].ID)).
- SetExpiresAt(time.Now().Add(-time.Hour)).
- ExecX(context.Background())
-
- count, err := tRepos.DocTokens.PurgeExpiredTokens(context.Background())
- assert.NoError(t, err)
- assert.Equal(t, 1, count)
-
- all, err := tRepos.DocTokens.db.DocumentToken.Query().All(context.Background())
- assert.NoError(t, err)
- assert.Len(t, all, 1)
- assert.Equal(t, entities[1].ID, all[0].ID)
-}
diff --git a/backend/internal/data/repo/repo_documents.go b/backend/internal/data/repo/repo_documents.go
index abe340d..587a4f1 100644
--- a/backend/internal/data/repo/repo_documents.go
+++ b/backend/internal/data/repo/repo_documents.go
@@ -14,9 +14,7 @@ import (
"github.com/hay-kot/homebox/backend/pkgs/pathlib"
)
-var (
- ErrInvalidDocExtension = errors.New("invalid document extension")
-)
+var ErrInvalidDocExtension = errors.New("invalid document extension")
type DocumentRepository struct {
db *ent.Client
@@ -74,7 +72,7 @@ func (r *DocumentRepository) Create(ctx context.Context, gid uuid.UUID, doc Docu
path := r.path(gid, ext)
parent := filepath.Dir(path)
- err := os.MkdirAll(parent, 0755)
+ err := os.MkdirAll(parent, 0o755)
if err != nil {
return DocumentOut{}, err
}
diff --git a/backend/internal/data/repo/repo_documents_test.go b/backend/internal/data/repo/repo_documents_test.go
index b58b3bb..4634235 100644
--- a/backend/internal/data/repo/repo_documents_test.go
+++ b/backend/internal/data/repo/repo_documents_test.go
@@ -11,6 +11,7 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func useDocs(t *testing.T, num int) []DocumentOut {
@@ -25,7 +26,7 @@ func useDocs(t *testing.T, num int) []DocumentOut {
Content: bytes.NewReader([]byte(fk.Str(10))),
})
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotNil(t, doc)
results = append(results, doc)
ids = append(ids, doc.ID)
@@ -34,7 +35,6 @@ func useDocs(t *testing.T, num int) []DocumentOut {
t.Cleanup(func() {
for _, id := range ids {
err := tRepos.Docs.Delete(context.Background(), id)
-
if err != nil {
assert.True(t, ent.IsNotFound(err))
}
@@ -81,31 +81,31 @@ func TestDocumentRepository_CreateUpdateDelete(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Create Document
got, err := r.Create(tt.args.ctx, tt.args.gid, tt.args.doc)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, tt.title, got.Title)
assert.Equal(t, fmt.Sprintf("%s/%s/documents", temp, tt.args.gid), filepath.Dir(got.Path))
ensureRead := func() {
// Read Document
bts, err := os.ReadFile(got.Path)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, tt.content, string(bts))
}
ensureRead()
// Update Document
got, err = r.Rename(tt.args.ctx, got.ID, "__"+tt.title+"__")
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "__"+tt.title+"__", got.Title)
ensureRead()
// Delete Document
err = r.Delete(tt.args.ctx, got.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
_, err = os.Stat(got.Path)
- assert.Error(t, err)
+ require.Error(t, err)
})
}
}
diff --git a/backend/internal/data/repo/repo_group.go b/backend/internal/data/repo/repo_group.go
index 9a9ef7a..8f93c78 100644
--- a/backend/internal/data/repo/repo_group.go
+++ b/backend/internal/data/repo/repo_group.go
@@ -5,14 +5,47 @@ import (
"strings"
"time"
+ "entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/label"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/location"
)
type GroupRepository struct {
- db *ent.Client
+ db *ent.Client
+ groupMapper MapFunc[*ent.Group, Group]
+ invitationMapper MapFunc[*ent.GroupInvitationToken, GroupInvitation]
+}
+
+func NewGroupRepository(db *ent.Client) *GroupRepository {
+ gmap := func(g *ent.Group) Group {
+ return Group{
+ ID: g.ID,
+ Name: g.Name,
+ CreatedAt: g.CreatedAt,
+ UpdatedAt: g.UpdatedAt,
+ Currency: strings.ToUpper(g.Currency),
+ }
+ }
+
+ imap := func(i *ent.GroupInvitationToken) GroupInvitation {
+ return GroupInvitation{
+ ID: i.ID,
+ ExpiresAt: i.ExpiresAt,
+ Uses: i.Uses,
+ Group: gmap(i.Edges.Group),
+ }
+ }
+
+ return &GroupRepository{
+ db: db,
+ groupMapper: gmap,
+ invitationMapper: imap,
+ }
}
type (
@@ -41,83 +74,211 @@ type (
Uses int `json:"uses"`
Group Group `json:"group"`
}
+
GroupStatistics struct {
- TotalUsers int `json:"totalUsers"`
- TotalItems int `json:"totalItems"`
- TotalLocations int `json:"totalLocations"`
- TotalLabels int `json:"totalLabels"`
+ TotalUsers int `json:"totalUsers"`
+ TotalItems int `json:"totalItems"`
+ TotalLocations int `json:"totalLocations"`
+ TotalLabels int `json:"totalLabels"`
+ TotalItemPrice float64 `json:"totalItemPrice"`
+ TotalWithWarranty int `json:"totalWithWarranty"`
+ }
+
+ ValueOverTimeEntry struct {
+ Date time.Time `json:"date"`
+ Value float64 `json:"value"`
+ Name string `json:"name"`
+ }
+
+ ValueOverTime struct {
+ PriceAtStart float64 `json:"valueAtStart"`
+ PriceAtEnd float64 `json:"valueAtEnd"`
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+ Entries []ValueOverTimeEntry `json:"entries"`
+ }
+
+ TotalsByOrganizer struct {
+ ID uuid.UUID `json:"id"`
+ Name string `json:"name"`
+ Total float64 `json:"total"`
}
)
-var (
- mapToGroupErr = mapTErrFunc(mapToGroup)
-)
-
-func mapToGroup(g *ent.Group) Group {
- return Group{
- ID: g.ID,
- Name: g.Name,
- CreatedAt: g.CreatedAt,
- UpdatedAt: g.UpdatedAt,
- Currency: strings.ToUpper(g.Currency.String()),
- }
+func (r *GroupRepository) GetAllGroups(ctx context.Context) ([]Group, error) {
+ return r.groupMapper.MapEachErr(r.db.Group.Query().All(ctx))
}
-var (
- mapToGroupInvitationErr = mapTErrFunc(mapToGroupInvitation)
-)
+func (r *GroupRepository) StatsLocationsByPurchasePrice(ctx context.Context, GID uuid.UUID) ([]TotalsByOrganizer, error) {
+ var v []TotalsByOrganizer
-func mapToGroupInvitation(g *ent.GroupInvitationToken) GroupInvitation {
- return GroupInvitation{
- ID: g.ID,
- ExpiresAt: g.ExpiresAt,
- Uses: g.Uses,
- Group: mapToGroup(g.Edges.Group),
+ err := r.db.Location.Query().
+ Where(
+ location.HasGroupWith(group.ID(GID)),
+ ).
+ GroupBy(location.FieldID, location.FieldName).
+ Aggregate(func(sq *sql.Selector) string {
+ t := sql.Table(item.Table)
+ sq.Join(t).On(sq.C(location.FieldID), t.C(item.LocationColumn))
+
+ return sql.As(sql.Sum(t.C(item.FieldPurchasePrice)), "total")
+ }).
+ Scan(ctx, &v)
+ if err != nil {
+ return nil, err
}
+
+ return v, err
}
-func (r *GroupRepository) GroupStatistics(ctx context.Context, GID uuid.UUID) (GroupStatistics, error) {
+func (r *GroupRepository) StatsLabelsByPurchasePrice(ctx context.Context, GID uuid.UUID) ([]TotalsByOrganizer, error) {
+ var v []TotalsByOrganizer
+
+ err := r.db.Label.Query().
+ Where(
+ label.HasGroupWith(group.ID(GID)),
+ ).
+ GroupBy(label.FieldID, label.FieldName).
+ Aggregate(func(sq *sql.Selector) string {
+ itemTable := sql.Table(item.Table)
+
+ jt := sql.Table(label.ItemsTable)
+
+ sq.Join(jt).On(sq.C(label.FieldID), jt.C(label.ItemsPrimaryKey[0]))
+ sq.Join(itemTable).On(jt.C(label.ItemsPrimaryKey[1]), itemTable.C(item.FieldID))
+
+ return sql.As(sql.Sum(itemTable.C(item.FieldPurchasePrice)), "total")
+ }).
+ Scan(ctx, &v)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, err
+}
+
+func (r *GroupRepository) StatsPurchasePrice(ctx context.Context, GID uuid.UUID, start, end time.Time) (*ValueOverTime, error) {
+ // Get the Totals for the Start and End of the Given Time Period
+ q := `
+ SELECT
+ (SELECT Sum(purchase_price)
+ FROM items
+ WHERE group_items = ?
+ AND items.archived = false
+ AND items.created_at < ?) AS price_at_start,
+ (SELECT Sum(purchase_price)
+ FROM items
+ WHERE group_items = ?
+ AND items.archived = false
+ AND items.created_at < ?) AS price_at_end
+`
+ stats := ValueOverTime{
+ Start: start,
+ End: end,
+ }
+
+ var maybeStart *float64
+ var maybeEnd *float64
+
+ row := r.db.Sql().QueryRowContext(ctx, q, GID, sqliteDateFormat(start), GID, sqliteDateFormat(end))
+ err := row.Scan(&maybeStart, &maybeEnd)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.PriceAtStart = orDefault(maybeStart, 0)
+ stats.PriceAtEnd = orDefault(maybeEnd, 0)
+
+ var v []struct {
+ Name string `json:"name"`
+ CreatedAt time.Time `json:"created_at"`
+ PurchasePrice float64 `json:"purchase_price"`
+ }
+
+ // Get Created Date and Price of all items between start and end
+ err = r.db.Item.Query().
+ Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.CreatedAtGTE(start),
+ item.CreatedAtLTE(end),
+ item.Archived(false),
+ ).
+ Select(
+ item.FieldName,
+ item.FieldCreatedAt,
+ item.FieldPurchasePrice,
+ ).
+ Scan(ctx, &v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Entries = make([]ValueOverTimeEntry, len(v))
+
+ for i, vv := range v {
+ stats.Entries[i] = ValueOverTimeEntry{
+ Date: vv.CreatedAt,
+ Value: vv.PurchasePrice,
+ }
+ }
+
+ return &stats, nil
+}
+
+func (r *GroupRepository) StatsGroup(ctx context.Context, GID uuid.UUID) (GroupStatistics, error) {
q := `
SELECT
(SELECT COUNT(*) FROM users WHERE group_users = ?) AS total_users,
(SELECT COUNT(*) FROM items WHERE group_items = ? AND items.archived = false) AS total_items,
(SELECT COUNT(*) FROM locations WHERE group_locations = ?) AS total_locations,
- (SELECT COUNT(*) FROM labels WHERE group_labels = ?) AS total_labels
+ (SELECT COUNT(*) FROM labels WHERE group_labels = ?) AS total_labels,
+ (SELECT SUM(purchase_price*quantity) FROM items WHERE group_items = ? AND items.archived = false) AS total_item_price,
+ (SELECT COUNT(*)
+ FROM items
+ WHERE group_items = ?
+ AND items.archived = false
+ AND (items.lifetime_warranty = true OR items.warranty_expires > date())
+ ) AS total_with_warranty
`
var stats GroupStatistics
- row := r.db.Sql().QueryRowContext(ctx, q, GID, GID, GID, GID)
+ row := r.db.Sql().QueryRowContext(ctx, q, GID, GID, GID, GID, GID, GID)
- err := row.Scan(&stats.TotalUsers, &stats.TotalItems, &stats.TotalLocations, &stats.TotalLabels)
+ var maybeTotalItemPrice *float64
+ var maybeTotalWithWarranty *int
+
+ err := row.Scan(&stats.TotalUsers, &stats.TotalItems, &stats.TotalLocations, &stats.TotalLabels, &maybeTotalItemPrice, &maybeTotalWithWarranty)
if err != nil {
return GroupStatistics{}, err
}
+ stats.TotalItemPrice = orDefault(maybeTotalItemPrice, 0)
+ stats.TotalWithWarranty = orDefault(maybeTotalWithWarranty, 0)
+
return stats, nil
}
func (r *GroupRepository) GroupCreate(ctx context.Context, name string) (Group, error) {
- return mapToGroupErr(r.db.Group.Create().
+ return r.groupMapper.MapErr(r.db.Group.Create().
SetName(name).
Save(ctx))
}
func (r *GroupRepository) GroupUpdate(ctx context.Context, ID uuid.UUID, data GroupUpdate) (Group, error) {
- currency := group.Currency(strings.ToLower(data.Currency))
-
entity, err := r.db.Group.UpdateOneID(ID).
SetName(data.Name).
- SetCurrency(currency).
+ SetCurrency(strings.ToLower(data.Currency)).
Save(ctx)
- return mapToGroupErr(entity, err)
+ return r.groupMapper.MapErr(entity, err)
}
func (r *GroupRepository) GroupByID(ctx context.Context, id uuid.UUID) (Group, error) {
- return mapToGroupErr(r.db.Group.Get(ctx, id))
+ return r.groupMapper.MapErr(r.db.Group.Get(ctx, id))
}
func (r *GroupRepository) InvitationGet(ctx context.Context, token []byte) (GroupInvitation, error) {
- return mapToGroupInvitationErr(r.db.GroupInvitationToken.Query().
+ return r.invitationMapper.MapErr(r.db.GroupInvitationToken.Query().
Where(groupinvitationtoken.Token(token)).
WithGroup().
Only(ctx))
@@ -130,7 +291,6 @@ func (r *GroupRepository) InvitationCreate(ctx context.Context, groupID uuid.UUI
SetExpiresAt(invite.ExpiresAt).
SetUses(invite.Uses).
Save(ctx)
-
if err != nil {
return GroupInvitation{}, err
}
diff --git a/backend/internal/data/repo/repo_group_test.go b/backend/internal/data/repo/repo_group_test.go
index b608d16..180d72e 100644
--- a/backend/internal/data/repo/repo_group_test.go
+++ b/backend/internal/data/repo/repo_group_test.go
@@ -5,29 +5,30 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func Test_Group_Create(t *testing.T) {
g, err := tRepos.Groups.GroupCreate(context.Background(), "test")
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "test", g.Name)
// Get by ID
foundGroup, err := tRepos.Groups.GroupByID(context.Background(), g.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, g.ID, foundGroup.ID)
}
func Test_Group_Update(t *testing.T) {
g, err := tRepos.Groups.GroupCreate(context.Background(), "test")
- assert.NoError(t, err)
+ require.NoError(t, err)
g, err = tRepos.Groups.GroupUpdate(context.Background(), g.ID, GroupUpdate{
Name: "test2",
Currency: "eur",
})
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, "test2", g.Name)
assert.Equal(t, "EUR", g.Currency)
}
@@ -36,9 +37,9 @@ func Test_Group_GroupStatistics(t *testing.T) {
useItems(t, 20)
useLabels(t, 20)
- stats, err := tRepos.Groups.GroupStatistics(context.Background(), tGroup.ID)
+ stats, err := tRepos.Groups.StatsGroup(context.Background(), tGroup.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, 20, stats.TotalItems)
assert.Equal(t, 20, stats.TotalLabels)
assert.Equal(t, 1, stats.TotalUsers)
diff --git a/backend/internal/data/repo/repo_item_attachments.go b/backend/internal/data/repo/repo_item_attachments.go
index 1e2ef7b..da57b31 100644
--- a/backend/internal/data/repo/repo_item_attachments.go
+++ b/backend/internal/data/repo/repo_item_attachments.go
@@ -7,6 +7,7 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
)
// AttachmentRepo is a repository for Attachments table that links Items to Documents
@@ -24,12 +25,14 @@ type (
UpdatedAt time.Time `json:"updatedAt"`
Type string `json:"type"`
Document DocumentOut `json:"document"`
+ Primary bool `json:"primary"`
}
ItemAttachmentUpdate struct {
- ID uuid.UUID `json:"-"`
- Type string `json:"type"`
- Title string `json:"title"`
+ ID uuid.UUID `json:"-"`
+ Type string `json:"type"`
+ Title string `json:"title"`
+ Primary bool `json:"primary"`
}
)
@@ -39,6 +42,7 @@ func ToItemAttachment(attachment *ent.Attachment) ItemAttachment {
CreatedAt: attachment.CreatedAt,
UpdatedAt: attachment.UpdatedAt,
Type: attachment.Type.String(),
+ Primary: attachment.Primary,
Document: DocumentOut{
ID: attachment.Edges.Document.ID,
Title: attachment.Edges.Document.Title,
@@ -47,12 +51,31 @@ func ToItemAttachment(attachment *ent.Attachment) ItemAttachment {
}
}
-func (r *AttachmentRepo) Create(ctx context.Context, itemId, docId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) {
- return r.db.Attachment.Create().
+func (r *AttachmentRepo) Create(ctx context.Context, itemID, docID uuid.UUID, typ attachment.Type) (*ent.Attachment, error) {
+ bldr := r.db.Attachment.Create().
SetType(typ).
- SetDocumentID(docId).
- SetItemID(itemId).
- Save(ctx)
+ SetDocumentID(docID).
+ SetItemID(itemID)
+
+ // Automatically mark this attachment as primary when it is the
+ // first attachment of type photo for the item
+ if typ == attachment.TypePhoto {
+ cnt, err := r.db.Attachment.Query().
+ Where(
+ attachment.HasItemWith(item.ID(itemID)),
+ attachment.TypeEQ(typ),
+ ).
+ Count(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cnt == 0 {
+ bldr = bldr.SetPrimary(true)
+ }
+ }
+
+ return bldr.Save(ctx)
}
func (r *AttachmentRepo) Get(ctx context.Context, id uuid.UUID) (*ent.Attachment, error) {
@@ -64,11 +87,33 @@ func (r *AttachmentRepo) Get(ctx context.Context, id uuid.UUID) (*ent.Attachment
Only(ctx)
}
-func (r *AttachmentRepo) Update(ctx context.Context, itemId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) {
- itm, err := r.db.Attachment.UpdateOneID(itemId).
- SetType(typ).
- Save(ctx)
+func (r *AttachmentRepo) Update(ctx context.Context, itemID uuid.UUID, data *ItemAttachmentUpdate) (*ent.Attachment, error) {
+ // TODO: execute within Tx
+ typ := attachment.Type(data.Type)
+ bldr := r.db.Attachment.UpdateOneID(itemID).
+ SetType(typ)
+
+ // Primary only applies to photos
+ if typ == attachment.TypePhoto {
+ bldr = bldr.SetPrimary(data.Primary)
+ } else {
+ bldr = bldr.SetPrimary(false)
+ }
+
+ itm, err := bldr.Save(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure all other attachments are not primary
+ err = r.db.Attachment.Update().
+ Where(
+ attachment.HasItemWith(item.ID(itemID)),
+ attachment.IDNEQ(itm.ID),
+ ).
+ SetPrimary(false).
+ Exec(ctx)
if err != nil {
return nil, err
}
diff --git a/backend/internal/data/repo/repo_item_attachments_test.go b/backend/internal/data/repo/repo_item_attachments_test.go
index 15f70c8..9007b2e 100644
--- a/backend/internal/data/repo/repo_item_attachments_test.go
+++ b/backend/internal/data/repo/repo_item_attachments_test.go
@@ -8,6 +8,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestAttachmentRepo_Create(t *testing.T) {
@@ -23,8 +24,8 @@ func TestAttachmentRepo_Create(t *testing.T) {
type args struct {
ctx context.Context
- itemId uuid.UUID
- docId uuid.UUID
+ itemID uuid.UUID
+ docID uuid.UUID
typ attachment.Type
}
tests := []struct {
@@ -37,8 +38,8 @@ func TestAttachmentRepo_Create(t *testing.T) {
name: "create attachment",
args: args{
ctx: context.Background(),
- itemId: item.ID,
- docId: doc.ID,
+ itemID: item.ID,
+ docID: doc.ID,
typ: attachment.TypePhoto,
},
want: &ent.Attachment{
@@ -49,8 +50,8 @@ func TestAttachmentRepo_Create(t *testing.T) {
name: "create attachment with invalid item id",
args: args{
ctx: context.Background(),
- itemId: uuid.New(),
- docId: doc.ID,
+ itemID: uuid.New(),
+ docID: doc.ID,
typ: "blarg",
},
wantErr: true,
@@ -58,8 +59,7 @@ func TestAttachmentRepo_Create(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-
- got, err := tRepos.Attachments.Create(tt.args.ctx, tt.args.itemId, tt.args.docId, tt.args.typ)
+ got, err := tRepos.Attachments.Create(tt.args.ctx, tt.args.itemID, tt.args.docID, tt.args.typ)
if (err != nil) != tt.wantErr {
t.Errorf("AttachmentRepo.Create() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -72,9 +72,9 @@ func TestAttachmentRepo_Create(t *testing.T) {
assert.Equal(t, tt.want.Type, got.Type)
withItems, err := tRepos.Attachments.Get(tt.args.ctx, got.ID)
- assert.NoError(t, err)
- assert.Equal(t, tt.args.itemId, withItems.Edges.Item.ID)
- assert.Equal(t, tt.args.docId, withItems.Edges.Document.ID)
+ require.NoError(t, err)
+ assert.Equal(t, tt.args.itemID, withItems.Edges.Item.ID)
+ assert.Equal(t, tt.args.docID, withItems.Edges.Document.ID)
ids = append(ids, got.ID)
})
@@ -97,7 +97,7 @@ func useAttachments(t *testing.T, n int) []*ent.Attachment {
attachments := make([]*ent.Attachment, n)
for i := 0; i < n; i++ {
attachment, err := tRepos.Attachments.Create(context.Background(), item.ID, doc.ID, attachment.TypePhoto)
- assert.NoError(t, err)
+ require.NoError(t, err)
attachments[i] = attachment
ids = append(ids, attachment.ID)
@@ -111,23 +111,25 @@ func TestAttachmentRepo_Update(t *testing.T) {
for _, typ := range []attachment.Type{"photo", "manual", "warranty", "attachment"} {
t.Run(string(typ), func(t *testing.T) {
- _, err := tRepos.Attachments.Update(context.Background(), entity.ID, typ)
- assert.NoError(t, err)
+ _, err := tRepos.Attachments.Update(context.Background(), entity.ID, &ItemAttachmentUpdate{
+ Type: string(typ),
+ })
+
+ require.NoError(t, err)
updated, err := tRepos.Attachments.Get(context.Background(), entity.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, typ, updated.Type)
})
}
-
}
func TestAttachmentRepo_Delete(t *testing.T) {
entity := useAttachments(t, 1)[0]
err := tRepos.Attachments.Delete(context.Background(), entity.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
_, err = tRepos.Attachments.Get(context.Background(), entity.ID)
- assert.Error(t, err)
+ require.Error(t, err)
}
diff --git a/backend/internal/data/repo/repo_items.go b/backend/internal/data/repo/repo_items.go
index 71619fa..72ba904 100644
--- a/backend/internal/data/repo/repo_items.go
+++ b/backend/internal/data/repo/repo_items.go
@@ -2,31 +2,45 @@ package repo
import (
"context"
+ "fmt"
"time"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/itemfield"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
)
type ItemsRepository struct {
- db *ent.Client
+ db *ent.Client
+ bus *eventbus.EventBus
}
type (
+ FieldQuery struct {
+ Name string
+ Value string
+ }
+
ItemQuery struct {
Page int
PageSize int
- Search string `json:"search"`
- LocationIDs []uuid.UUID `json:"locationIds"`
- LabelIDs []uuid.UUID `json:"labelIds"`
- SortBy string `json:"sortBy"`
- IncludeArchived bool `json:"includeArchived"`
+ Search string `json:"search"`
+ AssetID AssetID `json:"assetId"`
+ LocationIDs []uuid.UUID `json:"locationIds"`
+ LabelIDs []uuid.UUID `json:"labelIds"`
+ ParentItemIDs []uuid.UUID `json:"parentIds"`
+ SortBy string `json:"sortBy"`
+ IncludeArchived bool `json:"includeArchived"`
+ Fields []FieldQuery `json:"fields"`
+ OrderBy string `json:"orderBy"`
}
ItemField struct {
@@ -36,22 +50,25 @@ type (
TextValue string `json:"textValue"`
NumberValue int `json:"numberValue"`
BooleanValue bool `json:"booleanValue"`
- TimeValue time.Time `json:"timeValue,omitempty"`
+ // TimeValue time.Time `json:"timeValue,omitempty"`
}
ItemCreate struct {
ImportRef string `json:"-"`
- ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"`
- Name string `json:"name"`
- Description string `json:"description"`
+ ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"`
+ Name string `json:"name" validate:"required,min=1,max=255"`
+ Description string `json:"description" validate:"max=1000"`
+ AssetID AssetID `json:"-"`
// Edges
LocationID uuid.UUID `json:"locationId"`
LabelIDs []uuid.UUID `json:"labelIds"`
}
+
ItemUpdate struct {
- ParentID uuid.UUID `json:"parentId" extensions:"x-nullable,x-omitempty"`
+ ParentID uuid.UUID `json:"parentId" extensions:"x-nullable,x-omitempty"`
ID uuid.UUID `json:"id"`
+ AssetID AssetID `json:"assetId" swaggertype:"string"`
Name string `json:"name"`
Description string `json:"description"`
Quantity int `json:"quantity"`
@@ -68,26 +85,32 @@ type (
Manufacturer string `json:"manufacturer"`
// Warranty
- LifetimeWarranty bool `json:"lifetimeWarranty"`
- WarrantyExpires time.Time `json:"warrantyExpires"`
- WarrantyDetails string `json:"warrantyDetails"`
+ LifetimeWarranty bool `json:"lifetimeWarranty"`
+ WarrantyExpires types.Date `json:"warrantyExpires"`
+ WarrantyDetails string `json:"warrantyDetails"`
// Purchase
- PurchaseTime time.Time `json:"purchaseTime"`
- PurchaseFrom string `json:"purchaseFrom"`
- PurchasePrice float64 `json:"purchasePrice,string"`
+ PurchaseTime types.Date `json:"purchaseTime"`
+ PurchaseFrom string `json:"purchaseFrom"`
+ PurchasePrice float64 `json:"purchasePrice,string"`
// Sold
- SoldTime time.Time `json:"soldTime"`
- SoldTo string `json:"soldTo"`
- SoldPrice float64 `json:"soldPrice,string"`
- SoldNotes string `json:"soldNotes"`
+ SoldTime types.Date `json:"soldTime"`
+ SoldTo string `json:"soldTo"`
+ SoldPrice float64 `json:"soldPrice,string"`
+ SoldNotes string `json:"soldNotes"`
// Extras
Notes string `json:"notes"`
Fields []ItemField `json:"fields"`
}
+ ItemPatch struct {
+ ID uuid.UUID `json:"id"`
+ Quantity *int `json:"quantity,omitempty" extensions:"x-nullable,x-omitempty"`
+ ImportRef *string `json:"-,omitempty" extensions:"x-nullable,x-omitempty"`
+ }
+
ItemSummary struct {
ImportRef string `json:"-"`
ID uuid.UUID `json:"id"`
@@ -99,47 +122,48 @@ type (
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
+ PurchasePrice float64 `json:"purchasePrice,string"`
+
// Edges
Location *LocationSummary `json:"location,omitempty" extensions:"x-nullable,x-omitempty"`
Labels []LabelSummary `json:"labels"`
+
+ ImageID *uuid.UUID `json:"imageId,omitempty"`
}
ItemOut struct {
Parent *ItemSummary `json:"parent,omitempty" extensions:"x-nullable,x-omitempty"`
ItemSummary
+ AssetID AssetID `json:"assetId,string"`
SerialNumber string `json:"serialNumber"`
ModelNumber string `json:"modelNumber"`
Manufacturer string `json:"manufacturer"`
// Warranty
- LifetimeWarranty bool `json:"lifetimeWarranty"`
- WarrantyExpires time.Time `json:"warrantyExpires"`
- WarrantyDetails string `json:"warrantyDetails"`
+ LifetimeWarranty bool `json:"lifetimeWarranty"`
+ WarrantyExpires types.Date `json:"warrantyExpires"`
+ WarrantyDetails string `json:"warrantyDetails"`
// Purchase
- PurchaseTime time.Time `json:"purchaseTime"`
- PurchaseFrom string `json:"purchaseFrom"`
- PurchasePrice float64 `json:"purchasePrice,string"`
+ PurchaseTime types.Date `json:"purchaseTime"`
+ PurchaseFrom string `json:"purchaseFrom"`
// Sold
- SoldTime time.Time `json:"soldTime"`
- SoldTo string `json:"soldTo"`
- SoldPrice float64 `json:"soldPrice,string"`
- SoldNotes string `json:"soldNotes"`
+ SoldTime types.Date `json:"soldTime"`
+ SoldTo string `json:"soldTo"`
+ SoldPrice float64 `json:"soldPrice,string"`
+ SoldNotes string `json:"soldNotes"`
// Extras
Notes string `json:"notes"`
Attachments []ItemAttachment `json:"attachments"`
Fields []ItemField `json:"fields"`
- Children []ItemSummary `json:"children"`
}
)
-var (
- mapItemsSummaryErr = mapTEachErrFunc(mapItemSummary)
-)
+var mapItemsSummaryErr = mapTEachErrFunc(mapItemSummary)
func mapItemSummary(item *ent.Item) ItemSummary {
var location *LocationSummary
@@ -153,14 +177,26 @@ func mapItemSummary(item *ent.Item) ItemSummary {
labels = mapEach(item.Edges.Label, mapLabelSummary)
}
+ var imageID *uuid.UUID
+ if item.Edges.Attachments != nil {
+ for _, a := range item.Edges.Attachments {
+ if a.Primary && a.Edges.Document != nil {
+ imageID = &a.ID
+ break
+ }
+ }
+ }
+
return ItemSummary{
- ID: item.ID,
- Name: item.Name,
- Description: item.Description,
- Quantity: item.Quantity,
- CreatedAt: item.CreatedAt,
- UpdatedAt: item.UpdatedAt,
- Archived: item.Archived,
+ ID: item.ID,
+ Name: item.Name,
+ Description: item.Description,
+ ImportRef: item.ImportRef,
+ Quantity: item.Quantity,
+ CreatedAt: item.CreatedAt,
+ UpdatedAt: item.UpdatedAt,
+ Archived: item.Archived,
+ PurchasePrice: item.PurchasePrice,
// Edges
Location: location,
@@ -168,11 +204,13 @@ func mapItemSummary(item *ent.Item) ItemSummary {
// Warranty
Insured: item.Insured,
+ ImageID: imageID,
}
}
var (
- mapItemOutErr = mapTErrFunc(mapItemOut)
+ mapItemOutErr = mapTErrFunc(mapItemOut)
+ mapItemsOutErr = mapTEachErrFunc(mapItemOut)
)
func mapFields(fields []*ent.ItemField) []ItemField {
@@ -185,7 +223,7 @@ func mapFields(fields []*ent.ItemField) []ItemField {
TextValue: f.TextValue,
NumberValue: f.NumberValue,
BooleanValue: f.BooleanValue,
- TimeValue: f.TimeValue,
+ // TimeValue: f.TimeValue,
}
}
return result
@@ -202,11 +240,6 @@ func mapItemOut(item *ent.Item) ItemOut {
fields = mapFields(item.Edges.Fields)
}
- var children []ItemSummary
- if item.Edges.Children != nil {
- children = mapEach(item.Edges.Children, mapItemSummary)
- }
-
var parent *ItemSummary
if item.Edges.Parent != nil {
v := mapItemSummary(item.Edges.Parent)
@@ -215,9 +248,10 @@ func mapItemOut(item *ent.Item) ItemOut {
return ItemOut{
Parent: parent,
+ AssetID: AssetID(item.AssetID),
ItemSummary: mapItemSummary(item),
LifetimeWarranty: item.LifetimeWarranty,
- WarrantyExpires: item.WarrantyExpires,
+ WarrantyExpires: types.DateFromTime(item.WarrantyExpires),
WarrantyDetails: item.WarrantyDetails,
// Identification
@@ -226,12 +260,11 @@ func mapItemOut(item *ent.Item) ItemOut {
Manufacturer: item.Manufacturer,
// Purchase
- PurchaseTime: item.PurchaseTime,
- PurchaseFrom: item.PurchaseFrom,
- PurchasePrice: item.PurchasePrice,
+ PurchaseTime: types.DateFromTime(item.PurchaseTime),
+ PurchaseFrom: item.PurchaseFrom,
// Sold
- SoldTime: item.SoldTime,
+ SoldTime: types.DateFromTime(item.SoldTime),
SoldTo: item.SoldTo,
SoldPrice: item.SoldPrice,
SoldNotes: item.SoldNotes,
@@ -240,7 +273,12 @@ func mapItemOut(item *ent.Item) ItemOut {
Notes: item.Notes,
Attachments: attachments,
Fields: fields,
- Children: children,
+ }
+}
+
+func (e *ItemsRepository) publishMutationEvent(GID uuid.UUID) {
+ if e.bus != nil {
+ e.bus.Publish(eventbus.EventItemMutation, eventbus.GroupMutationEvent{GID: GID})
}
}
@@ -252,7 +290,6 @@ func (e *ItemsRepository) getOne(ctx context.Context, where ...predicate.Item) (
WithLabel().
WithLocation().
WithGroup().
- WithChildren().
WithParent().
WithAttachments(func(aq *ent.AttachmentQuery) {
aq.WithDocument()
@@ -272,6 +309,10 @@ func (e *ItemsRepository) CheckRef(ctx context.Context, GID uuid.UUID, ref strin
return q.Where(item.ImportRef(ref)).Exist(ctx)
}
+func (e *ItemsRepository) GetByRef(ctx context.Context, GID uuid.UUID, ref string) (ItemOut, error) {
+ return e.getOne(ctx, item.ImportRef(ref), item.HasGroupWith(group.ID(GID)))
+}
+
// GetOneByGroup returns a single item by ID. If the item does not exist, an error is returned.
// GetOneByGroup ensures that the item belongs to a specific group.
func (e *ItemsRepository) GetOneByGroup(ctx context.Context, gid, id uuid.UUID) (ItemOut, error) {
@@ -295,37 +336,132 @@ func (e *ItemsRepository) QueryByGroup(ctx context.Context, gid uuid.UUID, q Ite
qb = qb.Where(item.Archived(false))
}
- if len(q.LabelIDs) > 0 {
- labels := make([]predicate.Item, 0, len(q.LabelIDs))
- for _, l := range q.LabelIDs {
- labels = append(labels, item.HasLabelWith(label.ID(l)))
- }
- qb = qb.Where(item.Or(labels...))
- }
-
- if len(q.LocationIDs) > 0 {
- locations := make([]predicate.Item, 0, len(q.LocationIDs))
- for _, l := range q.LocationIDs {
- locations = append(locations, item.HasLocationWith(location.ID(l)))
- }
- qb = qb.Where(item.Or(locations...))
- }
-
if q.Search != "" {
qb.Where(
item.Or(
item.NameContainsFold(q.Search),
item.DescriptionContainsFold(q.Search),
+ item.SerialNumberContainsFold(q.Search),
+ item.ModelNumberContainsFold(q.Search),
+ item.ManufacturerContainsFold(q.Search),
+ item.NotesContainsFold(q.Search),
),
)
}
+ if !q.AssetID.Nil() {
+ qb = qb.Where(item.AssetID(q.AssetID.Int()))
+ }
+
+ // Filters within this block define an AND relationship where each subset
+ // of filters is OR'd together.
+ //
+ // The goal is to allow matches like where the item has
+ // - one of the selected labels AND
+ // - one of the selected locations AND
+ // - one of the selected fields key/value matches
+ var andPredicates []predicate.Item
+ {
+ if len(q.LabelIDs) > 0 {
+ labelPredicates := make([]predicate.Item, 0, len(q.LabelIDs))
+ for _, l := range q.LabelIDs {
+ labelPredicates = append(labelPredicates, item.HasLabelWith(label.ID(l)))
+ }
+
+ andPredicates = append(andPredicates, item.Or(labelPredicates...))
+ }
+
+ if len(q.LocationIDs) > 0 {
+ locationPredicates := make([]predicate.Item, 0, len(q.LocationIDs))
+ for _, l := range q.LocationIDs {
+ locationPredicates = append(locationPredicates, item.HasLocationWith(location.ID(l)))
+ }
+
+ andPredicates = append(andPredicates, item.Or(locationPredicates...))
+ }
+
+ if len(q.Fields) > 0 {
+ fieldPredicates := make([]predicate.Item, 0, len(q.Fields))
+ for _, f := range q.Fields {
+ fieldPredicates = append(fieldPredicates, item.HasFieldsWith(
+ itemfield.And(
+ itemfield.Name(f.Name),
+ itemfield.TextValue(f.Value),
+ ),
+ ))
+ }
+
+ andPredicates = append(andPredicates, item.Or(fieldPredicates...))
+ }
+
+ if len(q.ParentItemIDs) > 0 {
+ andPredicates = append(andPredicates, item.HasParentWith(item.IDIn(q.ParentItemIDs...)))
+ }
+ }
+
+ if len(andPredicates) > 0 {
+ qb = qb.Where(item.And(andPredicates...))
+ }
+
+ count, err := qb.Count(ctx)
+ if err != nil {
+ return PaginationResult[ItemSummary]{}, err
+ }
+
+ // Order
+ switch q.OrderBy {
+ case "createdAt":
+ qb = qb.Order(ent.Desc(item.FieldCreatedAt))
+ case "updatedAt":
+ qb = qb.Order(ent.Desc(item.FieldUpdatedAt))
+ default: // "name"
+ qb = qb.Order(ent.Asc(item.FieldName))
+ }
+
+ qb = qb.
+ WithLabel().
+ WithLocation().
+ WithAttachments(func(aq *ent.AttachmentQuery) {
+ aq.Where(
+ attachment.Primary(true),
+ ).
+ WithDocument()
+ })
+
if q.Page != -1 || q.PageSize != -1 {
qb = qb.
Offset(calculateOffset(q.Page, q.PageSize)).
Limit(q.PageSize)
}
+ items, err := mapItemsSummaryErr(qb.All(ctx))
+ if err != nil {
+ return PaginationResult[ItemSummary]{}, err
+ }
+
+ return PaginationResult[ItemSummary]{
+ Page: q.Page,
+ PageSize: q.PageSize,
+ Total: count,
+ Items: items,
+ }, nil
+}
+
+// QueryByAssetID returns a paginated list of items matching the given asset ID; when no items match, an empty result is returned.
+func (e *ItemsRepository) QueryByAssetID(ctx context.Context, gid uuid.UUID, assetID AssetID, page int, pageSize int) (PaginationResult[ItemSummary], error) {
+ qb := e.db.Item.Query().Where(
+ item.HasGroupWith(group.ID(gid)),
+ item.AssetID(int(assetID)),
+ )
+
+ if page != -1 || pageSize != -1 {
+ qb.Offset(calculateOffset(page, pageSize)).
+ Limit(pageSize)
+ } else {
+ page = -1
+ pageSize = -1
+ }
+
items, err := mapItemsSummaryErr(
qb.Order(ent.Asc(item.FieldName)).
WithLabel().
@@ -336,36 +472,71 @@ func (e *ItemsRepository) QueryByGroup(ctx context.Context, gid uuid.UUID, q Ite
return PaginationResult[ItemSummary]{}, err
}
- count, err := qb.Count(ctx)
- if err != nil {
- return PaginationResult[ItemSummary]{}, err
- }
-
return PaginationResult[ItemSummary]{
- Page: q.Page,
- PageSize: q.PageSize,
- Total: count,
+ Page: page,
+ PageSize: pageSize,
+ Total: len(items),
Items: items,
}, nil
-
}
// GetAll returns all the items in the database with the Labels and Locations eager loaded.
-func (e *ItemsRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]ItemSummary, error) {
- return mapItemsSummaryErr(e.db.Item.Query().
+func (e *ItemsRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]ItemOut, error) {
+ return mapItemsOutErr(e.db.Item.Query().
Where(item.HasGroupWith(group.ID(gid))).
WithLabel().
WithLocation().
+ WithFields().
All(ctx))
}
+func (e *ItemsRepository) GetAllZeroAssetID(ctx context.Context, GID uuid.UUID) ([]ItemSummary, error) {
+ q := e.db.Item.Query().Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.AssetID(0),
+ ).Order(
+ ent.Asc(item.FieldCreatedAt),
+ )
+
+ return mapItemsSummaryErr(q.All(ctx))
+}
+
+func (e *ItemsRepository) GetHighestAssetID(ctx context.Context, GID uuid.UUID) (AssetID, error) {
+ q := e.db.Item.Query().Where(
+ item.HasGroupWith(group.ID(GID)),
+ ).Order(
+ ent.Desc(item.FieldAssetID),
+ ).Limit(1)
+
+ result, err := q.First(ctx)
+ if err != nil {
+ if ent.IsNotFound(err) {
+ return 0, nil
+ }
+ return 0, err
+ }
+
+ return AssetID(result.AssetID), nil
+}
+
+func (e *ItemsRepository) SetAssetID(ctx context.Context, GID uuid.UUID, ID uuid.UUID, assetID AssetID) error {
+ q := e.db.Item.Update().Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.ID(ID),
+ )
+
+ _, err := q.SetAssetID(int(assetID)).Save(ctx)
+ return err
+}
+
func (e *ItemsRepository) Create(ctx context.Context, gid uuid.UUID, data ItemCreate) (ItemOut, error) {
q := e.db.Item.Create().
SetImportRef(data.ImportRef).
SetName(data.Name).
SetDescription(data.Description).
SetGroupID(gid).
- SetLocationID(data.LocationID)
+ SetLocationID(data.LocationID).
+ SetAssetID(int(data.AssetID))
if data.LabelIDs != nil && len(data.LabelIDs) > 0 {
q.AddLabelIDs(data.LabelIDs...)
@@ -376,11 +547,18 @@ func (e *ItemsRepository) Create(ctx context.Context, gid uuid.UUID, data ItemCr
return ItemOut{}, err
}
+ e.publishMutationEvent(gid)
return e.GetOne(ctx, result.ID)
}
func (e *ItemsRepository) Delete(ctx context.Context, id uuid.UUID) error {
- return e.db.Item.DeleteOneID(id).Exec(ctx)
+ err := e.db.Item.DeleteOneID(id).Exec(ctx)
+ if err != nil {
+ return err
+ }
+
+ e.publishMutationEvent(id)
+ return nil
}
func (e *ItemsRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID) error {
@@ -390,11 +568,16 @@ func (e *ItemsRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID)
item.ID(id),
item.HasGroupWith(group.ID(gid)),
).Exec(ctx)
+ if err != nil {
+ return err
+ }
+
+ e.publishMutationEvent(gid)
return err
}
-func (e *ItemsRepository) UpdateByGroup(ctx context.Context, gid uuid.UUID, data ItemUpdate) (ItemOut, error) {
- q := e.db.Item.Update().Where(item.ID(data.ID), item.HasGroupWith(group.ID(gid))).
+func (e *ItemsRepository) UpdateByGroup(ctx context.Context, GID uuid.UUID, data ItemUpdate) (ItemOut, error) {
+ q := e.db.Item.Update().Where(item.ID(data.ID), item.HasGroupWith(group.ID(GID))).
SetName(data.Name).
SetDescription(data.Description).
SetLocationID(data.LocationID).
@@ -402,19 +585,20 @@ func (e *ItemsRepository) UpdateByGroup(ctx context.Context, gid uuid.UUID, data
SetModelNumber(data.ModelNumber).
SetManufacturer(data.Manufacturer).
SetArchived(data.Archived).
- SetPurchaseTime(data.PurchaseTime).
+ SetPurchaseTime(data.PurchaseTime.Time()).
SetPurchaseFrom(data.PurchaseFrom).
SetPurchasePrice(data.PurchasePrice).
- SetSoldTime(data.SoldTime).
+ SetSoldTime(data.SoldTime.Time()).
SetSoldTo(data.SoldTo).
SetSoldPrice(data.SoldPrice).
SetSoldNotes(data.SoldNotes).
SetNotes(data.Notes).
SetLifetimeWarranty(data.LifetimeWarranty).
SetInsured(data.Insured).
- SetWarrantyExpires(data.WarrantyExpires).
+ SetWarrantyExpires(data.WarrantyExpires.Time()).
SetWarrantyDetails(data.WarrantyDetails).
- SetQuantity(data.Quantity)
+ SetQuantity(data.Quantity).
+ SetAssetID(int(data.AssetID))
currentLabels, err := e.db.Item.Query().Where(item.ID(data.ID)).QueryLabel().All(ctx)
if err != nil {
@@ -464,7 +648,7 @@ func (e *ItemsRepository) UpdateByGroup(ctx context.Context, gid uuid.UUID, data
SetTextValue(f.TextValue).
SetNumberValue(f.NumberValue).
SetBooleanValue(f.BooleanValue).
- SetTimeValue(f.TimeValue).
+ // SetTimeValue(f.TimeValue).
Save(ctx)
if err != nil {
return ItemOut{}, err
@@ -480,8 +664,8 @@ func (e *ItemsRepository) UpdateByGroup(ctx context.Context, gid uuid.UUID, data
SetName(f.Name).
SetTextValue(f.TextValue).
SetNumberValue(f.NumberValue).
- SetBooleanValue(f.BooleanValue).
- SetTimeValue(f.TimeValue)
+ SetBooleanValue(f.BooleanValue)
+ // SetTimeValue(f.TimeValue)
_, err = opt.Save(ctx)
if err != nil {
@@ -504,5 +688,227 @@ func (e *ItemsRepository) UpdateByGroup(ctx context.Context, gid uuid.UUID, data
}
}
+ e.publishMutationEvent(GID)
return e.GetOne(ctx, data.ID)
}
+
+func (e *ItemsRepository) GetAllZeroImportRef(ctx context.Context, GID uuid.UUID) ([]uuid.UUID, error) {
+ var ids []uuid.UUID
+
+ err := e.db.Item.Query().
+ Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.Or(
+ item.ImportRefEQ(""),
+ item.ImportRefIsNil(),
+ ),
+ ).
+ Select(item.FieldID).
+ Scan(ctx, &ids)
+ if err != nil {
+ return nil, err
+ }
+
+ return ids, nil
+}
+
+func (e *ItemsRepository) Patch(ctx context.Context, GID, ID uuid.UUID, data ItemPatch) error {
+ q := e.db.Item.Update().
+ Where(
+ item.ID(ID),
+ item.HasGroupWith(group.ID(GID)),
+ )
+
+ if data.ImportRef != nil {
+ q.SetImportRef(*data.ImportRef)
+ }
+
+ if data.Quantity != nil {
+ q.SetQuantity(*data.Quantity)
+ }
+
+ e.publishMutationEvent(GID)
+ return q.Exec(ctx)
+}
+
+func (e *ItemsRepository) GetAllCustomFieldValues(ctx context.Context, GID uuid.UUID, name string) ([]string, error) {
+ type st struct {
+ Value string `json:"text_value"`
+ }
+
+ var values []st
+
+ err := e.db.Item.Query().
+ Where(
+ item.HasGroupWith(group.ID(GID)),
+ ).
+ QueryFields().
+ Where(
+ itemfield.Name(name),
+ ).
+ Unique(true).
+ Select(itemfield.FieldTextValue).
+ Scan(ctx, &values)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get field values: %w", err)
+ }
+
+ valueStrings := make([]string, len(values))
+ for i, f := range values {
+ valueStrings[i] = f.Value
+ }
+
+ return valueStrings, nil
+}
+
+func (e *ItemsRepository) GetAllCustomFieldNames(ctx context.Context, GID uuid.UUID) ([]string, error) {
+ type st struct {
+ Name string `json:"name"`
+ }
+
+ var fields []st
+
+ err := e.db.Item.Query().
+ Where(
+ item.HasGroupWith(group.ID(GID)),
+ ).
+ QueryFields().
+ Unique(true).
+ Select(itemfield.FieldName).
+ Scan(ctx, &fields)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get custom fields: %w", err)
+ }
+
+ fieldNames := make([]string, len(fields))
+ for i, f := range fields {
+ fieldNames[i] = f.Name
+ }
+
+ return fieldNames, nil
+}
+
+// ZeroOutTimeFields is a helper function that can be invoked via the UI by a group member; it
+// sets all date fields to the beginning of the day.
+//
+// This is designed to resolve a long-time bug that has since been fixed with the time selector on the
+// frontend. This function is intended to be used as a one-time fix for existing databases and may be
+// removed in the future.
+func (e *ItemsRepository) ZeroOutTimeFields(ctx context.Context, GID uuid.UUID) (int, error) {
+ q := e.db.Item.Query().Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.Or(
+ item.PurchaseTimeNotNil(),
+ item.PurchaseFromLT("0002-01-01"),
+ item.SoldTimeNotNil(),
+ item.SoldToLT("0002-01-01"),
+ item.WarrantyExpiresNotNil(),
+ item.WarrantyDetailsLT("0002-01-01"),
+ ),
+ )
+
+ items, err := q.All(ctx)
+ if err != nil {
+ return -1, fmt.Errorf("ZeroOutTimeFields() -> failed to get items: %w", err)
+ }
+
+ toDateOnly := func(t time.Time) time.Time {
+ return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+ }
+
+ updated := 0
+
+ for _, i := range items {
+ updateQ := e.db.Item.Update().Where(item.ID(i.ID))
+
+ if !i.PurchaseTime.IsZero() {
+ switch {
+ case i.PurchaseTime.Year() < 100:
+ updateQ.ClearPurchaseTime()
+ default:
+ updateQ.SetPurchaseTime(toDateOnly(i.PurchaseTime))
+ }
+ } else {
+ updateQ.ClearPurchaseTime()
+ }
+
+ if !i.SoldTime.IsZero() {
+ switch {
+ case i.SoldTime.Year() < 100:
+ updateQ.ClearSoldTime()
+ default:
+ updateQ.SetSoldTime(toDateOnly(i.SoldTime))
+ }
+ } else {
+ updateQ.ClearSoldTime()
+ }
+
+ if !i.WarrantyExpires.IsZero() {
+ switch {
+ case i.WarrantyExpires.Year() < 100:
+ updateQ.ClearWarrantyExpires()
+ default:
+ updateQ.SetWarrantyExpires(toDateOnly(i.WarrantyExpires))
+ }
+ } else {
+ updateQ.ClearWarrantyExpires()
+ }
+
+ _, err = updateQ.Save(ctx)
+ if err != nil {
+ return updated, fmt.Errorf("ZeroOutTimeFields() -> failed to update item: %w", err)
+ }
+
+ updated++
+ }
+
+ return updated, nil
+}
+
+func (e *ItemsRepository) SetPrimaryPhotos(ctx context.Context, GID uuid.UUID) (int, error) {
+ // All items where there is no primary photo
+ itemIDs, err := e.db.Item.Query().
+ Where(
+ item.HasGroupWith(group.ID(GID)),
+ item.HasAttachmentsWith(
+ attachment.TypeEQ(attachment.TypePhoto),
+ attachment.Not(
+ attachment.And(
+ attachment.Primary(true),
+ attachment.TypeEQ(attachment.TypePhoto),
+ ),
+ ),
+ ),
+ ).
+ IDs(ctx)
+ if err != nil {
+ return -1, err
+ }
+
+ updated := 0
+ for _, id := range itemIDs {
+ // Find the first photo attachment
+ a, err := e.db.Attachment.Query().
+ Where(
+ attachment.HasItemWith(item.ID(id)),
+ attachment.TypeEQ(attachment.TypePhoto),
+ attachment.Primary(false),
+ ).
+ First(ctx)
+ if err != nil {
+ return updated, err
+ }
+
+ // Set it as primary
+ _, err = e.db.Attachment.UpdateOne(a).
+ SetPrimary(true).
+ Save(ctx)
+ if err != nil {
+ return updated, err
+ }
+
+ updated++
+ }
+
+ return updated, nil
+}
diff --git a/backend/internal/data/repo/repo_items_test.go b/backend/internal/data/repo/repo_items_test.go
index 4b958b0..9d60596 100644
--- a/backend/internal/data/repo/repo_items_test.go
+++ b/backend/internal/data/repo/repo_items_test.go
@@ -6,7 +6,9 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func itemFactory() ItemCreate {
@@ -20,7 +22,7 @@ func useItems(t *testing.T, len int) []ItemOut {
t.Helper()
location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
items := make([]ItemOut, len)
for i := 0; i < len; i++ {
@@ -28,7 +30,7 @@ func useItems(t *testing.T, len int) []ItemOut {
itm.LocationID = location.ID
item, err := tRepos.Items.Create(context.Background(), tGroup.ID, itm)
- assert.NoError(t, err)
+ require.NoError(t, err)
items[i] = item
}
@@ -36,6 +38,8 @@ func useItems(t *testing.T, len int) []ItemOut {
for _, item := range items {
_ = tRepos.Items.Delete(context.Background(), item.ID)
}
+
+ _ = tRepos.Locations.delete(context.Background(), location.ID)
})
return items
@@ -57,23 +61,22 @@ func TestItemsRepository_RecursiveRelationships(t *testing.T) {
// Append Parent ID
_, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, update)
- assert.NoError(t, err)
+ require.NoError(t, err)
// Check Parent ID
updated, err := tRepos.Items.GetOne(context.Background(), child.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, parent.ID, updated.Parent.ID)
// Remove Parent ID
update.ParentID = uuid.Nil
_, err = tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, update)
- assert.NoError(t, err)
+ require.NoError(t, err)
// Check Parent ID
updated, err = tRepos.Items.GetOne(context.Background(), child.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Nil(t, updated.Parent)
-
}
}
@@ -82,7 +85,7 @@ func TestItemsRepository_GetOne(t *testing.T) {
for _, item := range entity {
result, err := tRepos.Items.GetOne(context.Background(), item.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, item.ID, result.ID)
}
}
@@ -92,9 +95,9 @@ func TestItemsRepository_GetAll(t *testing.T) {
expected := useItems(t, length)
results, err := tRepos.Items.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
- assert.Equal(t, length, len(results))
+ assert.Len(t, results, length)
for _, item := range results {
for _, expectedItem := range expected {
@@ -109,24 +112,23 @@ func TestItemsRepository_GetAll(t *testing.T) {
func TestItemsRepository_Create(t *testing.T) {
location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
itm := itemFactory()
itm.LocationID = location.ID
result, err := tRepos.Items.Create(context.Background(), tGroup.ID, itm)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotEmpty(t, result.ID)
// Cleanup - Also deletes item
- err = tRepos.Locations.Delete(context.Background(), location.ID)
- assert.NoError(t, err)
-
+ err = tRepos.Locations.delete(context.Background(), location.ID)
+ require.NoError(t, err)
}
func TestItemsRepository_Create_Location(t *testing.T) {
location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotEmpty(t, location.ID)
item := itemFactory()
@@ -134,18 +136,18 @@ func TestItemsRepository_Create_Location(t *testing.T) {
// Create Resource
result, err := tRepos.Items.Create(context.Background(), tGroup.ID, item)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.NotEmpty(t, result.ID)
// Get Resource
foundItem, err := tRepos.Items.GetOne(context.Background(), result.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, result.ID, foundItem.ID)
assert.Equal(t, location.ID, foundItem.Location.ID)
// Cleanup - Also deletes item
- err = tRepos.Locations.Delete(context.Background(), location.ID)
- assert.NoError(t, err)
+ err = tRepos.Locations.delete(context.Background(), location.ID)
+ require.NoError(t, err)
}
func TestItemsRepository_Delete(t *testing.T) {
@@ -153,11 +155,11 @@ func TestItemsRepository_Delete(t *testing.T) {
for _, item := range entities {
err := tRepos.Items.Delete(context.Background(), item.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
}
results, err := tRepos.Items.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Empty(t, results)
}
@@ -210,7 +212,7 @@ func TestItemsRepository_Update_Labels(t *testing.T) {
}
updated, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, updateData)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Len(t, tt.want, len(updated.Labels))
for _, label := range updated.Labels {
@@ -218,7 +220,6 @@ func TestItemsRepository_Update_Labels(t *testing.T) {
}
})
}
-
}
func TestItemsRepository_Update(t *testing.T) {
@@ -234,24 +235,24 @@ func TestItemsRepository_Update(t *testing.T) {
LabelIDs: nil,
ModelNumber: fk.Str(10),
Manufacturer: fk.Str(10),
- PurchaseTime: time.Now(),
+ PurchaseTime: types.DateFromTime(time.Now()),
PurchaseFrom: fk.Str(10),
PurchasePrice: 300.99,
- SoldTime: time.Now(),
+ SoldTime: types.DateFromTime(time.Now()),
SoldTo: fk.Str(10),
SoldPrice: 300.99,
SoldNotes: fk.Str(10),
Notes: fk.Str(10),
- WarrantyExpires: time.Now(),
+ WarrantyExpires: types.DateFromTime(time.Now()),
WarrantyDetails: fk.Str(10),
LifetimeWarranty: true,
}
updatedEntity, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, updateData)
- assert.NoError(t, err)
+ require.NoError(t, err)
got, err := tRepos.Items.GetOne(context.Background(), updatedEntity.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, updateData.ID, got.ID)
assert.Equal(t, updateData.Name, got.Name)
@@ -261,13 +262,58 @@ func TestItemsRepository_Update(t *testing.T) {
assert.Equal(t, updateData.Manufacturer, got.Manufacturer)
// assert.Equal(t, updateData.PurchaseTime, got.PurchaseTime)
assert.Equal(t, updateData.PurchaseFrom, got.PurchaseFrom)
- assert.Equal(t, updateData.PurchasePrice, got.PurchasePrice)
+ assert.InDelta(t, updateData.PurchasePrice, got.PurchasePrice, 0.01)
// assert.Equal(t, updateData.SoldTime, got.SoldTime)
assert.Equal(t, updateData.SoldTo, got.SoldTo)
- assert.Equal(t, updateData.SoldPrice, got.SoldPrice)
+ assert.InDelta(t, updateData.SoldPrice, got.SoldPrice, 0.01)
assert.Equal(t, updateData.SoldNotes, got.SoldNotes)
assert.Equal(t, updateData.Notes, got.Notes)
// assert.Equal(t, updateData.WarrantyExpires, got.WarrantyExpires)
assert.Equal(t, updateData.WarrantyDetails, got.WarrantyDetails)
assert.Equal(t, updateData.LifetimeWarranty, got.LifetimeWarranty)
}
+
+func TestItemRepository_GetAllCustomFields(t *testing.T) {
+ const FieldsCount = 5
+
+ entity := useItems(t, 1)[0]
+
+ fields := make([]ItemField, FieldsCount)
+ names := make([]string, FieldsCount)
+ values := make([]string, FieldsCount)
+
+ for i := 0; i < FieldsCount; i++ {
+ name := fk.Str(10)
+ fields[i] = ItemField{
+ Name: name,
+ Type: "text",
+ TextValue: fk.Str(10),
+ }
+ names[i] = name
+ values[i] = fields[i].TextValue
+ }
+
+ _, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, ItemUpdate{
+ ID: entity.ID,
+ Name: entity.Name,
+ LocationID: entity.Location.ID,
+ Fields: fields,
+ })
+
+ require.NoError(t, err)
+
+ // Test getting all fields
+ {
+ results, err := tRepos.Items.GetAllCustomFieldNames(context.Background(), tGroup.ID)
+ require.NoError(t, err)
+ assert.ElementsMatch(t, names, results)
+ }
+
+ // Test getting all values from field
+ {
+ results, err := tRepos.Items.GetAllCustomFieldValues(context.Background(), tUser.GroupID, names[0])
+
+ require.NoError(t, err)
+ assert.ElementsMatch(t, values[:1], results)
+ }
+}
diff --git a/backend/internal/data/repo/repo_labels.go b/backend/internal/data/repo/repo_labels.go
index 9d5b11a..2358f9c 100644
--- a/backend/internal/data/repo/repo_labels.go
+++ b/backend/internal/data/repo/repo_labels.go
@@ -5,27 +5,29 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
- "github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
type LabelRepository struct {
- db *ent.Client
+ db *ent.Client
+ bus *eventbus.EventBus
}
+
type (
LabelCreate struct {
- Name string `json:"name"`
- Description string `json:"description"`
+ Name string `json:"name" validate:"required,min=1,max=255"`
+ Description string `json:"description" validate:"max=255"`
Color string `json:"color"`
}
LabelUpdate struct {
ID uuid.UUID `json:"id"`
- Name string `json:"name"`
- Description string `json:"description"`
+ Name string `json:"name" validate:"required,min=1,max=255"`
+ Description string `json:"description" validate:"max=255"`
Color string `json:"color"`
}
@@ -39,7 +41,6 @@ type (
LabelOut struct {
LabelSummary
- Items []ItemSummary `json:"items"`
}
)
@@ -61,7 +62,12 @@ var (
func mapLabelOut(label *ent.Label) LabelOut {
return LabelOut{
LabelSummary: mapLabelSummary(label),
- Items: mapEach(label.Edges.Items, mapItemSummary),
+ }
+}
+
+func (r *LabelRepository) publishMutationEvent(GID uuid.UUID) {
+ if r.bus != nil {
+ r.bus.Publish(eventbus.EventLabelMutation, eventbus.GroupMutationEvent{GID: GID})
}
}
@@ -69,9 +75,6 @@ func (r *LabelRepository) getOne(ctx context.Context, where ...predicate.Label)
return mapLabelOutErr(r.db.Label.Query().
Where(where...).
WithGroup().
- WithItems(func(iq *ent.ItemQuery) {
- iq.Where(item.Archived(false))
- }).
Only(ctx),
)
}
@@ -84,28 +87,28 @@ func (r *LabelRepository) GetOneByGroup(ctx context.Context, gid, ld uuid.UUID)
return r.getOne(ctx, label.ID(ld), label.HasGroupWith(group.ID(gid)))
}
-func (r *LabelRepository) GetAll(ctx context.Context, groupId uuid.UUID) ([]LabelSummary, error) {
+func (r *LabelRepository) GetAll(ctx context.Context, groupID uuid.UUID) ([]LabelSummary, error) {
return mapLabelsOut(r.db.Label.Query().
- Where(label.HasGroupWith(group.ID(groupId))).
+ Where(label.HasGroupWith(group.ID(groupID))).
Order(ent.Asc(label.FieldName)).
WithGroup().
All(ctx),
)
}
-func (r *LabelRepository) Create(ctx context.Context, groupdId uuid.UUID, data LabelCreate) (LabelOut, error) {
+func (r *LabelRepository) Create(ctx context.Context, groupID uuid.UUID, data LabelCreate) (LabelOut, error) {
label, err := r.db.Label.Create().
SetName(data.Name).
SetDescription(data.Description).
SetColor(data.Color).
- SetGroupID(groupdId).
+ SetGroupID(groupID).
Save(ctx)
-
if err != nil {
return LabelOut{}, err
}
- label.Edges.Group = &ent.Group{ID: groupdId} // bootstrap group ID
+ label.Edges.Group = &ent.Group{ID: groupID} // bootstrap group ID
+ r.publishMutationEvent(groupID)
return mapLabelOut(label), err
}
@@ -122,25 +125,19 @@ func (r *LabelRepository) update(ctx context.Context, data LabelUpdate, where ..
Save(ctx)
}
-func (r *LabelRepository) Update(ctx context.Context, data LabelUpdate) (LabelOut, error) {
- _, err := r.update(ctx, data, label.ID(data.ID))
- if err != nil {
- return LabelOut{}, err
- }
-
- return r.GetOne(ctx, data.ID)
-}
-
func (r *LabelRepository) UpdateByGroup(ctx context.Context, GID uuid.UUID, data LabelUpdate) (LabelOut, error) {
_, err := r.update(ctx, data, label.ID(data.ID), label.HasGroupWith(group.ID(GID)))
if err != nil {
return LabelOut{}, err
}
+ r.publishMutationEvent(GID)
return r.GetOne(ctx, data.ID)
}
-func (r *LabelRepository) Delete(ctx context.Context, id uuid.UUID) error {
+// delete removes the label from the database. This should only be used when
+// the label's ownership is already confirmed/validated.
+func (r *LabelRepository) delete(ctx context.Context, id uuid.UUID) error {
return r.db.Label.DeleteOneID(id).Exec(ctx)
}
@@ -150,6 +147,11 @@ func (r *LabelRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID)
label.ID(id),
label.HasGroupWith(group.ID(gid)),
).Exec(ctx)
+ if err != nil {
+ return err
+ }
- return err
+ r.publishMutationEvent(gid)
+
+ return nil
}
diff --git a/backend/internal/data/repo/repo_labels_test.go b/backend/internal/data/repo/repo_labels_test.go
index 691b915..8b1d66f 100644
--- a/backend/internal/data/repo/repo_labels_test.go
+++ b/backend/internal/data/repo/repo_labels_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func labelFactory() LabelCreate {
@@ -22,13 +23,13 @@ func useLabels(t *testing.T, len int) []LabelOut {
itm := labelFactory()
item, err := tRepos.Labels.Create(context.Background(), tGroup.ID, itm)
- assert.NoError(t, err)
+ require.NoError(t, err)
labels[i] = item
}
t.Cleanup(func() {
for _, item := range labels {
- _ = tRepos.Labels.Delete(context.Background(), item.ID)
+ _ = tRepos.Labels.delete(context.Background(), item.ID)
}
})
@@ -41,7 +42,7 @@ func TestLabelRepository_Get(t *testing.T) {
// Get by ID
foundLoc, err := tRepos.Labels.GetOne(context.Background(), label.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, label.ID, foundLoc.ID)
}
@@ -49,26 +50,26 @@ func TestLabelRepositoryGetAll(t *testing.T) {
useLabels(t, 10)
all, err := tRepos.Labels.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Len(t, all, 10)
}
func TestLabelRepository_Create(t *testing.T) {
loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
// Get by ID
foundLoc, err := tRepos.Labels.GetOne(context.Background(), loc.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, loc.ID, foundLoc.ID)
- err = tRepos.Labels.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Labels.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
}
func TestLabelRepository_Update(t *testing.T) {
loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
updateData := LabelUpdate{
ID: loc.ID,
@@ -76,27 +77,27 @@ func TestLabelRepository_Update(t *testing.T) {
Description: fk.Str(100),
}
- update, err := tRepos.Labels.Update(context.Background(), updateData)
- assert.NoError(t, err)
+ update, err := tRepos.Labels.UpdateByGroup(context.Background(), tGroup.ID, updateData)
+ require.NoError(t, err)
foundLoc, err := tRepos.Labels.GetOne(context.Background(), loc.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, update.ID, foundLoc.ID)
assert.Equal(t, update.Name, foundLoc.Name)
assert.Equal(t, update.Description, foundLoc.Description)
- err = tRepos.Labels.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Labels.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
}
func TestLabelRepository_Delete(t *testing.T) {
loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
- err = tRepos.Labels.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Labels.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
_, err = tRepos.Labels.GetOne(context.Background(), loc.ID)
- assert.Error(t, err)
+ require.Error(t, err)
}
diff --git a/backend/internal/data/repo/repo_locations.go b/backend/internal/data/repo/repo_locations.go
index f7d63eb..fd98fd7 100644
--- a/backend/internal/data/repo/repo_locations.go
+++ b/backend/internal/data/repo/repo_locations.go
@@ -2,28 +2,31 @@ package repo
import (
"context"
+ "strings"
"time"
"github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
- "github.com/hay-kot/homebox/backend/internal/data/ent/item"
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)
type LocationRepository struct {
- db *ent.Client
+ db *ent.Client
+ bus *eventbus.EventBus
}
type (
LocationCreate struct {
- Name string `json:"name"`
- Description string `json:"description"`
+ Name string `json:"name"`
+ ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"`
+ Description string `json:"description"`
}
LocationUpdate struct {
- ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"`
+ ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"`
ID uuid.UUID `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
@@ -45,7 +48,6 @@ type (
LocationOut struct {
Parent *LocationSummary `json:"parent,omitempty"`
LocationSummary
- Items []ItemSummary `json:"items"`
Children []LocationSummary `json:"children"`
}
)
@@ -60,9 +62,7 @@ func mapLocationSummary(location *ent.Location) LocationSummary {
}
}
-var (
- mapLocationOutErr = mapTErrFunc(mapLocationOut)
-)
+var mapLocationOutErr = mapTErrFunc(mapLocationOut)
func mapLocationOut(location *ent.Location) LocationOut {
var parent *LocationSummary
@@ -86,12 +86,21 @@ func mapLocationOut(location *ent.Location) LocationOut {
CreatedAt: location.CreatedAt,
UpdatedAt: location.UpdatedAt,
},
- Items: mapEach(location.Edges.Items, mapItemSummary),
}
}
-// GetALlWithCount returns all locations with item count field populated
-func (r *LocationRepository) GetAll(ctx context.Context, groupId uuid.UUID) ([]LocationOutCount, error) {
+func (r *LocationRepository) publishMutationEvent(GID uuid.UUID) {
+ if r.bus != nil {
+ r.bus.Publish(eventbus.EventLocationMutation, eventbus.GroupMutationEvent{GID: GID})
+ }
+}
+
+type LocationQuery struct {
+ FilterChildren bool `json:"filterChildren" schema:"filterChildren"`
+}
+
+// GetAll returns all locations with item count field populated
+func (r *LocationRepository) GetAll(ctx context.Context, GID uuid.UUID, filter LocationQuery) ([]LocationOutCount, error) {
query := `--sql
SELECT
id,
@@ -101,7 +110,7 @@ func (r *LocationRepository) GetAll(ctx context.Context, groupId uuid.UUID) ([]L
updated_at,
(
SELECT
- COUNT(*)
+ SUM(items.quantity)
FROM
items
WHERE
@@ -111,26 +120,38 @@ func (r *LocationRepository) GetAll(ctx context.Context, groupId uuid.UUID) ([]L
FROM
locations
WHERE
- locations.group_locations = ?
- AND locations.location_children IS NULL
+ locations.group_locations = ? {{ FILTER_CHILDREN }}
ORDER BY
locations.name ASC
`
- rows, err := r.db.Sql().QueryContext(ctx, query, groupId)
+ if filter.FilterChildren {
+ query = strings.Replace(query, "{{ FILTER_CHILDREN }}", "AND locations.location_children IS NULL", 1)
+ } else {
+ query = strings.Replace(query, "{{ FILTER_CHILDREN }}", "", 1)
+ }
+
+ rows, err := r.db.Sql().QueryContext(ctx, query, GID)
if err != nil {
return nil, err
}
+ defer func() { _ = rows.Close() }()
list := []LocationOutCount{}
for rows.Next() {
var ct LocationOutCount
- err := rows.Scan(&ct.ID, &ct.Name, &ct.Description, &ct.CreatedAt, &ct.UpdatedAt, &ct.ItemCount)
+ var maybeCount *int
+
+ err := rows.Scan(&ct.ID, &ct.Name, &ct.Description, &ct.CreatedAt, &ct.UpdatedAt, &maybeCount)
if err != nil {
return nil, err
}
+ if maybeCount != nil {
+ ct.ItemCount = *maybeCount
+ }
+
list = append(list, ct)
}
@@ -141,9 +162,6 @@ func (r *LocationRepository) getOne(ctx context.Context, where ...predicate.Loca
return mapLocationOutErr(r.db.Location.Query().
Where(where...).
WithGroup().
- WithItems(func(iq *ent.ItemQuery) {
- iq.Where(item.Archived(false)).WithLabel()
- }).
WithParent().
WithChildren().
Only(ctx))
@@ -158,17 +176,22 @@ func (r *LocationRepository) GetOneByGroup(ctx context.Context, GID, ID uuid.UUI
}
func (r *LocationRepository) Create(ctx context.Context, GID uuid.UUID, data LocationCreate) (LocationOut, error) {
- location, err := r.db.Location.Create().
+ q := r.db.Location.Create().
SetName(data.Name).
SetDescription(data.Description).
- SetGroupID(GID).
- Save(ctx)
+ SetGroupID(GID)
+ if data.ParentID != uuid.Nil {
+ q.SetParentID(data.ParentID)
+ }
+
+ location, err := q.Save(ctx)
if err != nil {
return LocationOut{}, err
}
location.Edges.Group = &ent.Group{ID: GID} // bootstrap group ID
+ r.publishMutationEvent(GID)
return mapLocationOut(location), nil
}
@@ -192,19 +215,243 @@ func (r *LocationRepository) update(ctx context.Context, data LocationUpdate, wh
return r.Get(ctx, data.ID)
}
-func (r *LocationRepository) Update(ctx context.Context, data LocationUpdate) (LocationOut, error) {
- return r.update(ctx, data, location.ID(data.ID))
+func (r *LocationRepository) UpdateByGroup(ctx context.Context, GID, ID uuid.UUID, data LocationUpdate) (LocationOut, error) {
+ v, err := r.update(ctx, data, location.ID(ID), location.HasGroupWith(group.ID(GID)))
+ if err != nil {
+ return LocationOut{}, err
+ }
+
+ r.publishMutationEvent(GID)
+ return v, err
}
-func (r *LocationRepository) UpdateOneByGroup(ctx context.Context, GID, ID uuid.UUID, data LocationUpdate) (LocationOut, error) {
- return r.update(ctx, data, location.ID(ID), location.HasGroupWith(group.ID(GID)))
-}
-
-func (r *LocationRepository) Delete(ctx context.Context, ID uuid.UUID) error {
+// delete should only be used after checking that the location is owned by the
+// group. Otherwise, use DeleteByGroup
+func (r *LocationRepository) delete(ctx context.Context, ID uuid.UUID) error {
return r.db.Location.DeleteOneID(ID).Exec(ctx)
}
func (r *LocationRepository) DeleteByGroup(ctx context.Context, GID, ID uuid.UUID) error {
_, err := r.db.Location.Delete().Where(location.ID(ID), location.HasGroupWith(group.ID(GID))).Exec(ctx)
+ if err != nil {
+ return err
+ }
+ r.publishMutationEvent(GID)
+
return err
}
+
+type TreeItem struct {
+ ID uuid.UUID `json:"id"`
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Children []*TreeItem `json:"children"`
+}
+
+type FlatTreeItem struct {
+ ID uuid.UUID
+ Name string
+ Type string
+ ParentID uuid.UUID
+ Level int
+}
+
+type TreeQuery struct {
+ WithItems bool `json:"withItems" schema:"withItems"`
+}
+
+type ItemType string
+
+const (
+ ItemTypeLocation ItemType = "location"
+ ItemTypeItem ItemType = "item"
+)
+
+type ItemPath struct {
+ Type ItemType `json:"type"`
+ ID uuid.UUID `json:"id"`
+ Name string `json:"name"`
+}
+
+func (r *LocationRepository) PathForLoc(ctx context.Context, GID, locID uuid.UUID) ([]ItemPath, error) {
+ query := `WITH RECURSIVE location_path AS (
+ SELECT id, name, location_children
+ FROM locations
+ WHERE id = ? -- Replace ? with the ID of the item's location
+ AND group_locations = ? -- Replace ? with the ID of the group
+
+ UNION ALL
+
+ SELECT loc.id, loc.name, loc.location_children
+ FROM locations loc
+ JOIN location_path lp ON loc.id = lp.location_children
+ )
+
+ SELECT id, name
+ FROM location_path`
+
+ rows, err := r.db.Sql().QueryContext(ctx, query, locID, GID)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = rows.Close() }()
+
+ var locations []ItemPath
+
+ for rows.Next() {
+ var location ItemPath
+ location.Type = ItemTypeLocation
+ if err := rows.Scan(&location.ID, &location.Name); err != nil {
+ return nil, err
+ }
+ locations = append(locations, location)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ // Reverse the order of the locations so that the root is last
+ for i := len(locations)/2 - 1; i >= 0; i-- {
+ opp := len(locations) - 1 - i
+ locations[i], locations[opp] = locations[opp], locations[i]
+ }
+
+ return locations, nil
+}
+
+func (r *LocationRepository) Tree(ctx context.Context, GID uuid.UUID, tq TreeQuery) ([]TreeItem, error) {
+ query := `
+ WITH recursive location_tree(id, NAME, parent_id, level, node_type) AS
+ (
+ SELECT id,
+ NAME,
+ location_children AS parent_id,
+ 0 AS level,
+ 'location' AS node_type
+ FROM locations
+ WHERE location_children IS NULL
+ AND group_locations = ?
+
+ UNION ALL
+ SELECT c.id,
+ c.NAME,
+ c.location_children AS parent_id,
+ level + 1,
+ 'location' AS node_type
+ FROM locations c
+ JOIN location_tree p
+ ON c.location_children = p.id
+ WHERE level < 10 -- prevent infinite loop & excessive recursion
+ ){{ WITH_ITEMS }}
+
+ SELECT id,
+ NAME,
+ level,
+ parent_id,
+ node_type
+ FROM (
+ SELECT *
+ FROM location_tree
+
+
+ {{ WITH_ITEMS_FROM }}
+
+
+ ) tree
+ ORDER BY node_type DESC, -- sort locations before items
+ level,
+ lower(NAME)`
+
+ if tq.WithItems {
+ itemQuery := `, item_tree(id, NAME, parent_id, level, node_type) AS
+ (
+ SELECT id,
+ NAME,
+ location_items as parent_id,
+ 0 AS level,
+ 'item' AS node_type
+ FROM items
+ WHERE item_children IS NULL
+ AND location_items IN (SELECT id FROM location_tree)
+
+ UNION ALL
+
+ SELECT c.id,
+ c.NAME,
+ c.item_children AS parent_id,
+ level + 1,
+ 'item' AS node_type
+ FROM items c
+ JOIN item_tree p
+ ON c.item_children = p.id
+ WHERE c.item_children IS NOT NULL
+ AND level < 10 -- prevent infinite loop & excessive recursion
+ )`
+
+ // Conditional table joined to main query
+ itemsFrom := `
+ UNION ALL
+ SELECT *
+ FROM item_tree`
+
+ query = strings.ReplaceAll(query, "{{ WITH_ITEMS }}", itemQuery)
+ query = strings.ReplaceAll(query, "{{ WITH_ITEMS_FROM }}", itemsFrom)
+ } else {
+ query = strings.ReplaceAll(query, "{{ WITH_ITEMS }}", "")
+ query = strings.ReplaceAll(query, "{{ WITH_ITEMS_FROM }}", "")
+ }
+
+ rows, err := r.db.Sql().QueryContext(ctx, query, GID)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = rows.Close() }()
+
+ var locations []FlatTreeItem
+ for rows.Next() {
+ var location FlatTreeItem
+ if err := rows.Scan(&location.ID, &location.Name, &location.Level, &location.ParentID, &location.Type); err != nil {
+ return nil, err
+ }
+ locations = append(locations, location)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return ConvertLocationsToTree(locations), nil
+}
+
+func ConvertLocationsToTree(locations []FlatTreeItem) []TreeItem {
+ locationMap := make(map[uuid.UUID]*TreeItem, len(locations))
+
+ var rootIds []uuid.UUID
+
+ for _, location := range locations {
+ loc := &TreeItem{
+ ID: location.ID,
+ Name: location.Name,
+ Type: location.Type,
+ Children: []*TreeItem{},
+ }
+
+ locationMap[location.ID] = loc
+ if location.ParentID != uuid.Nil {
+ parent, ok := locationMap[location.ParentID]
+ if ok {
+ parent.Children = append(parent.Children, loc)
+ }
+ } else {
+ rootIds = append(rootIds, location.ID)
+ }
+ }
+
+ roots := make([]TreeItem, 0, len(rootIds))
+ for _, id := range rootIds {
+ roots = append(roots, *locationMap[id])
+ }
+
+ return roots
+}
diff --git a/backend/internal/data/repo/repo_locations_test.go b/backend/internal/data/repo/repo_locations_test.go
index d48779d..e8b353c 100644
--- a/backend/internal/data/repo/repo_locations_test.go
+++ b/backend/internal/data/repo/repo_locations_test.go
@@ -2,9 +2,13 @@ package repo
import (
"context"
+ "encoding/json"
"testing"
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func locationFactory() LocationCreate {
@@ -14,62 +18,78 @@ func locationFactory() LocationCreate {
}
}
+func useLocations(t *testing.T, len int) []LocationOut {
+ t.Helper()
+
+ out := make([]LocationOut, len)
+
+ for i := 0; i < len; i++ {
+ loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
+ require.NoError(t, err)
+ out[i] = loc
+ }
+
+ t.Cleanup(func() {
+ for _, loc := range out {
+ err := tRepos.Locations.delete(context.Background(), loc.ID)
+ if err != nil {
+ assert.True(t, ent.IsNotFound(err))
+ }
+ }
+ })
+
+ return out
+}
+
func TestLocationRepository_Get(t *testing.T) {
loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
// Get by ID
foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, loc.ID, foundLoc.ID)
- err = tRepos.Locations.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Locations.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
}
func TestLocationRepositoryGetAllWithCount(t *testing.T) {
ctx := context.Background()
- result, err := tRepos.Locations.Create(ctx, tGroup.ID, LocationCreate{
- Name: fk.Str(10),
- Description: fk.Str(100),
- })
- assert.NoError(t, err)
+ result := useLocations(t, 1)[0]
- _, err = tRepos.Items.Create(ctx, tGroup.ID, ItemCreate{
+ _, err := tRepos.Items.Create(ctx, tGroup.ID, ItemCreate{
Name: fk.Str(10),
Description: fk.Str(100),
LocationID: result.ID,
})
- assert.NoError(t, err)
+ require.NoError(t, err)
- results, err := tRepos.Locations.GetAll(context.Background(), tGroup.ID)
- assert.NoError(t, err)
+ results, err := tRepos.Locations.GetAll(context.Background(), tGroup.ID, LocationQuery{})
+ require.NoError(t, err)
for _, loc := range results {
if loc.ID == result.ID {
assert.Equal(t, 1, loc.ItemCount)
}
}
-
}
func TestLocationRepository_Create(t *testing.T) {
- loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ loc := useLocations(t, 1)[0]
// Get by ID
foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, loc.ID, foundLoc.ID)
- err = tRepos.Locations.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Locations.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
}
func TestLocationRepository_Update(t *testing.T) {
- loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ loc := useLocations(t, 1)[0]
updateData := LocationUpdate{
ID: loc.ID,
@@ -77,27 +97,197 @@ func TestLocationRepository_Update(t *testing.T) {
Description: fk.Str(100),
}
- update, err := tRepos.Locations.Update(context.Background(), updateData)
- assert.NoError(t, err)
+ update, err := tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, updateData.ID, updateData)
+ require.NoError(t, err)
foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, update.ID, foundLoc.ID)
assert.Equal(t, update.Name, foundLoc.Name)
assert.Equal(t, update.Description, foundLoc.Description)
- err = tRepos.Locations.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err = tRepos.Locations.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
}
func TestLocationRepository_Delete(t *testing.T) {
- loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory())
- assert.NoError(t, err)
+ loc := useLocations(t, 1)[0]
- err = tRepos.Locations.Delete(context.Background(), loc.ID)
- assert.NoError(t, err)
+ err := tRepos.Locations.delete(context.Background(), loc.ID)
+ require.NoError(t, err)
_, err = tRepos.Locations.Get(context.Background(), loc.ID)
- assert.Error(t, err)
+ require.Error(t, err)
+}
+
+func TestItemRepository_TreeQuery(t *testing.T) {
+ locs := useLocations(t, 3)
+
+ // Set relations
+ _, err := tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, locs[0].ID, LocationUpdate{
+ ID: locs[0].ID,
+ ParentID: locs[1].ID,
+ Name: locs[0].Name,
+ Description: locs[0].Description,
+ })
+ require.NoError(t, err)
+
+ locations, err := tRepos.Locations.Tree(context.Background(), tGroup.ID, TreeQuery{WithItems: true})
+
+ require.NoError(t, err)
+
+ assert.Len(t, locations, 2)
+
+ // Check roots
+ for _, loc := range locations {
+ if loc.ID == locs[1].ID {
+ assert.Len(t, loc.Children, 1)
+ }
+ }
+}
+
+func TestLocationRepository_PathForLoc(t *testing.T) {
+ locs := useLocations(t, 3)
+
+ // Set relations 3 -> 2 -> 1
+ for i := 0; i < 2; i++ {
+ _, err := tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, locs[i].ID, LocationUpdate{
+ ID: locs[i].ID,
+ ParentID: locs[i+1].ID,
+ Name: locs[i].Name,
+ Description: locs[i].Description,
+ })
+ require.NoError(t, err)
+ }
+
+ last := locs[0]
+
+ path, err := tRepos.Locations.PathForLoc(context.Background(), tGroup.ID, last.ID)
+
+ require.NoError(t, err)
+ assert.Len(t, path, 3)
+
+ // Check path and order
+ for i, loc := range path {
+ assert.Equal(t, locs[2-i].ID, loc.ID)
+ assert.Equal(t, locs[2-i].Name, loc.Name)
+ }
+}
+
+func TestConvertLocationsToTree(t *testing.T) {
+ uuid1, uuid2, uuid3, uuid4 := uuid.New(), uuid.New(), uuid.New(), uuid.New()
+
+ testCases := []struct {
+ name string
+ locations []FlatTreeItem
+ expected []TreeItem
+ }{
+ {
+ name: "Convert locations to tree",
+ locations: []FlatTreeItem{
+ {
+ ID: uuid1,
+ Name: "Root1",
+ ParentID: uuid.Nil,
+ Level: 0,
+ },
+ {
+ ID: uuid2,
+ Name: "Child1",
+ ParentID: uuid1,
+ Level: 1,
+ },
+ {
+ ID: uuid3,
+ Name: "Child2",
+ ParentID: uuid1,
+ Level: 1,
+ },
+ },
+ expected: []TreeItem{
+ {
+ ID: uuid1,
+ Name: "Root1",
+ Children: []*TreeItem{
+ {
+ ID: uuid2,
+ Name: "Child1",
+ Children: []*TreeItem{},
+ },
+ {
+ ID: uuid3,
+ Name: "Child2",
+ Children: []*TreeItem{},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Convert locations to tree with deeply nested children",
+ locations: []FlatTreeItem{
+ {
+ ID: uuid1,
+ Name: "Root1",
+ ParentID: uuid.Nil,
+ Level: 0,
+ },
+ {
+ ID: uuid2,
+ Name: "Child1",
+ ParentID: uuid1,
+ Level: 1,
+ },
+ {
+ ID: uuid3,
+ Name: "Child2",
+ ParentID: uuid2,
+ Level: 2,
+ },
+ {
+ ID: uuid4,
+ Name: "Child3",
+ ParentID: uuid3,
+ Level: 3,
+ },
+ },
+ expected: []TreeItem{
+ {
+ ID: uuid1,
+ Name: "Root1",
+ Children: []*TreeItem{
+ {
+ ID: uuid2,
+ Name: "Child1",
+ Children: []*TreeItem{
+ {
+ ID: uuid3,
+ Name: "Child2",
+ Children: []*TreeItem{
+ {
+ ID: uuid4,
+ Name: "Child3",
+ Children: []*TreeItem{},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ result := ConvertLocationsToTree(tc.locations)
+
+ // Compare JSON strings
+ expected, _ := json.Marshal(tc.expected)
+ got, _ := json.Marshal(result)
+ assert.Equal(t, string(expected), string(got))
+ })
+ }
}
diff --git a/backend/internal/data/repo/repo_maintenance_entry.go b/backend/internal/data/repo/repo_maintenance_entry.go
new file mode 100644
index 0000000..2714bbd
--- /dev/null
+++ b/backend/internal/data/repo/repo_maintenance_entry.go
@@ -0,0 +1,207 @@
+package repo
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/group"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/item"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+)
+
+// MaintenanceEntryRepository is a repository for maintenance entries that are
+// associated with an item in the database. An entry represents a maintenance event
+// that has been performed on an item.
+type MaintenanceEntryRepository struct {
+ db *ent.Client
+}
+
+type MaintenanceEntryCreate struct {
+ CompletedDate types.Date `json:"completedDate"`
+ ScheduledDate types.Date `json:"scheduledDate"`
+ Name string `json:"name" validate:"required"`
+ Description string `json:"description"`
+ Cost float64 `json:"cost,string"`
+}
+
+func (mc MaintenanceEntryCreate) Validate() error {
+ if mc.CompletedDate.Time().IsZero() && mc.ScheduledDate.Time().IsZero() {
+ return errors.New("either completedDate or scheduledDate must be set")
+ }
+ return nil
+}
+
+type MaintenanceEntryUpdate struct {
+ CompletedDate types.Date `json:"completedDate"`
+ ScheduledDate types.Date `json:"scheduledDate"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Cost float64 `json:"cost,string"`
+}
+
+func (mu MaintenanceEntryUpdate) Validate() error {
+ if mu.CompletedDate.Time().IsZero() && mu.ScheduledDate.Time().IsZero() {
+ return errors.New("either completedDate or scheduledDate must be set")
+ }
+ return nil
+}
+
+type (
+ MaintenanceEntry struct {
+ ID uuid.UUID `json:"id"`
+ CompletedDate types.Date `json:"completedDate"`
+ ScheduledDate types.Date `json:"scheduledDate"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Cost float64 `json:"cost,string"`
+ }
+
+ MaintenanceLog struct {
+ ItemID uuid.UUID `json:"itemId"`
+ CostAverage float64 `json:"costAverage"`
+ CostTotal float64 `json:"costTotal"`
+ Entries []MaintenanceEntry `json:"entries"`
+ }
+)
+
+var (
+ mapMaintenanceEntryErr = mapTErrFunc(mapMaintenanceEntry)
+ mapEachMaintenanceEntry = mapTEachFunc(mapMaintenanceEntry)
+)
+
+func mapMaintenanceEntry(entry *ent.MaintenanceEntry) MaintenanceEntry {
+ return MaintenanceEntry{
+ ID: entry.ID,
+ CompletedDate: types.Date(entry.Date),
+ ScheduledDate: types.Date(entry.ScheduledDate),
+ Name: entry.Name,
+ Description: entry.Description,
+ Cost: entry.Cost,
+ }
+}
+
+func (r *MaintenanceEntryRepository) GetScheduled(ctx context.Context, GID uuid.UUID, dt types.Date) ([]MaintenanceEntry, error) {
+ entries, err := r.db.MaintenanceEntry.Query().
+ Where(
+ maintenanceentry.HasItemWith(
+ item.HasGroupWith(group.ID(GID)),
+ ),
+ maintenanceentry.ScheduledDate(dt.Time()),
+ maintenanceentry.Or(
+ maintenanceentry.DateIsNil(),
+ maintenanceentry.DateEQ(time.Time{}),
+ ),
+ ).
+ All(ctx)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return mapEachMaintenanceEntry(entries), nil
+}
+
+func (r *MaintenanceEntryRepository) Create(ctx context.Context, itemID uuid.UUID, input MaintenanceEntryCreate) (MaintenanceEntry, error) {
+ item, err := r.db.MaintenanceEntry.Create().
+ SetItemID(itemID).
+ SetDate(input.CompletedDate.Time()).
+ SetScheduledDate(input.ScheduledDate.Time()).
+ SetName(input.Name).
+ SetDescription(input.Description).
+ SetCost(input.Cost).
+ Save(ctx)
+
+ return mapMaintenanceEntryErr(item, err)
+}
+
+func (r *MaintenanceEntryRepository) Update(ctx context.Context, ID uuid.UUID, input MaintenanceEntryUpdate) (MaintenanceEntry, error) {
+ item, err := r.db.MaintenanceEntry.UpdateOneID(ID).
+ SetDate(input.CompletedDate.Time()).
+ SetScheduledDate(input.ScheduledDate.Time()).
+ SetName(input.Name).
+ SetDescription(input.Description).
+ SetCost(input.Cost).
+ Save(ctx)
+
+ return mapMaintenanceEntryErr(item, err)
+}
+
+type MaintenanceLogQuery struct {
+ Completed bool `json:"completed" schema:"completed"`
+ Scheduled bool `json:"scheduled" schema:"scheduled"`
+}
+
+func (r *MaintenanceEntryRepository) GetLog(ctx context.Context, groupID, itemID uuid.UUID, query MaintenanceLogQuery) (MaintenanceLog, error) {
+ log := MaintenanceLog{
+ ItemID: itemID,
+ }
+
+ q := r.db.MaintenanceEntry.Query().Where(
+ maintenanceentry.ItemID(itemID),
+ maintenanceentry.HasItemWith(
+ item.HasGroupWith(group.IDEQ(groupID)),
+ ),
+ )
+
+ if query.Completed {
+ q = q.Where(maintenanceentry.And(
+ maintenanceentry.DateNotNil(),
+ maintenanceentry.DateNEQ(time.Time{}),
+ ))
+ } else if query.Scheduled {
+ q = q.Where(maintenanceentry.And(
+ maintenanceentry.Or(
+ maintenanceentry.DateIsNil(),
+ maintenanceentry.DateEQ(time.Time{}),
+ ),
+ maintenanceentry.ScheduledDateNotNil(),
+ maintenanceentry.ScheduledDateNEQ(time.Time{}),
+ ))
+ }
+
+ entries, err := q.Order(ent.Desc(maintenanceentry.FieldDate)).
+ All(ctx)
+ if err != nil {
+ return MaintenanceLog{}, err
+ }
+
+ log.Entries = mapEachMaintenanceEntry(entries)
+
+ var maybeTotal *float64
+ var maybeAverage *float64
+
+ statement := `
+SELECT
+ SUM(cost_total) AS total_of_totals,
+ AVG(cost_total) AS avg_of_averages
+FROM
+ (
+ SELECT
+ strftime('%m-%Y', date) AS my,
+ SUM(cost) AS cost_total
+ FROM
+ maintenance_entries
+ WHERE
+ item_id = ?
+ GROUP BY
+ my
+ )`
+
+ row := r.db.Sql().QueryRowContext(ctx, statement, itemID)
+ err = row.Scan(&maybeTotal, &maybeAverage)
+ if err != nil {
+ return MaintenanceLog{}, err
+ }
+
+ log.CostAverage = orDefault(maybeAverage, 0)
+ log.CostTotal = orDefault(maybeTotal, 0)
+ return log, nil
+}
+
+func (r *MaintenanceEntryRepository) Delete(ctx context.Context, ID uuid.UUID) error {
+ return r.db.MaintenanceEntry.DeleteOneID(ID).Exec(ctx)
+}
diff --git a/backend/internal/data/repo/repo_maintenance_entry_test.go b/backend/internal/data/repo/repo_maintenance_entry_test.go
new file mode 100644
index 0000000..0fa288c
--- /dev/null
+++ b/backend/internal/data/repo/repo_maintenance_entry_test.go
@@ -0,0 +1,87 @@
+package repo
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/hay-kot/homebox/backend/internal/data/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// get the previous month from the current month, accounts for errors when run
+// near the beginning or end of the month/year
+func getPrevMonth(now time.Time) time.Time {
+ t := now.AddDate(0, -1, 0)
+
+ // avoid infinite loop
+ max := 15
+ for t.Month() == now.Month() {
+ t = t.AddDate(0, 0, -1)
+
+ max--
+ if max == 0 {
+ panic("max exceeded")
+ }
+ }
+
+ return t
+}
+
+func TestMaintenanceEntryRepository_GetLog(t *testing.T) {
+	item := useItems(t, 1)[0]
+
+	// Create 10 maintenance entries for the item
+	created := make([]MaintenanceEntryCreate, 10)
+
+	thisMonth := time.Now()
+	lastMonth := getPrevMonth(thisMonth)
+
+	for i := 0; i < 10; i++ {
+		dt := lastMonth
+		if i%2 == 0 {
+			dt = thisMonth
+		}
+
+		created[i] = MaintenanceEntryCreate{
+			CompletedDate: types.DateFromTime(dt),
+			Name:          "Maintenance",
+			Description:   "Maintenance description",
+			Cost:          10,
+		}
+	}
+
+	for _, entry := range created {
+		_, err := tRepos.MaintEntry.Create(context.Background(), item.ID, entry)
+		if err != nil {
+			t.Fatalf("failed to create maintenance entry: %v", err)
+		}
+	}
+
+	// Get the log for the item
+	log, err := tRepos.MaintEntry.GetLog(context.Background(), tGroup.ID, item.ID, MaintenanceLogQuery{
+		Completed: true,
+	})
+	if err != nil {
+		t.Fatalf("failed to get maintenance log: %v", err)
+	}
+
+	assert.Equal(t, item.ID, log.ItemID)
+	assert.Len(t, log.Entries, 10)
+
+	// Calculate the average cost
+	var total float64
+
+	for _, entry := range log.Entries {
+		total += entry.Cost
+	}
+
+	assert.InDelta(t, total, log.CostTotal, .001, "total cost should be equal to the sum of all entries")
+	assert.InDelta(t, total/2, log.CostAverage, .001, "average cost should be the average of the two months")
+
+	for _, entry := range log.Entries {
+		err := tRepos.MaintEntry.Delete(context.Background(), entry.ID)
+		require.NoError(t, err)
+	}
+}
diff --git a/backend/internal/data/repo/repo_notifier.go b/backend/internal/data/repo/repo_notifier.go
new file mode 100644
index 0000000..f31be4b
--- /dev/null
+++ b/backend/internal/data/repo/repo_notifier.go
@@ -0,0 +1,120 @@
+package repo
+
+import (
+ "context"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/homebox/backend/internal/data/ent"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
+)
+
+type NotifierRepository struct {
+ db *ent.Client
+ mapper MapFunc[*ent.Notifier, NotifierOut]
+}
+
+func NewNotifierRepository(db *ent.Client) *NotifierRepository {
+ return &NotifierRepository{
+ db: db,
+ mapper: func(n *ent.Notifier) NotifierOut {
+ return NotifierOut{
+ ID: n.ID,
+ UserID: n.UserID,
+ GroupID: n.GroupID,
+ CreatedAt: n.CreatedAt,
+ UpdatedAt: n.UpdatedAt,
+
+ Name: n.Name,
+ IsActive: n.IsActive,
+ URL: n.URL,
+ }
+ },
+ }
+}
+
+type (
+ NotifierCreate struct {
+ Name string `json:"name" validate:"required,min=1,max=255"`
+ IsActive bool `json:"isActive"`
+ URL string `json:"url" validate:"required,shoutrrr"`
+ }
+
+ NotifierUpdate struct {
+ Name string `json:"name" validate:"required,min=1,max=255"`
+ IsActive bool `json:"isActive"`
+ URL *string `json:"url" validate:"omitempty,shoutrrr" extensions:"x-nullable"`
+ }
+
+ NotifierOut struct {
+ ID uuid.UUID `json:"id"`
+ UserID uuid.UUID `json:"userId"`
+ GroupID uuid.UUID `json:"groupId"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+
+ Name string `json:"name"`
+ IsActive bool `json:"isActive"`
+ URL string `json:"-"` // URL field is not exposed to the client
+ }
+)
+
+func (r *NotifierRepository) GetByUser(ctx context.Context, userID uuid.UUID) ([]NotifierOut, error) {
+ notifier, err := r.db.Notifier.Query().
+ Where(notifier.UserID(userID)).
+ Order(ent.Asc(notifier.FieldName)).
+ All(ctx)
+
+ return r.mapper.MapEachErr(notifier, err)
+}
+
+func (r *NotifierRepository) GetByGroup(ctx context.Context, groupID uuid.UUID) ([]NotifierOut, error) {
+ notifier, err := r.db.Notifier.Query().
+ Where(notifier.GroupID(groupID)).
+ Order(ent.Asc(notifier.FieldName)).
+ All(ctx)
+
+ return r.mapper.MapEachErr(notifier, err)
+}
+
+func (r *NotifierRepository) GetActiveByGroup(ctx context.Context, groupID uuid.UUID) ([]NotifierOut, error) {
+ notifier, err := r.db.Notifier.Query().
+ Where(notifier.GroupID(groupID), notifier.IsActive(true)).
+ Order(ent.Asc(notifier.FieldName)).
+ All(ctx)
+
+ return r.mapper.MapEachErr(notifier, err)
+}
+
+func (r *NotifierRepository) Create(ctx context.Context, groupID, userID uuid.UUID, input NotifierCreate) (NotifierOut, error) {
+ notifier, err := r.db.Notifier.
+ Create().
+ SetGroupID(groupID).
+ SetUserID(userID).
+ SetName(input.Name).
+ SetIsActive(input.IsActive).
+ SetURL(input.URL).
+ Save(ctx)
+
+ return r.mapper.MapErr(notifier, err)
+}
+
+func (r *NotifierRepository) Update(ctx context.Context, userID uuid.UUID, id uuid.UUID, input NotifierUpdate) (NotifierOut, error) {
+	// Filter by owner (as Delete does) so one user cannot update another user's notifier.
+	q := r.db.Notifier.UpdateOneID(id).
+		Where(notifier.UserID(userID)).
+		SetName(input.Name).
+		SetIsActive(input.IsActive)
+
+	if input.URL != nil {
+		q.SetURL(*input.URL)
+	}
+
+	notifier, err := q.Save(ctx)
+	return r.mapper.MapErr(notifier, err)
+}
+
+func (r *NotifierRepository) Delete(ctx context.Context, userID uuid.UUID, ID uuid.UUID) error {
+ _, err := r.db.Notifier.Delete().Where(notifier.UserID(userID), notifier.ID(ID)).Exec(ctx)
+ return err
+}
diff --git a/backend/internal/data/repo/repo_tokens.go b/backend/internal/data/repo/repo_tokens.go
index 7d9115b..42843e0 100644
--- a/backend/internal/data/repo/repo_tokens.go
+++ b/backend/internal/data/repo/repo_tokens.go
@@ -6,7 +6,10 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent"
+ "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
+ "github.com/hay-kot/homebox/backend/pkgs/hasher"
+ "github.com/hay-kot/homebox/backend/pkgs/set"
)
type TokenRepository struct {
@@ -39,7 +42,6 @@ func (r *TokenRepository) GetUserFromToken(ctx context.Context, token []byte) (U
QueryUser().
WithGroup().
Only(ctx)
-
if err != nil {
return UserOut{}, err
}
@@ -47,19 +49,49 @@ func (r *TokenRepository) GetUserFromToken(ctx context.Context, token []byte) (U
return mapUserOut(user), nil
}
-// Creates a token for a user
-func (r *TokenRepository) CreateToken(ctx context.Context, createToken UserAuthTokenCreate) (UserAuthToken, error) {
+func (r *TokenRepository) GetRoles(ctx context.Context, token string) (*set.Set[string], error) {
+ tokenHash := hasher.HashToken(token)
+ roles, err := r.db.AuthRoles.
+ Query().
+ Where(authroles.HasTokenWith(
+ authtokens.Token(tokenHash),
+ )).
+ All(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ roleSet := set.Make[string](len(roles))
+
+ for _, role := range roles {
+ roleSet.Insert(role.Role.String())
+ }
+
+ return &roleSet, nil
+}
+
+// CreateToken Creates a token for a user
+func (r *TokenRepository) CreateToken(ctx context.Context, createToken UserAuthTokenCreate, roles ...authroles.Role) (UserAuthToken, error) {
dbToken, err := r.db.AuthTokens.Create().
SetToken(createToken.TokenHash).
SetUserID(createToken.UserID).
SetExpiresAt(createToken.ExpiresAt).
Save(ctx)
-
if err != nil {
return UserAuthToken{}, err
}
+ for _, role := range roles {
+ _, err := r.db.AuthRoles.Create().
+ SetRole(role).
+ SetToken(dbToken).
+ Save(ctx)
+ if err != nil {
+ return UserAuthToken{}, err
+ }
+ }
+
return UserAuthToken{
UserAuthTokenCreate: UserAuthTokenCreate{
TokenHash: dbToken.Token,
@@ -79,7 +111,6 @@ func (r *TokenRepository) DeleteToken(ctx context.Context, token []byte) error {
// PurgeExpiredTokens removes all expired tokens from the database
func (r *TokenRepository) PurgeExpiredTokens(ctx context.Context) (int, error) {
tokensDeleted, err := r.db.AuthTokens.Delete().Where(authtokens.ExpiresAtLTE(time.Now())).Exec(ctx)
-
if err != nil {
return 0, err
}
diff --git a/backend/internal/data/repo/repo_tokens_test.go b/backend/internal/data/repo/repo_tokens_test.go
index e066911..a0b4375 100644
--- a/backend/internal/data/repo/repo_tokens_test.go
+++ b/backend/internal/data/repo/repo_tokens_test.go
@@ -7,15 +7,15 @@ import (
"github.com/hay-kot/homebox/backend/pkgs/hasher"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestAuthTokenRepo_CreateToken(t *testing.T) {
- asrt := assert.New(t)
ctx := context.Background()
user := userFactory()
userOut, err := tRepos.Users.Create(ctx, user)
- asrt.NoError(err)
+ require.NoError(t, err)
expiresAt := time.Now().Add(time.Hour)
@@ -27,23 +27,22 @@ func TestAuthTokenRepo_CreateToken(t *testing.T) {
UserID: userOut.ID,
})
- asrt.NoError(err)
- asrt.Equal(userOut.ID, token.UserID)
- asrt.Equal(expiresAt, token.ExpiresAt)
+ require.NoError(t, err)
+ assert.Equal(t, userOut.ID, token.UserID)
+ assert.Equal(t, expiresAt, token.ExpiresAt)
// Cleanup
- asrt.NoError(tRepos.Users.Delete(ctx, userOut.ID))
+ require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID))
_, err = tRepos.AuthTokens.DeleteAll(ctx)
- asrt.NoError(err)
+ require.NoError(t, err)
}
func TestAuthTokenRepo_DeleteToken(t *testing.T) {
- asrt := assert.New(t)
ctx := context.Background()
user := userFactory()
userOut, err := tRepos.Users.Create(ctx, user)
- asrt.NoError(err)
+ require.NoError(t, err)
expiresAt := time.Now().Add(time.Hour)
@@ -54,15 +53,14 @@ func TestAuthTokenRepo_DeleteToken(t *testing.T) {
ExpiresAt: expiresAt,
UserID: userOut.ID,
})
- asrt.NoError(err)
+ require.NoError(t, err)
// Delete token
err = tRepos.AuthTokens.DeleteToken(ctx, []byte(generatedToken.Raw))
- asrt.NoError(err)
+ require.NoError(t, err)
}
func TestAuthTokenRepo_GetUserByToken(t *testing.T) {
- assert := assert.New(t)
ctx := context.Background()
user := userFactory()
@@ -77,24 +75,23 @@ func TestAuthTokenRepo_GetUserByToken(t *testing.T) {
UserID: userOut.ID,
})
- assert.NoError(err)
+ require.NoError(t, err)
// Get User from token
foundUser, err := tRepos.AuthTokens.GetUserFromToken(ctx, token.TokenHash)
- assert.NoError(err)
- assert.Equal(userOut.ID, foundUser.ID)
- assert.Equal(userOut.Name, foundUser.Name)
- assert.Equal(userOut.Email, foundUser.Email)
+ require.NoError(t, err)
+ assert.Equal(t, userOut.ID, foundUser.ID)
+ assert.Equal(t, userOut.Name, foundUser.Name)
+ assert.Equal(t, userOut.Email, foundUser.Email)
// Cleanup
- assert.NoError(tRepos.Users.Delete(ctx, userOut.ID))
+ require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID))
_, err = tRepos.AuthTokens.DeleteAll(ctx)
- assert.NoError(err)
+ require.NoError(t, err)
}
func TestAuthTokenRepo_PurgeExpiredTokens(t *testing.T) {
- assert := assert.New(t)
ctx := context.Background()
user := userFactory()
@@ -112,27 +109,26 @@ func TestAuthTokenRepo_PurgeExpiredTokens(t *testing.T) {
UserID: userOut.ID,
})
- assert.NoError(err)
- assert.NotNil(createdToken)
+ require.NoError(t, err)
+ assert.NotNil(t, createdToken)
createdTokens = append(createdTokens, createdToken)
-
}
// Purge expired tokens
tokensDeleted, err := tRepos.AuthTokens.PurgeExpiredTokens(ctx)
- assert.NoError(err)
- assert.Equal(5, tokensDeleted)
+ require.NoError(t, err)
+ assert.Equal(t, 5, tokensDeleted)
// Check if tokens are deleted
for _, token := range createdTokens {
_, err := tRepos.AuthTokens.GetUserFromToken(ctx, token.TokenHash)
- assert.Error(err)
+ require.Error(t, err)
}
// Cleanup
- assert.NoError(tRepos.Users.Delete(ctx, userOut.ID))
+ require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID))
_, err = tRepos.AuthTokens.DeleteAll(ctx)
- assert.NoError(err)
+ require.NoError(t, err)
}
diff --git a/backend/internal/data/repo/repo_users.go b/backend/internal/data/repo/repo_users.go
index 0eaa127..68b1eb5 100644
--- a/backend/internal/data/repo/repo_users.go
+++ b/backend/internal/data/repo/repo_users.go
@@ -60,32 +60,32 @@ func mapUserOut(user *ent.User) UserOut {
}
}
-func (e *UserRepository) GetOneId(ctx context.Context, id uuid.UUID) (UserOut, error) {
- return mapUserOutErr(e.db.User.Query().
- Where(user.ID(id)).
+func (r *UserRepository) GetOneID(ctx context.Context, ID uuid.UUID) (UserOut, error) {
+ return mapUserOutErr(r.db.User.Query().
+ Where(user.ID(ID)).
WithGroup().
Only(ctx))
}
-func (e *UserRepository) GetOneEmail(ctx context.Context, email string) (UserOut, error) {
- return mapUserOutErr(e.db.User.Query().
- Where(user.Email(email)).
+func (r *UserRepository) GetOneEmail(ctx context.Context, email string) (UserOut, error) {
+ return mapUserOutErr(r.db.User.Query().
+ Where(user.EmailEqualFold(email)).
WithGroup().
Only(ctx),
)
}
-func (e *UserRepository) GetAll(ctx context.Context) ([]UserOut, error) {
- return mapUsersOutErr(e.db.User.Query().WithGroup().All(ctx))
+func (r *UserRepository) GetAll(ctx context.Context) ([]UserOut, error) {
+ return mapUsersOutErr(r.db.User.Query().WithGroup().All(ctx))
}
-func (e *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, error) {
+func (r *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, error) {
role := user.RoleUser
if usr.IsOwner {
role = user.RoleOwner
}
- entUser, err := e.db.User.
+ entUser, err := r.db.User.
Create().
SetName(usr.Name).
SetEmail(usr.Email).
@@ -98,11 +98,11 @@ func (e *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, e
return UserOut{}, err
}
- return e.GetOneId(ctx, entUser.ID)
+ return r.GetOneID(ctx, entUser.ID)
}
-func (e *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpdate) error {
- q := e.db.User.Update().
+func (r *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpdate) error {
+ q := r.db.User.Update().
Where(user.ID(ID)).
SetName(data.Name).
SetEmail(data.Email)
@@ -111,19 +111,18 @@ func (e *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpda
return err
}
-func (e *UserRepository) Delete(ctx context.Context, id uuid.UUID) error {
- _, err := e.db.User.Delete().Where(user.ID(id)).Exec(ctx)
+func (r *UserRepository) Delete(ctx context.Context, id uuid.UUID) error {
+ _, err := r.db.User.Delete().Where(user.ID(id)).Exec(ctx)
return err
}
-func (e *UserRepository) DeleteAll(ctx context.Context) error {
- _, err := e.db.User.Delete().Exec(ctx)
+func (r *UserRepository) DeleteAll(ctx context.Context) error {
+ _, err := r.db.User.Delete().Exec(ctx)
return err
}
-func (e *UserRepository) GetSuperusers(ctx context.Context) ([]*ent.User, error) {
- users, err := e.db.User.Query().Where(user.IsSuperuser(true)).All(ctx)
-
+func (r *UserRepository) GetSuperusers(ctx context.Context) ([]*ent.User, error) {
+ users, err := r.db.User.Query().Where(user.IsSuperuser(true)).All(ctx)
if err != nil {
return nil, err
}
diff --git a/backend/internal/data/repo/repo_users_test.go b/backend/internal/data/repo/repo_users_test.go
index 882de1c..ef85f44 100644
--- a/backend/internal/data/repo/repo_users_test.go
+++ b/backend/internal/data/repo/repo_users_test.go
@@ -2,10 +2,10 @@ package repo
import (
"context"
- "fmt"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func userFactory() UserCreate {
@@ -24,18 +24,18 @@ func TestUserRepo_GetOneEmail(t *testing.T) {
ctx := context.Background()
_, err := tRepos.Users.Create(ctx, user)
- assert.NoError(err)
+ require.NoError(t, err)
foundUser, err := tRepos.Users.GetOneEmail(ctx, user.Email)
assert.NotNil(foundUser)
- assert.Nil(err)
+ require.NoError(t, err)
assert.Equal(user.Email, foundUser.Email)
assert.Equal(user.Name, foundUser.Name)
// Cleanup
err = tRepos.Users.DeleteAll(ctx)
- assert.NoError(err)
+ require.NoError(t, err)
}
func TestUserRepo_GetOneId(t *testing.T) {
@@ -44,16 +44,16 @@ func TestUserRepo_GetOneId(t *testing.T) {
ctx := context.Background()
userOut, _ := tRepos.Users.Create(ctx, user)
- foundUser, err := tRepos.Users.GetOneId(ctx, userOut.ID)
+ foundUser, err := tRepos.Users.GetOneID(ctx, userOut.ID)
assert.NotNil(foundUser)
- assert.Nil(err)
+ require.NoError(t, err)
assert.Equal(user.Email, foundUser.Email)
assert.Equal(user.Name, foundUser.Name)
// Cleanup
err = tRepos.Users.DeleteAll(ctx)
- assert.NoError(err)
+ require.NoError(t, err)
}
func TestUserRepo_GetAll(t *testing.T) {
@@ -77,11 +77,10 @@ func TestUserRepo_GetAll(t *testing.T) {
// Validate
allUsers, err := tRepos.Users.GetAll(ctx)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Equal(t, len(created), len(allUsers))
for _, usr := range created {
- fmt.Printf("%+v\n", usr)
for _, usr2 := range allUsers {
if usr.ID == usr2.ID {
assert.Equal(t, usr.Email, usr2.Email)
@@ -98,12 +97,12 @@ func TestUserRepo_GetAll(t *testing.T) {
// Cleanup
err = tRepos.Users.DeleteAll(ctx)
- assert.NoError(t, err)
+ require.NoError(t, err)
}
func TestUserRepo_Update(t *testing.T) {
user, err := tRepos.Users.Create(context.Background(), userFactory())
- assert.NoError(t, err)
+ require.NoError(t, err)
updateData := UserUpdate{
Name: fk.Str(10),
@@ -112,11 +111,11 @@ func TestUserRepo_Update(t *testing.T) {
// Update
err = tRepos.Users.Update(context.Background(), user.ID, updateData)
- assert.NoError(t, err)
+ require.NoError(t, err)
// Validate
- updated, err := tRepos.Users.GetOneId(context.Background(), user.ID)
- assert.NoError(t, err)
+ updated, err := tRepos.Users.GetOneID(context.Background(), user.ID)
+ require.NoError(t, err)
assert.NotEqual(t, user.Name, updated.Name)
assert.NotEqual(t, user.Email, updated.Email)
}
@@ -133,13 +132,12 @@ func TestUserRepo_Delete(t *testing.T) {
ctx := context.Background()
allUsers, _ := tRepos.Users.GetAll(ctx)
- assert.Greater(t, len(allUsers), 0)
+ assert.NotEmpty(t, allUsers)
err := tRepos.Users.DeleteAll(ctx)
- assert.NoError(t, err)
+ require.NoError(t, err)
allUsers, _ = tRepos.Users.GetAll(ctx)
- assert.Equal(t, len(allUsers), 0)
-
+ assert.Empty(t, allUsers)
}
func TestUserRepo_GetSuperusers(t *testing.T) {
@@ -163,7 +161,7 @@ func TestUserRepo_GetSuperusers(t *testing.T) {
ctx := context.Background()
superUsers, err := tRepos.Users.GetSuperusers(ctx)
- assert.NoError(t, err)
+ require.NoError(t, err)
for _, usr := range superUsers {
assert.True(t, usr.IsSuperuser)
@@ -171,5 +169,5 @@ func TestUserRepo_GetSuperusers(t *testing.T) {
// Cleanup
err = tRepos.Users.DeleteAll(ctx)
- assert.NoError(t, err)
+ require.NoError(t, err)
}
diff --git a/backend/internal/data/repo/repos_all.go b/backend/internal/data/repo/repos_all.go
index e726e88..2ccc022 100644
--- a/backend/internal/data/repo/repos_all.go
+++ b/backend/internal/data/repo/repos_all.go
@@ -1,6 +1,10 @@
+// Package repo provides the data access layer for the application.
package repo
-import "github.com/hay-kot/homebox/backend/internal/data/ent"
+import (
+ "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
+ "github.com/hay-kot/homebox/backend/internal/data/ent"
+)
// AllRepos is a container for all the repository interfaces
type AllRepos struct {
@@ -11,20 +15,22 @@ type AllRepos struct {
Labels *LabelRepository
Items *ItemsRepository
Docs *DocumentRepository
- DocTokens *DocumentTokensRepository
Attachments *AttachmentRepo
+ MaintEntry *MaintenanceEntryRepository
+ Notifiers *NotifierRepository
}
-func New(db *ent.Client, root string) *AllRepos {
+func New(db *ent.Client, bus *eventbus.EventBus, root string) *AllRepos {
return &AllRepos{
Users: &UserRepository{db},
AuthTokens: &TokenRepository{db},
- Groups: &GroupRepository{db},
- Locations: &LocationRepository{db},
- Labels: &LabelRepository{db},
- Items: &ItemsRepository{db},
+ Groups: NewGroupRepository(db),
+ Locations: &LocationRepository{db, bus},
+ Labels: &LabelRepository{db, bus},
+ Items: &ItemsRepository{db, bus},
Docs: &DocumentRepository{db, root},
- DocTokens: &DocumentTokensRepository{db},
Attachments: &AttachmentRepo{db},
+ MaintEntry: &MaintenanceEntryRepository{db},
+ Notifiers: NewNotifierRepository(db),
}
}
diff --git a/backend/internal/data/types/date.go b/backend/internal/data/types/date.go
new file mode 100644
index 0000000..9401e06
--- /dev/null
+++ b/backend/internal/data/types/date.go
@@ -0,0 +1,108 @@
+// Package types provides custom types for the application.
+package types
+
+import (
+ "errors"
+ "strings"
+ "time"
+)
+
+// Date is a custom type that implements the MarshalJSON interface
+// that applies date only formatting to the time.Time fields in order
+// to avoid common time and timezone pitfalls when working with Times.
+//
+// Examples:
+//
+// "2019-01-01" -> time.Time{2019-01-01 00:00:00 +0000 UTC}
+// "2019-01-01T21:10:30Z" -> time.Time{2019-01-01 00:00:00 +0000 UTC}
+// "2019-01-01T21:10:30+01:00" -> time.Time{2019-01-01 00:00:00 +0000 UTC}
+type Date time.Time
+
+func (d Date) Time() time.Time {
+ return time.Time(d)
+}
+
+// DateFromTime returns a Date type from a time.Time type by stripping
+// the time and timezone information.
+func DateFromTime(t time.Time) Date {
+ dateOnlyTime := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
+ return Date(dateOnlyTime)
+}
+
+// DateFromString returns a Date type from a string by parsing the
+// string into a time.Time type and then stripping the time and
+// timezone information.
+//
+// Errors are ignored and an empty Date is returned.
+func DateFromString(s string) Date {
+ if s == "" {
+ return Date{}
+ }
+
+ try := [...]string{
+ "2006-01-02",
+ "01/02/2006",
+ "2006/01/02",
+ time.RFC3339,
+ }
+
+ for _, format := range try {
+ t, err := time.Parse(format, s)
+ if err == nil {
+ return DateFromTime(t)
+ }
+ }
+
+ return Date{}
+}
+
+func (d Date) String() string {
+ if time.Time(d).IsZero() {
+ return ""
+ }
+
+ return time.Time(d).Format("2006-01-02")
+}
+
+func (d Date) MarshalJSON() ([]byte, error) {
+ if time.Time(d).IsZero() {
+ return []byte(`""`), nil
+ }
+
+ return []byte(`"` + d.String() + `"`), nil
+}
+
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+	// unescape the string if necessary `\"` -> `"`
+	str := strings.Trim(string(data), "\"")
+	if str == "" || str == "null" || str == `""` {
+		*d = Date{}
+		return nil
+	}
+
+	// Accept the same layouts as DateFromString so the two entry points agree.
+	try := [...]string{
+		"2006-01-02",
+		"01/02/2006",
+		"2006/01/02",
+		time.RFC3339,
+	}
+
+	set := false
+	var t time.Time
+	for _, format := range try {
+		t, err = time.Parse(format, str)
+		if err == nil {
+			set = true
+			break
+		}
+	}
+
+	if !set {
+		return errors.New("invalid date format")
+	}
+
+	// strip the time and timezone information
+	*d = DateFromTime(t)
+	return nil
+}
diff --git a/backend/internal/sys/config/conf.go b/backend/internal/sys/config/conf.go
index 8e55756..8b7b23c 100644
--- a/backend/internal/sys/config/conf.go
+++ b/backend/internal/sys/config/conf.go
@@ -1,13 +1,14 @@
+// Package config provides the configuration for the application.
package config
import (
"encoding/json"
"errors"
"fmt"
-
- "github.com/ardanlabs/conf/v2"
-
"os"
+ "time"
+
+ "github.com/ardanlabs/conf/v3"
)
const (
@@ -16,41 +17,49 @@ const (
)
type Config struct {
- Mode string `yaml:"mode" conf:"default:development"` // development or production
- Web WebConfig `yaml:"web"`
- Storage Storage `yaml:"storage"`
- Log LoggerConf `yaml:"logger"`
- Mailer MailerConf `yaml:"mailer"`
- Swagger SwaggerConf `yaml:"swagger"`
- Demo bool `yaml:"demo"`
- AllowRegistration bool `yaml:"disable_registration" conf:"default:true"`
- Debug DebugConf `yaml:"debug"`
+ conf.Version
+ Mode string `yaml:"mode" conf:"default:development"` // development or production
+ Web WebConfig `yaml:"web"`
+ Storage Storage `yaml:"storage"`
+ Log LoggerConf `yaml:"logger"`
+ Mailer MailerConf `yaml:"mailer"`
+ Demo bool `yaml:"demo"`
+ Debug DebugConf `yaml:"debug"`
+ Options Options `yaml:"options"`
+}
+
+type Options struct {
+ AllowRegistration bool `yaml:"disable_registration" conf:"default:true"`
+ AutoIncrementAssetID bool `yaml:"auto_increment_asset_id" conf:"default:true"`
+ CurrencyConfig string `yaml:"currencies"`
}
type DebugConf struct {
Enabled bool `yaml:"enabled" conf:"default:false"`
- Port string `yaml:"port" conf:"default:4000"`
-}
-
-type SwaggerConf struct {
- Host string `yaml:"host" conf:"default:localhost:7745"`
- Scheme string `yaml:"scheme" conf:"default:http"`
+ Port string `yaml:"port" conf:"default:4000"`
}
type WebConfig struct {
- Port string `yaml:"port" conf:"default:7745"`
- Host string `yaml:"host"`
- MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"`
+ Port string `yaml:"port" conf:"default:7745"`
+ Host string `yaml:"host"`
+ MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"`
+ ReadTimeout time.Duration `yaml:"read_timeout" conf:"default:10s"`
+ WriteTimeout time.Duration `yaml:"write_timeout" conf:"default:10s"`
+ IdleTimeout time.Duration `yaml:"idle_timeout" conf:"default:30s"`
}
// New parses the CLI/Config file and returns a Config struct. If the file argument is an empty string, the
// file is not read. If the file is not empty, the file is read and the Config struct is returned.
-func New() (*Config, error) {
+func New(buildstr string, description string) (*Config, error) {
var cfg Config
const prefix = "HBOX"
- help, err := conf.Parse(prefix, &cfg)
+ cfg.Version = conf.Version{
+ Build: buildstr,
+ Desc: description,
+ }
+ help, err := conf.Parse(prefix, &cfg)
if err != nil {
if errors.Is(err, conf.ErrHelpWanted) {
fmt.Println(help)
@@ -66,11 +75,9 @@ func New() (*Config, error) {
// This is useful for debugging. If the marshaller errors out, it will panic.
func (c *Config) Print() {
res, err := json.MarshalIndent(c, "", " ")
-
if err != nil {
panic(err)
}
fmt.Println(string(res))
-
}
diff --git a/backend/internal/sys/config/conf_database.go b/backend/internal/sys/config/conf_database.go
index 69a67b9..2c6a761 100644
--- a/backend/internal/sys/config/conf_database.go
+++ b/backend/internal/sys/config/conf_database.go
@@ -6,6 +6,6 @@ const (
type Storage struct {
// Data is the path to the root directory
- Data string `yaml:"data" conf:"default:./.data"`
- SqliteUrl string `yaml:"sqlite-url" conf:"default:./.data/homebox.db?_fk=1"`
+ Data string `yaml:"data" conf:"default:./.data"`
+ SqliteURL string `yaml:"sqlite-url" conf:"default:./.data/homebox.db?_pragma=busy_timeout=999&_pragma=journal_mode=WAL&_fk=1"`
}
diff --git a/backend/internal/sys/config/conf_mailer_test.go b/backend/internal/sys/config/conf_mailer_test.go
index 8656755..6bf7c74 100644
--- a/backend/internal/sys/config/conf_mailer_test.go
+++ b/backend/internal/sys/config/conf_mailer_test.go
@@ -36,5 +36,4 @@ func Test_MailerReady_Failure(t *testing.T) {
mc.From = "from"
assert.True(t, mc.Ready())
-
}
diff --git a/backend/internal/sys/validate/errors.go b/backend/internal/sys/validate/errors.go
index d08a448..09fdf2c 100644
--- a/backend/internal/sys/validate/errors.go
+++ b/backend/internal/sys/validate/errors.go
@@ -29,7 +29,7 @@ func (err *InvalidRouteKeyError) Error() string {
return "invalid route key: " + err.key
}
-func NewInvalidRouteKeyError(key string) error {
+func NewRouteKeyError(key string) error {
return &InvalidRouteKeyError{key}
}
@@ -88,7 +88,7 @@ func (fe FieldErrors) Nil() bool {
return len(fe) == 0
}
-// Error implments the error interface.
+// Error implements the error interface.
func (fe FieldErrors) Error() string {
d, err := json.Marshal(fe)
if err != nil {
@@ -101,6 +101,10 @@ func NewFieldErrors(errs ...FieldError) FieldErrors {
return errs
}
+func NewFieldError(field, reason string) FieldError {
+ return FieldError{Field: field, Error: reason}
+}
+
func IsFieldError(err error) bool {
v := FieldErrors{}
return errors.As(err, &v)
diff --git a/backend/internal/sys/validate/validate.go b/backend/internal/sys/validate/validate.go
index ed22c0f..d9dbe24 100644
--- a/backend/internal/sys/validate/validate.go
+++ b/backend/internal/sys/validate/validate.go
@@ -1,22 +1,68 @@
+// Package validate provides a wrapper around the go-playground/validator package
package validate
-import "github.com/go-playground/validator/v10"
+import (
+ "strings"
+
+ "github.com/go-playground/validator/v10"
+)
var validate *validator.Validate
-func init() {
+func init() { // nolint
validate = validator.New()
+
+ err := validate.RegisterValidation("shoutrrr", func(fl validator.FieldLevel) bool {
+ prefixes := [...]string{
+ "bark://",
+ "discord://",
+ "smtp://",
+ "gotify://",
+ "googlechat://",
+ "ifttt://",
+ "join://",
+ "mattermost://",
+ "matrix://",
+ "ntfy://",
+ "opsgenie://",
+ "pushbullet://",
+ "pushover://",
+ "rocketchat://",
+ "slack://",
+ "teams://",
+ "telegram://",
+ "zulip://",
+ "generic://",
+ "generic+",
+ }
+
+ str := fl.Field().String()
+ if str == "" {
+ return false
+ }
+
+ for _, prefix := range prefixes {
+ if strings.HasPrefix(str, prefix) {
+ return true
+ }
+ }
+
+ return false
+ })
+
+ if err != nil {
+ panic(err)
+ }
}
-// Checks a struct for validation errors and returns any errors the occur. This
+// Check checks a struct for validation errors and returns any errors that occur. This
// wraps the validate.Struct() function and provides some error wrapping. When
// a validator.ValidationErrors is returned, it is wrapped transformed into a
// FieldErrors array and returned.
func Check(val any) error {
err := validate.Struct(val)
-
if err != nil {
- verrors, ok := err.(validator.ValidationErrors)
+ verrors, ok := err.(validator.ValidationErrors) // nolint - we know it's a validator.ValidationErrors
if !ok {
return err
}
diff --git a/backend/internal/web/adapters/actions.go b/backend/internal/web/adapters/actions.go
new file mode 100644
index 0000000..3905723
--- /dev/null
+++ b/backend/internal/web/adapters/actions.go
@@ -0,0 +1,75 @@
+package adapters
+
+import (
+ "net/http"
+
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+)
+
+// Action is a function that adapts a function to the errchain.HandlerFunc interface.
+// It decodes the request body into a value of type T and passes it to the function f.
+// The function f is expected to return a value of type Y and an error.
+//
+// Example:
+//
+// type Body struct {
+// Foo string `json:"foo"`
+// }
+//
+// fn := func(r *http.Request, b Body) (any, error) {
+// // do something with b
+// return nil, nil
+// }
+//
+// r.Post("/foo", adapters.Action(fn, http.StatusCreated))
+func Action[T any, Y any](f AdapterFunc[T, Y], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ v, err := DecodeBody[T](r)
+ if err != nil {
+ return err
+ }
+
+ res, err := f(r, v)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
+
+// ActionID functions the same as Action, but it also decodes a UUID from the URL path.
+//
+// Example:
+//
+// type Body struct {
+// Foo string `json:"foo"`
+// }
+//
+// fn := func(r *http.Request, ID uuid.UUID, b Body) (any, error) {
+// // do something with ID and b
+// return nil, nil
+// }
+//
+// r.Post("/foo/{id}", adapters.ActionID(fn, http.StatusCreated))
+func ActionID[T any, Y any](param string, f IDFunc[T, Y], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ID, err := RouteUUID(r, param)
+ if err != nil {
+ return err
+ }
+
+ v, err := DecodeBody[T](r)
+ if err != nil {
+ return err
+ }
+
+ res, err := f(r, ID, v)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
diff --git a/backend/internal/web/adapters/adapters.go b/backend/internal/web/adapters/adapters.go
new file mode 100644
index 0000000..8372a60
--- /dev/null
+++ b/backend/internal/web/adapters/adapters.go
@@ -0,0 +1,10 @@
+package adapters
+
+import (
+ "net/http"
+
+ "github.com/google/uuid"
+)
+
+type AdapterFunc[T any, Y any] func(*http.Request, T) (Y, error)
+type IDFunc[T any, Y any] func(*http.Request, uuid.UUID, T) (Y, error)
diff --git a/backend/internal/web/adapters/command.go b/backend/internal/web/adapters/command.go
new file mode 100644
index 0000000..d3d099b
--- /dev/null
+++ b/backend/internal/web/adapters/command.go
@@ -0,0 +1,62 @@
+package adapters
+
+import (
+ "net/http"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+)
+
+type CommandFunc[T any] func(*http.Request) (T, error)
+type CommandIDFunc[T any] func(*http.Request, uuid.UUID) (T, error)
+
+// Command is a HandlerAdapter that returns an errchain.HandlerFunc.
+// The command adapters are used to handle commands that do not accept a body
+// or a query. You can think of them as a way to handle RPC style Rest Endpoints.
+//
+// Example:
+//
+// fn := func(r *http.Request) (interface{}, error) {
+// // do something
+// return nil, nil
+// }
+//
+//	r.Get("/foo", adapters.Command(fn, http.StatusNoContent))
+func Command[T any](f CommandFunc[T], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ res, err := f(r)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
+
+// CommandID is the same as the Command adapter but it accepts a UUID as a parameter
+// in the URL. The parameter name is passed as the first argument.
+//
+// Example:
+//
+// fn := func(r *http.Request, id uuid.UUID) (interface{}, error) {
+// // do something
+// return nil, nil
+// }
+//
+//	r.Get("/foo/{id}", adapters.CommandID("id", fn, http.StatusNoContent))
+func CommandID[T any](param string, f CommandIDFunc[T], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ID, err := RouteUUID(r, param)
+ if err != nil {
+ return err
+ }
+
+ res, err := f(r, ID)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
diff --git a/backend/internal/web/adapters/decoders.go b/backend/internal/web/adapters/decoders.go
new file mode 100644
index 0000000..ad5b82b
--- /dev/null
+++ b/backend/internal/web/adapters/decoders.go
@@ -0,0 +1,65 @@
+package adapters
+
+import (
+ "net/http"
+
+ "github.com/pkg/errors"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/google/uuid"
+ "github.com/gorilla/schema"
+ "github.com/hay-kot/homebox/backend/internal/sys/validate"
+ "github.com/hay-kot/httpkit/server"
+)
+
+var queryDecoder = schema.NewDecoder()
+
+func DecodeQuery[T any](r *http.Request) (T, error) {
+ var v T
+ err := queryDecoder.Decode(&v, r.URL.Query())
+ if err != nil {
+ return v, errors.Wrap(err, "decoding error")
+ }
+
+ err = validate.Check(v)
+ if err != nil {
+ return v, errors.Wrap(err, "validation error")
+ }
+
+ return v, nil
+}
+
+type Validator interface {
+ Validate() error
+}
+
+func DecodeBody[T any](r *http.Request) (T, error) {
+ var val T
+
+ err := server.Decode(r, &val)
+ if err != nil {
+ return val, errors.Wrap(err, "body decoding error")
+ }
+
+ err = validate.Check(val)
+ if err != nil {
+ return val, err
+ }
+
+ if v, ok := any(val).(Validator); ok {
+ err = v.Validate()
+ if err != nil {
+ return val, errors.Wrap(err, "validation error")
+ }
+ }
+
+ return val, nil
+}
+
+func RouteUUID(r *http.Request, key string) (uuid.UUID, error) {
+ ID, err := uuid.Parse(chi.URLParam(r, key))
+ if err != nil {
+ return uuid.Nil, validate.NewRouteKeyError(key)
+ }
+ return ID, nil
+}
diff --git a/backend/internal/web/adapters/doc.go b/backend/internal/web/adapters/doc.go
new file mode 100644
index 0000000..1b6792b
--- /dev/null
+++ b/backend/internal/web/adapters/doc.go
@@ -0,0 +1,9 @@
+/*
+Package adapters offers common adapters for turning regular functions into HTTP Handlers.
+There are three types of adapters
+
+ - Query adapters
+ - Action adapters
+ - Command adapters
+*/
+package adapters
diff --git a/backend/internal/web/adapters/query.go b/backend/internal/web/adapters/query.go
new file mode 100644
index 0000000..b044475
--- /dev/null
+++ b/backend/internal/web/adapters/query.go
@@ -0,0 +1,73 @@
+package adapters
+
+import (
+ "net/http"
+
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+)
+
+// Query is an errchain.HandlerFunc adapter that decodes a query from the request and calls the provided function.
+//
+// Example:
+//
+// type Query struct {
+// Foo string `schema:"foo"`
+// }
+//
+// fn := func(r *http.Request, q Query) (any, error) {
+// // do something with q
+// return nil, nil
+// }
+//
+// r.Get("/foo", adapters.Query(fn, http.StatusOK))
+func Query[T any, Y any](f AdapterFunc[T, Y], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ q, err := DecodeQuery[T](r)
+ if err != nil {
+ return err
+ }
+
+ res, err := f(r, q)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
+
+// QueryID is an errchain.HandlerFunc adapter that decodes a query and an ID from the request and calls the provided function.
+//
+// Example:
+//
+// type Query struct {
+// Foo string `schema:"foo"`
+// }
+//
+// fn := func(r *http.Request, ID uuid.UUID, q Query) (any, error) {
+// // do something with ID and q
+// return nil, nil
+// }
+//
+// r.Get("/foo/{id}", adapters.QueryID(fn, http.StatusOK))
+func QueryID[T any, Y any](param string, f IDFunc[T, Y], ok int) errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ID, err := RouteUUID(r, param)
+ if err != nil {
+ return err
+ }
+
+ q, err := DecodeQuery[T](r)
+ if err != nil {
+ return err
+ }
+
+ res, err := f(r, ID, q)
+ if err != nil {
+ return err
+ }
+
+ return server.JSON(w, ok, res)
+ }
+}
diff --git a/backend/internal/web/mid/doc.go b/backend/internal/web/mid/doc.go
new file mode 100644
index 0000000..4f71563
--- /dev/null
+++ b/backend/internal/web/mid/doc.go
@@ -0,0 +1,2 @@
+// Package mid provides web middleware.
+package mid
diff --git a/backend/internal/web/mid/errors.go b/backend/internal/web/mid/errors.go
index 7aa659c..c8b04d6 100644
--- a/backend/internal/web/mid/errors.go
+++ b/backend/internal/web/mid/errors.go
@@ -3,38 +3,48 @@ package mid
import (
"net/http"
+ "github.com/go-chi/chi/v5/middleware"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog"
)
-func Errors(log zerolog.Logger) server.Middleware {
- return func(h server.Handler) server.Handler {
- return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- err := h.ServeHTTP(w, r)
+type ErrorResponse struct {
+ Error string `json:"error"`
+ Fields map[string]string `json:"fields,omitempty"`
+}
+func Errors(log zerolog.Logger) errchain.ErrorHandler {
+ return func(h errchain.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ err := h.ServeHTTP(w, r)
if err != nil {
- var resp server.ErrorResponse
+ var resp ErrorResponse
var code int
+ traceID := r.Context().Value(middleware.RequestIDKey).(string)
log.Err(err).
- Str("trace_id", server.GetTraceID(r.Context())).
+ Stack().
+ Str("req_id", traceID).
Msg("ERROR occurred")
switch {
case validate.IsUnauthorizedError(err):
code = http.StatusUnauthorized
- resp = server.ErrorResponse{
+ resp = ErrorResponse{
Error: "unauthorized",
}
case validate.IsInvalidRouteKeyError(err):
code = http.StatusBadRequest
- resp = server.ErrorResponse{
+ resp = ErrorResponse{
Error: err.Error(),
}
case validate.IsFieldError(err):
- fieldErrors := err.(validate.FieldErrors)
+ code = http.StatusUnprocessableEntity
+
+ fieldErrors := err.(validate.FieldErrors) // nolint
resp.Error = "Validation Error"
resp.Fields = map[string]string{}
@@ -42,29 +52,26 @@ func Errors(log zerolog.Logger) server.Middleware {
resp.Fields[fieldError.Field] = fieldError.Error
}
case validate.IsRequestError(err):
- requestError := err.(*validate.RequestError)
+ requestError := err.(*validate.RequestError) // nolint
resp.Error = requestError.Error()
- code = requestError.Status
+
+ if requestError.Status == 0 {
+ code = http.StatusBadRequest
+ } else {
+ code = requestError.Status
+ }
case ent.IsNotFound(err):
resp.Error = "Not Found"
code = http.StatusNotFound
default:
resp.Error = "Unknown Error"
code = http.StatusInternalServerError
-
}
- if err := server.Respond(w, code, resp); err != nil {
- return err
- }
-
- // If Showdown error, return error
- if server.IsShutdownError(err) {
- return err
+ if err := server.JSON(w, code, resp); err != nil {
+ log.Err(err).Msg("failed to write response")
}
}
-
- return nil
})
}
}
diff --git a/backend/internal/web/mid/logger.go b/backend/internal/web/mid/logger.go
index 86b8cdb..0be4722 100644
--- a/backend/internal/web/mid/logger.go
+++ b/backend/internal/web/mid/logger.go
@@ -1,97 +1,44 @@
package mid
import (
- "fmt"
+ "bufio"
+ "errors"
+ "net"
"net/http"
- "github.com/hay-kot/homebox/backend/pkgs/server"
+ "github.com/go-chi/chi/v5/middleware"
"github.com/rs/zerolog"
)
-type statusRecorder struct {
+type spy struct {
http.ResponseWriter
- Status int
+ status int
}
-func (r *statusRecorder) WriteHeader(status int) {
- r.Status = status
- r.ResponseWriter.WriteHeader(status)
+func (s *spy) WriteHeader(status int) {
+ s.status = status
+ s.ResponseWriter.WriteHeader(status)
}
-func Logger(log zerolog.Logger) server.Middleware {
- return func(next server.Handler) server.Handler {
- return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- traceId := server.GetTraceID(r.Context())
+func (s *spy) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj, ok := s.ResponseWriter.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("response writer does not support hijacking")
+ }
+ return hj.Hijack()
+}
- log.Info().
- Str("trace_id", traceId).
- Str("method", r.Method).
- Str("path", r.URL.Path).
- Str("remove_address", r.RemoteAddr).
- Msg("request started")
+func Logger(l zerolog.Logger) func(http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ reqID := r.Context().Value(middleware.RequestIDKey).(string)
- record := &statusRecorder{ResponseWriter: w, Status: http.StatusOK}
+ l.Info().Str("method", r.Method).Str("path", r.URL.Path).Str("rid", reqID).Msg("request received")
- err := next.ServeHTTP(record, r)
+ s := &spy{ResponseWriter: w}
+ h.ServeHTTP(s, r)
- log.Info().
- Str("trace_id", traceId).
- Str("method", r.Method).
- Str("url", r.URL.Path).
- Str("remote_address", r.RemoteAddr).
- Int("status_code", record.Status).
- Msg("request completed")
-
- return err
- })
- }
-}
-
-func SugarLogger(log zerolog.Logger) server.Middleware {
- orange := func(s string) string { return "\033[33m" + s + "\033[0m" }
- aqua := func(s string) string { return "\033[36m" + s + "\033[0m" }
- red := func(s string) string { return "\033[31m" + s + "\033[0m" }
- green := func(s string) string { return "\033[32m" + s + "\033[0m" }
-
- fmtCode := func(code int) string {
- switch {
- case code >= 500:
- return red(fmt.Sprintf("%d", code))
- case code >= 400:
- return orange(fmt.Sprintf("%d", code))
- case code >= 300:
- return aqua(fmt.Sprintf("%d", code))
- default:
- return green(fmt.Sprintf("%d", code))
- }
- }
- bold := func(s string) string { return "\033[1m" + s + "\033[0m" }
-
- atLeast6 := func(s string) string {
- for len(s) <= 6 {
- s += " "
- }
- return s
- }
-
- return func(next server.Handler) server.Handler {
- return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
-
- record := &statusRecorder{ResponseWriter: w, Status: http.StatusOK}
-
- err := next.ServeHTTP(record, r) // Blocks until the next handler returns.
-
- url := fmt.Sprintf("%s %s", r.RequestURI, r.Proto)
-
- log.Info().
- Str("trace_id", server.GetTraceID(r.Context())).
- Msgf("%s %s %s",
- bold(fmtCode(record.Status)),
- bold(orange(atLeast6(r.Method))),
- aqua(url),
- )
-
- return err
+ l.Info().Str("method", r.Method).Str("path", r.URL.Path).Int("status", s.status).Str("rid", reqID).Msg("request finished")
})
}
}
diff --git a/backend/internal/web/mid/panic.go b/backend/internal/web/mid/panic.go
deleted file mode 100644
index 9879bb8..0000000
--- a/backend/internal/web/mid/panic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package mid
-
-import (
- "fmt"
- "net/http"
- "runtime/debug"
-
- "github.com/hay-kot/homebox/backend/pkgs/server"
-)
-
-// Panic is a middleware that recovers from panics anywhere in the chain and wraps the error.
-// and returns it up the middleware chain.
-func Panic(develop bool) server.Middleware {
- return func(h server.Handler) server.Handler {
- return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (err error) {
- defer func() {
- if rec := recover(); rec != nil {
- trace := debug.Stack()
-
- if develop {
- err = fmt.Errorf("PANIC [%v]", rec)
- fmt.Printf("%s", string(trace))
- } else {
- err = fmt.Errorf("PANIC [%v] TRACE[%s]", rec, string(trace))
- }
-
- }
- }()
-
- return h.ServeHTTP(w, r)
- })
- }
-}
diff --git a/backend/pkgs/cgofreesqlite/sqlite.go b/backend/pkgs/cgofreesqlite/sqlite.go
new file mode 100644
index 0000000..c9faf7a
--- /dev/null
+++ b/backend/pkgs/cgofreesqlite/sqlite.go
@@ -0,0 +1,40 @@
+// Package cgofreesqlite provides a CGO free implementation of the sqlite3 driver. This wraps the
+// modernc.org/sqlite driver and adds the PRAGMA foreign_keys = ON; statement to the connection
+// initialization as well as registering the driver with the sql package as "sqlite3" for compatibility
+// with entgo.io
+//
+// NOTE: This does come with around a 30% performance hit compared to the CGO version of the driver.
+// however it greatly simplifies the build process and allows for cross compilation.
+package cgofreesqlite
+
+import (
+ "database/sql"
+ "database/sql/driver"
+
+ "modernc.org/sqlite"
+)
+
+type CGOFreeSqliteDriver struct {
+ *sqlite.Driver
+}
+
+type sqlite3DriverConn interface {
+ Exec(string, []driver.Value) (driver.Result, error)
+}
+
+func (d CGOFreeSqliteDriver) Open(name string) (conn driver.Conn, err error) {
+ conn, err = d.Driver.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ _, err = conn.(sqlite3DriverConn).Exec("PRAGMA foreign_keys = ON;", nil)
+ if err != nil {
+ _ = conn.Close()
+ return nil, err
+ }
+ return conn, err
+}
+
+func init() { //nolint:gochecknoinits
+ sql.Register("sqlite3", CGOFreeSqliteDriver{Driver: &sqlite.Driver{}})
+}
diff --git a/backend/pkgs/faker/random.go b/backend/pkgs/faker/random.go
index 67c7114..62e4ff2 100644
--- a/backend/pkgs/faker/random.go
+++ b/backend/pkgs/faker/random.go
@@ -1,3 +1,4 @@
+// Package faker provides a simple interface for generating fake data for testing.
package faker
import (
@@ -7,11 +8,9 @@ import (
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-type Faker struct {
-}
+type Faker struct{}
func NewFaker() *Faker {
- rand.Seed(time.Now().UnixNano())
return &Faker{}
}
@@ -20,7 +19,6 @@ func (f *Faker) Time() time.Time {
}
func (f *Faker) Str(length int) string {
-
b := make([]rune, length)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
diff --git a/backend/pkgs/faker/randoms_test.go b/backend/pkgs/faker/randoms_test.go
index 0773205..c03c564 100644
--- a/backend/pkgs/faker/randoms_test.go
+++ b/backend/pkgs/faker/randoms_test.go
@@ -20,7 +20,7 @@ func ValidateUnique(values []string) bool {
func Test_GetRandomString(t *testing.T) {
t.Parallel()
// Test that the function returns a string of the correct length
- var generated = make([]string, Loops)
+ generated := make([]string, Loops)
faker := NewFaker()
@@ -36,7 +36,7 @@ func Test_GetRandomString(t *testing.T) {
func Test_GetRandomEmail(t *testing.T) {
t.Parallel()
// Test that the function returns a string of the correct length
- var generated = make([]string, Loops)
+ generated := make([]string, Loops)
faker := NewFaker()
@@ -52,8 +52,8 @@ func Test_GetRandomEmail(t *testing.T) {
func Test_GetRandomBool(t *testing.T) {
t.Parallel()
- var trues = 0
- var falses = 0
+ trues := 0
+ falses := 0
faker := NewFaker()
@@ -91,5 +91,4 @@ func Test_RandomNumber(t *testing.T) {
t.Errorf("RandomNumber() failed to generate a number between %v and %v", MIN, MAX)
}
}
-
}
diff --git a/backend/pkgs/hasher/doc.go b/backend/pkgs/hasher/doc.go
new file mode 100644
index 0000000..4cbdab4
--- /dev/null
+++ b/backend/pkgs/hasher/doc.go
@@ -0,0 +1,2 @@
+// Package hasher provides a simple interface for hashing and verifying passwords.
+package hasher
diff --git a/backend/pkgs/hasher/password.go b/backend/pkgs/hasher/password.go
index 64e88b2..a68c868 100644
--- a/backend/pkgs/hasher/password.go
+++ b/backend/pkgs/hasher/password.go
@@ -9,11 +9,11 @@ import (
var enabled = true
-func init() {
+func init() { // nolint: gochecknoinits
disableHas := os.Getenv("UNSAFE_DISABLE_PASSWORD_PROJECTION") == "yes_i_am_sure"
if disableHas {
- fmt.Println("WARNING: Password projection is disabled. This is unsafe in production.")
+ fmt.Println("WARNING: Password protection is disabled. This is unsafe in production.")
enabled = false
}
}
diff --git a/backend/pkgs/mailer/mailer.go b/backend/pkgs/mailer/mailer.go
index 22609aa..9b593bc 100644
--- a/backend/pkgs/mailer/mailer.go
+++ b/backend/pkgs/mailer/mailer.go
@@ -1,3 +1,4 @@
+// Package mailer provides a simple mailer for sending emails.
package mailer
import (
diff --git a/backend/pkgs/mailer/mailer_test.go b/backend/pkgs/mailer/mailer_test.go
index 87a0b60..89e55ca 100644
--- a/backend/pkgs/mailer/mailer_test.go
+++ b/backend/pkgs/mailer/mailer_test.go
@@ -5,7 +5,7 @@ import (
"os"
"testing"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
const (
@@ -30,14 +30,12 @@ func GetTestMailer() (*Mailer, error) {
}
return mailer, nil
-
}
func Test_Mailer(t *testing.T) {
t.Parallel()
mailer, err := GetTestMailer()
-
if err != nil {
t.Skip("Error Reading Test Mailer Config - Skipping")
}
@@ -47,7 +45,6 @@ func Test_Mailer(t *testing.T) {
}
message, err := RenderWelcome()
-
if err != nil {
t.Error(err)
}
@@ -62,5 +59,5 @@ func Test_Mailer(t *testing.T) {
err = mailer.Send(msg)
- assert.Nil(t, err)
+ require.NoError(t, err)
}
diff --git a/backend/pkgs/mailer/templates.go b/backend/pkgs/mailer/templates.go
index b7984c0..cc5049f 100644
--- a/backend/pkgs/mailer/templates.go
+++ b/backend/pkgs/mailer/templates.go
@@ -41,7 +41,6 @@ func DefaultTemplateData() TemplateProps {
func render(tpl string, data TemplateProps) (string, error) {
tmpl, err := template.New("name").Parse(tpl)
-
if err != nil {
return "", err
}
diff --git a/backend/pkgs/pathlib/pathlib.go b/backend/pkgs/pathlib/pathlib.go
index 24420aa..e59366d 100644
--- a/backend/pkgs/pathlib/pathlib.go
+++ b/backend/pkgs/pathlib/pathlib.go
@@ -1,3 +1,4 @@
+// Package pathlib provides a way to safely create a file path without overwriting any existing files.
package pathlib
import (
@@ -14,7 +15,7 @@ var dirReader dirReaderFunc = func(directory string) []string {
if err != nil {
return nil
}
- defer f.Close()
+ defer func() { _ = f.Close() }()
names, err := f.Readdirnames(-1)
if err != nil {
diff --git a/backend/pkgs/server/constants.go b/backend/pkgs/server/constants.go
deleted file mode 100644
index e083a57..0000000
--- a/backend/pkgs/server/constants.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package server
-
-const (
- ContentType = "Content-Type"
- ContentJSON = "application/json"
- ContentXML = "application/xml"
- ContentFormUrlEncoded = "application/x-www-form-urlencoded"
-)
diff --git a/backend/pkgs/server/errors.go b/backend/pkgs/server/errors.go
deleted file mode 100644
index 5b1d60b..0000000
--- a/backend/pkgs/server/errors.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package server
-
-import "errors"
-
-type shutdownError struct {
- message string
-}
-
-func (e *shutdownError) Error() string {
- return e.message
-}
-
-// ShutdownError returns an error that indicates that the server has lost
-// integrity and should be shut down.
-func ShutdownError(message string) error {
- return &shutdownError{message}
-}
-
-// IsShutdownError returns true if the error is a shutdown error.
-func IsShutdownError(err error) bool {
- var e *shutdownError
- return errors.As(err, &e)
-}
diff --git a/backend/pkgs/server/handler.go b/backend/pkgs/server/handler.go
deleted file mode 100644
index 76ae131..0000000
--- a/backend/pkgs/server/handler.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package server
-
-import (
- "net/http"
-)
-
-type HandlerFunc func(w http.ResponseWriter, r *http.Request) error
-
-func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
- return f(w, r)
-}
-
-type Handler interface {
- ServeHTTP(http.ResponseWriter, *http.Request) error
-}
-
-// ToHandler converts a function to a customer implementation of the Handler interface.
-// that returns an error. This wrapper around the handler function and simply
-// returns the nil in all cases
-func ToHandler(handler http.Handler) Handler {
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- handler.ServeHTTP(w, r)
- return nil
- })
-}
diff --git a/backend/pkgs/server/middleware.go b/backend/pkgs/server/middleware.go
deleted file mode 100644
index f24f06b..0000000
--- a/backend/pkgs/server/middleware.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package server
-
-import (
- "net/http"
- "strings"
-)
-
-type Middleware func(Handler) Handler
-
-// wrapMiddleware creates a new handler by wrapping middleware around a final
-// handler. The middlewares' Handlers will be executed by requests in the order
-// they are provided.
-func wrapMiddleware(mw []Middleware, handler Handler) Handler {
-
- // Loop backwards through the middleware invoking each one. Replace the
- // handler with the new wrapped handler. Looping backwards ensures that the
- // first middleware of the slice is the first to be executed by requests.
- for i := len(mw) - 1; i >= 0; i-- {
- h := mw[i]
- if h != nil {
- handler = h(handler)
- }
- }
-
- return handler
-}
-
-// StripTrailingSlash is a middleware that will strip trailing slashes from the request path.
-//
-// Example: /api/v1/ -> /api/v1
-func StripTrailingSlash() Middleware {
- return func(h Handler) Handler {
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- r.URL.Path = strings.TrimSuffix(r.URL.Path, "/")
- return h.ServeHTTP(w, r)
- })
- }
-}
diff --git a/backend/pkgs/server/mux.go b/backend/pkgs/server/mux.go
deleted file mode 100644
index 9e77e32..0000000
--- a/backend/pkgs/server/mux.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package server
-
-import (
- "context"
- "net/http"
-
- "github.com/google/uuid"
-)
-
-type vkey int
-
-const (
- // Key is the key for the server in the request context.
- key vkey = 1
-)
-
-type Values struct {
- TraceID string
-}
-
-func GetTraceID(ctx context.Context) string {
- v, ok := ctx.Value(key).(Values)
- if !ok {
- return ""
- }
- return v.TraceID
-}
-
-func (s *Server) toHttpHandler(handler Handler, mw ...Middleware) http.HandlerFunc {
- handler = wrapMiddleware(mw, handler)
-
- handler = wrapMiddleware(s.mw, handler)
-
- return func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- // Add the trace ID to the context
- ctx = context.WithValue(ctx, key, Values{
- TraceID: uuid.NewString(),
- })
-
- err := handler.ServeHTTP(w, r.WithContext(ctx))
-
- if err != nil {
- if IsShutdownError(err) {
- _ = s.Shutdown("SIGTERM")
- }
- }
- }
-}
-
-func (s *Server) handle(method, pattern string, handler Handler, mw ...Middleware) {
- h := s.toHttpHandler(handler, mw...)
-
- switch method {
- case http.MethodGet:
- s.mux.Get(pattern, h)
- case http.MethodPost:
- s.mux.Post(pattern, h)
- case http.MethodPut:
- s.mux.Put(pattern, h)
- case http.MethodDelete:
- s.mux.Delete(pattern, h)
- case http.MethodPatch:
- s.mux.Patch(pattern, h)
- case http.MethodHead:
- s.mux.Head(pattern, h)
- case http.MethodOptions:
- s.mux.Options(pattern, h)
- }
-}
-
-func (s *Server) Get(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodGet, pattern, handler, mw...)
-}
-
-func (s *Server) Post(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodPost, pattern, handler, mw...)
-}
-
-func (s *Server) Put(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodPut, pattern, handler, mw...)
-}
-
-func (s *Server) Delete(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodDelete, pattern, handler, mw...)
-}
-
-func (s *Server) Patch(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodPatch, pattern, handler, mw...)
-}
-
-func (s *Server) Head(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodHead, pattern, handler, mw...)
-}
-
-func (s *Server) Options(pattern string, handler Handler, mw ...Middleware) {
- s.handle(http.MethodOptions, pattern, handler, mw...)
-}
-
-func (s *Server) NotFound(handler Handler) {
- s.mux.NotFound(s.toHttpHandler(handler))
-}
diff --git a/backend/pkgs/server/request.go b/backend/pkgs/server/request.go
deleted file mode 100644
index 38c3189..0000000
--- a/backend/pkgs/server/request.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package server
-
-import (
- "encoding/json"
- "net/http"
-)
-
-// Decode reads the body of an HTTP request looking for a JSON document. The
-// body is decoded into the provided value.
-func Decode(r *http.Request, val interface{}) error {
- decoder := json.NewDecoder(r.Body)
- // decoder.DisallowUnknownFields()
- if err := decoder.Decode(val); err != nil {
- return err
- }
- return nil
-}
-
-// GetId is a shortcut to get the id from the request URL or return a default value
-func GetParam(r *http.Request, key, d string) string {
- val := r.URL.Query().Get(key)
-
- if val == "" {
- return d
- }
-
- return val
-}
-
-// GetSkip is a shortcut to get the skip from the request URL parameters
-func GetSkip(r *http.Request, d string) string {
- return GetParam(r, "skip", d)
-}
-
-// GetSkip is a shortcut to get the skip from the request URL parameters
-func GetId(r *http.Request, d string) string {
- return GetParam(r, "id", d)
-}
-
-// GetLimit is a shortcut to get the limit from the request URL parameters
-func GetLimit(r *http.Request, d string) string {
- return GetParam(r, "limit", d)
-}
-
-// GetQuery is a shortcut to get the sort from the request URL parameters
-func GetQuery(r *http.Request, d string) string {
- return GetParam(r, "query", d)
-}
diff --git a/backend/pkgs/server/request_test.go b/backend/pkgs/server/request_test.go
deleted file mode 100644
index 05dc8c5..0000000
--- a/backend/pkgs/server/request_test.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package server
-
-import (
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-)
-
-type TestStruct struct {
- Name string `json:"name"`
- Data string `json:"data"`
-}
-
-func TestDecode(t *testing.T) {
- type args struct {
- r *http.Request
- val interface{}
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- name: "check_error",
- args: args{
- r: &http.Request{
- Body: http.NoBody,
- },
- val: make(map[string]interface{}),
- },
- wantErr: true,
- },
- {
- name: "check_success",
- args: args{
- r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)),
- val: TestStruct{
- Name: "test",
- Data: "test",
- },
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := Decode(tt.args.r, &tt.args.val); (err != nil) != tt.wantErr {
- t.Errorf("Decode() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGetParam(t *testing.T) {
- type args struct {
- r *http.Request
- key string
- d string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- name: "check_default",
- args: args{
- r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)),
- key: "id",
- d: "default",
- },
- want: "default",
- },
- {
- name: "check_id",
- args: args{
- r: httptest.NewRequest("POST", "/item?id=123", strings.NewReader(`{"name":"test","data":"test"}`)),
- key: "id",
- d: "",
- },
- want: "123",
- },
- {
- name: "check_query",
- args: args{
- r: httptest.NewRequest("POST", "/item?query=hello-world", strings.NewReader(`{"name":"test","data":"test"}`)),
- key: "query",
- d: "",
- },
- want: "hello-world",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetParam(tt.args.r, tt.args.key, tt.args.d); got != tt.want {
- t.Errorf("GetParam() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetSkip(t *testing.T) {
- type args struct {
- r *http.Request
- d string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- name: "check_default",
- args: args{
- r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "0",
- },
- {
- name: "check_skip",
- args: args{
- r: httptest.NewRequest("POST", "/item?skip=107", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "107",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetSkip(tt.args.r, tt.args.d); got != tt.want {
- t.Errorf("GetSkip() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetLimit(t *testing.T) {
- type args struct {
- r *http.Request
- d string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- name: "check_default",
- args: args{
- r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "0",
- },
- {
- name: "check_limit",
- args: args{
- r: httptest.NewRequest("POST", "/item?limit=107", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "107",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetLimit(tt.args.r, tt.args.d); got != tt.want {
- t.Errorf("GetLimit() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetQuery(t *testing.T) {
- type args struct {
- r *http.Request
- d string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- name: "check_default",
- args: args{
- r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "0",
- },
- {
- name: "check_query",
- args: args{
- r: httptest.NewRequest("POST", "/item?query=hello-query", strings.NewReader(`{"name":"test","data":"test"}`)),
- d: "0",
- },
- want: "hello-query",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetQuery(tt.args.r, tt.args.d); got != tt.want {
- t.Errorf("GetQuery() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/backend/pkgs/server/response.go b/backend/pkgs/server/response.go
deleted file mode 100644
index 7d5880e..0000000
--- a/backend/pkgs/server/response.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package server
-
-import (
- "encoding/json"
- "net/http"
-)
-
-type ErrorResponse struct {
- Error string `json:"error"`
- Fields map[string]string `json:"fields,omitempty"`
-}
-
-// Respond converts a Go value to JSON and sends it to the client.
-// Adapted from https://github.com/ardanlabs/service/tree/master/foundation/web
-func Respond(w http.ResponseWriter, statusCode int, data interface{}) error {
- if statusCode == http.StatusNoContent {
- w.WriteHeader(statusCode)
- return nil
- }
-
- // Convert the response value to JSON.
- jsonData, err := json.Marshal(data)
- if err != nil {
- panic(err)
- }
-
- // Set the content type and headers once we know marshaling has succeeded.
- w.Header().Set("Content-Type", ContentJSON)
-
- // Write the status code to the response.
- w.WriteHeader(statusCode)
-
- // Send the result back to the client.
- if _, err := w.Write(jsonData); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/backend/pkgs/server/response_test.go b/backend/pkgs/server/response_test.go
deleted file mode 100644
index ef23d60..0000000
--- a/backend/pkgs/server/response_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package server
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Respond_NoContent(t *testing.T) {
- recorder := httptest.NewRecorder()
- dummystruct := struct {
- Name string
- }{
- Name: "dummy",
- }
-
- err := Respond(recorder, http.StatusNoContent, dummystruct)
- assert.NoError(t, err)
-
- assert.Equal(t, http.StatusNoContent, recorder.Code)
- assert.Empty(t, recorder.Body.String())
-}
-
-func Test_Respond_JSON(t *testing.T) {
- recorder := httptest.NewRecorder()
- dummystruct := struct {
- Name string `json:"name"`
- }{
- Name: "dummy",
- }
-
- err := Respond(recorder, http.StatusCreated, dummystruct)
- assert.NoError(t, err)
-
- assert.Equal(t, http.StatusCreated, recorder.Code)
- assert.JSONEq(t, recorder.Body.String(), `{"name":"dummy"}`)
- assert.Equal(t, "application/json", recorder.Header().Get("Content-Type"))
-
-}
diff --git a/backend/pkgs/server/result.go b/backend/pkgs/server/result.go
deleted file mode 100644
index 69dcf81..0000000
--- a/backend/pkgs/server/result.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package server
-
-type Result struct {
- Error bool `json:"error,omitempty"`
- Details interface{} `json:"details,omitempty"`
- Message string `json:"message,omitempty"`
- Item interface{} `json:"item,omitempty"`
-}
-
-type Results struct {
- Items any `json:"items"`
-}
-
-// Wrap creates a Wrapper instance and adds the initial namespace and data to be returned.
-func Wrap(data interface{}) Result {
- return Result{
- Item: data,
- }
-}
diff --git a/backend/pkgs/server/server.go b/backend/pkgs/server/server.go
deleted file mode 100644
index 921c576..0000000
--- a/backend/pkgs/server/server.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package server
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "time"
-
- "github.com/go-chi/chi/v5"
-)
-
-var (
- ErrServerNotStarted = errors.New("server not started")
- ErrServerAlreadyStarted = errors.New("server already started")
-)
-
-type Server struct {
- Host string
- Port string
- Worker Worker
-
- wg sync.WaitGroup
- mux *chi.Mux
-
- // mw is the global middleware chain for the server.
- mw []Middleware
-
- started bool
- activeServer *http.Server
-
- idleTimeout time.Duration
- readTimeout time.Duration
- writeTimeout time.Duration
-}
-
-func NewServer(opts ...Option) *Server {
- s := &Server{
- Host: "localhost",
- Port: "8080",
- mux: chi.NewRouter(),
- Worker: NewSimpleWorker(),
- idleTimeout: 30 * time.Second,
- readTimeout: 10 * time.Second,
- writeTimeout: 10 * time.Second,
- }
-
- for _, opt := range opts {
- err := opt(s)
- if err != nil {
- panic(err)
- }
- }
-
- return s
-}
-
-func (s *Server) Shutdown(sig string) error {
- if !s.started {
- return ErrServerNotStarted
- }
- fmt.Printf("Received %s signal, shutting down\n", sig)
-
- // Create a context with a 5-second timeout.
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- err := s.activeServer.Shutdown(ctx)
- s.started = false
- if err != nil {
- return err
- }
-
- fmt.Println("Http server shutdown, waiting for all tasks to finish")
- s.wg.Wait()
-
- return nil
-
-}
-
-func (s *Server) Start() error {
- if s.started {
- return ErrServerAlreadyStarted
- }
-
- s.activeServer = &http.Server{
- Addr: s.Host + ":" + s.Port,
- Handler: s.mux,
- IdleTimeout: s.idleTimeout,
- ReadTimeout: s.readTimeout,
- WriteTimeout: s.writeTimeout,
- }
-
- shutdownError := make(chan error)
-
- go func() {
- // Create a quit channel which carries os.Signal values.
- quit := make(chan os.Signal, 1)
-
- // Use signal.Notify() to listen for incoming SIGINT and SIGTERM signals and
- // relay them to the quit channel.
- signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
-
- // Read the signal from the quit channel. block until received
- sig := <-quit
-
- err := s.Shutdown(sig.String())
- if err != nil {
- shutdownError <- err
- }
-
- // Exit the application with a 0 (success) status code.
- os.Exit(0)
- }()
-
- s.started = true
- err := s.activeServer.ListenAndServe()
-
- if !errors.Is(err, http.ErrServerClosed) {
- return err
- }
-
- err = <-shutdownError
- if err != nil {
- return err
- }
-
- fmt.Println("Server shutdown successfully")
-
- return nil
-}
-
-// Background starts a go routine that runs on the servers pool. In the event of a shutdown
-// request, the server will wait until all open goroutines have finished before shutting down.
-func (svr *Server) Background(task func()) {
- svr.wg.Add(1)
- svr.Worker.Add(func() {
- defer svr.wg.Done()
- task()
- })
-}
diff --git a/backend/pkgs/server/server_options.go b/backend/pkgs/server/server_options.go
deleted file mode 100644
index 93b7781..0000000
--- a/backend/pkgs/server/server_options.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package server
-
-import "time"
-
-type Option = func(s *Server) error
-
-func WithMiddleware(mw ...Middleware) Option {
- return func(s *Server) error {
- s.mw = append(s.mw, mw...)
- return nil
- }
-}
-
-func WithWorker(w Worker) Option {
- return func(s *Server) error {
- s.Worker = w
- return nil
- }
-}
-
-func WithHost(host string) Option {
- return func(s *Server) error {
- s.Host = host
- return nil
- }
-}
-
-func WithPort(port string) Option {
- return func(s *Server) error {
- s.Port = port
- return nil
- }
-}
-
-func WithReadTimeout(seconds int) Option {
- return func(s *Server) error {
- s.readTimeout = time.Duration(seconds) * time.Second
- return nil
- }
-}
-
-func WithWriteTimeout(seconds int) Option {
- return func(s *Server) error {
- s.writeTimeout = time.Duration(seconds) * time.Second
- return nil
- }
-}
-
-func WithIdleTimeout(seconds int) Option {
- return func(s *Server) error {
- s.idleTimeout = time.Duration(seconds) * time.Second
- return nil
- }
-}
diff --git a/backend/pkgs/server/server_test.go b/backend/pkgs/server/server_test.go
deleted file mode 100644
index 95a93fe..0000000
--- a/backend/pkgs/server/server_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package server
-
-import (
- "net/http"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func testServer(t *testing.T, r http.Handler) *Server {
- svr := NewServer(WithHost("127.0.0.1"), WithPort("19245"))
-
- if r != nil {
- svr.mux.Mount("/", r)
- }
- go func() {
- err := svr.Start()
- assert.NoError(t, err)
- }()
-
- ping := func() error {
- _, err := http.Get("http://127.0.0.1:19245")
- return err
- }
-
- for {
- if err := ping(); err == nil {
- break
- }
- time.Sleep(time.Millisecond * 100)
- }
-
- return svr
-}
-
-func Test_ServerShutdown_Error(t *testing.T) {
- svr := NewServer(WithHost("127.0.0.1"), WithPort("19245"))
-
- err := svr.Shutdown("test")
- assert.ErrorIs(t, err, ErrServerNotStarted)
-}
-
-func Test_ServerStarts_Error(t *testing.T) {
- svr := testServer(t, nil)
-
- err := svr.Start()
- assert.ErrorIs(t, err, ErrServerAlreadyStarted)
-
- err = svr.Shutdown("test")
- assert.NoError(t, err)
-}
-
-func Test_ServerStarts(t *testing.T) {
- svr := testServer(t, nil)
- err := svr.Shutdown("test")
- assert.NoError(t, err)
-}
-
-func Test_GracefulServerShutdownWithWorkers(t *testing.T) {
- isFinished := false
-
- svr := testServer(t, nil)
-
- svr.Background(func() {
- time.Sleep(time.Second * 4)
- isFinished = true
- })
-
- err := svr.Shutdown("test")
-
- assert.NoError(t, err)
- assert.True(t, isFinished)
-
-}
-
-func Test_GracefulServerShutdownWithRequests(t *testing.T) {
- var isFinished atomic.Bool
-
- router := http.NewServeMux()
-
- // add long running handler func
- router.HandleFunc("/test", func(rw http.ResponseWriter, r *http.Request) {
- time.Sleep(time.Second * 3)
- isFinished.Store(true)
- })
-
- svr := testServer(t, router)
-
- // Make request to "/test"
- go func() {
- _, _ = http.Get("http://127.0.0.1:19245/test") // This is probably bad?
- }()
-
- time.Sleep(time.Second) // Hack to wait for the request to be made
-
- err := svr.Shutdown("test")
- assert.NoError(t, err)
-
- assert.True(t, isFinished.Load())
-}
diff --git a/backend/pkgs/server/worker.go b/backend/pkgs/server/worker.go
deleted file mode 100644
index acd6e25..0000000
--- a/backend/pkgs/server/worker.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package server
-
-// TODO: #2 Implement Go routine pool/job queue
-
-type Worker interface {
- Add(func())
-}
-
-// SimpleWorker is a simple background worker that implements
-// the Worker interface and runs all tasks in a go routine without
-// a pool or que or limits. It's useful for simple or small applications
-// with minimal/short background tasks
-type SimpleWorker struct {
-}
-
-func NewSimpleWorker() *SimpleWorker {
- return &SimpleWorker{}
-}
-
-func (sw *SimpleWorker) Add(task func()) {
- go task()
-}
diff --git a/backend/pkgs/set/funcs.go b/backend/pkgs/set/funcs.go
index 0d9a261..d13cdcd 100644
--- a/backend/pkgs/set/funcs.go
+++ b/backend/pkgs/set/funcs.go
@@ -97,5 +97,4 @@ func Disjoint[T key](a, b Set[T]) bool {
}
}
return true
-
}
diff --git a/backend/pkgs/set/funcs_test.go b/backend/pkgs/set/funcs_test.go
index ab3aa0e..cd49ef7 100644
--- a/backend/pkgs/set/funcs_test.go
+++ b/backend/pkgs/set/funcs_test.go
@@ -28,7 +28,6 @@ var (
)
func TestDiff(t *testing.T) {
-
tests := []struct {
name string
args args
diff --git a/backend/pkgs/set/set.go b/backend/pkgs/set/set.go
index f2ffecc..fca1c98 100644
--- a/backend/pkgs/set/set.go
+++ b/backend/pkgs/set/set.go
@@ -1,3 +1,4 @@
+// Package set provides a simple set implementation.
package set
type key interface {
@@ -8,6 +9,12 @@ type Set[T key] struct {
mp map[T]struct{}
}
+func Make[T key](size int) Set[T] {
+ return Set[T]{
+ mp: make(map[T]struct{}, size),
+ }
+}
+
func New[T key](v ...T) Set[T] {
mp := make(map[T]struct{}, len(v))
diff --git a/docker-compose.yml b/docker-compose.yml
index a1108fc..6b57760 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,3 @@
-version: "3.4"
-
services:
homebox:
image: homebox
diff --git a/docs/docs/api/openapi-2.0.json b/docs/docs/api/openapi-2.0.json
new file mode 100644
index 0000000..b10c93a
--- /dev/null
+++ b/docs/docs/api/openapi-2.0.json
@@ -0,0 +1,2992 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "description": "Track, Manage, and Organize your Things.",
+ "title": "Homebox API",
+ "contact": {
+ "name": "Don't"
+ },
+ "version": "1.0"
+ },
+ "basePath": "/api",
+ "paths": {
+ "/v1/actions/ensure-asset-ids": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an asset ID",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensure Asset IDs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/ensure-import-refs": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Ensures all items in the database have an import ref",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Ensures Import Refs",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/set-primary-photos": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Sets the first photo of each item as the primary photo",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Set Primary Photos",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/actions/zero-item-time-fields": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Resets all item date fields to the beginning of the day",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Actions"
+ ],
+ "summary": "Zero Out Time Fields",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ActionAmountResult"
+ }
+ }
+ }
+ }
+ },
+ "/v1/assets/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get Item by Asset ID",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Asset ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/currency": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "Currency",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/currencies.Currency"
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get Group",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.Group"
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Update Group",
+ "parameters": [
+ {
+ "description": "User Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.GroupUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.Group"
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/invitations": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Create Group Invitation",
+ "parameters": [
+ {
+ "description": "User Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.GroupInvitationCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.GroupInvitation"
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Group Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.GroupStatistics"
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/labels": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Label Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/locations": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Location Statistics",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TotalsByOrganizer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/groups/statistics/purchase-price": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Statistics"
+ ],
+ "summary": "Get Purchase Price Statistics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "start date",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "end date",
+ "name": "end",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ValueOverTime"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Query All Items",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "search string",
+ "name": "q",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "page number",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "items per page",
+ "name": "pageSize",
+ "in": "query"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "label Ids",
+ "name": "labels",
+ "in": "query"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "location Ids",
+ "name": "locations",
+ "in": "query"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "parent Ids",
+ "name": "parentIds",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Create Item",
+ "parameters": [
+ {
+ "description": "Item Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemCreate"
+ }
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/export": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Export Items",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Names",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/fields/values": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get All Custom Field Values",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Import Items",
+ "parameters": [
+ {
+ "type": "file",
+ "description": "Image to upload",
+ "name": "csv",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/items/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Update Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Item Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Delete Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ },
+ "patch": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Update Item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Item Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemPatch"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/attachments": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items Attachments"
+ ],
+ "summary": "Create Item Attachment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "file",
+ "description": "File attachment",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Type of file",
+ "name": "type",
+ "in": "formData",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of the file including extension",
+ "name": "name",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ },
+ "422": {
+ "description": "Unprocessable Entity",
+ "schema": {
+ "$ref": "#/definitions/validate.ErrorResponse"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/attachments/{attachment_id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/octet-stream"
+ ],
+ "tags": [
+ "Items Attachments"
+ ],
+ "summary": "Get Item Attachment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Attachment ID",
+ "name": "attachment_id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.ItemAttachmentToken"
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Items Attachments"
+ ],
+ "summary": "Update Item Attachment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Attachment ID",
+ "name": "attachment_id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Attachment Update",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.ItemAttachmentUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ItemOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Items Attachments"
+ ],
+ "summary": "Delete Item Attachment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Attachment ID",
+ "name": "attachment_id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/maintenance": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Get Maintenance Log",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceLog"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Create Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryCreate"
+ }
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/maintenance/{entry_id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Update Maintenance Entry",
+ "parameters": [
+ {
+ "description": "Entry Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntryUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Maintenance"
+ ],
+ "summary": "Delete Maintenance Entry",
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/items/{id}/path": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Get the full path of an item",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Item ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemPath"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/labels": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Labels"
+ ],
+ "summary": "Get All Labels",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LabelOut"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Labels"
+ ],
+ "summary": "Create Label",
+ "parameters": [
+ {
+ "description": "Label Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.LabelCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LabelSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/labels/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Labels"
+ ],
+ "summary": "Get Label",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Label ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LabelOut"
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Labels"
+ ],
+ "summary": "Update Label",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Label ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LabelOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Labels"
+ ],
+ "summary": "Delete Label",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Label ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/locations": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Get All Locations",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "Filter locations with parents",
+ "name": "filterChildren",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LocationOutCount"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Create Location",
+ "parameters": [
+ {
+ "description": "Location Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.LocationCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/locations/tree": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Get Locations Tree",
+ "parameters": [
+ {
+ "type": "boolean",
+ "description": "include items in response tree",
+ "name": "withItems",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/locations/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Get Location",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Location ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LocationOut"
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Update Location",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Location ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Location Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.LocationUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.LocationOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Locations"
+ ],
+ "summary": "Delete Location",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Location ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/notifiers": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Get Notifiers",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Create Notifier",
+ "parameters": [
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierCreate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/notifiers/test": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Test Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "URL",
+ "name": "url",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/notifiers/{id}": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Update Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Notifier Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.NotifierOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Notifiers"
+ ],
+ "summary": "Delete a Notifier",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Notifier ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/qrcode": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Items"
+ ],
+ "summary": "Create QR Code",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "data to be encoded into qrcode",
+ "name": "data",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "image/jpeg",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/reporting/bill-of-materials": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Reporting"
+ ],
+ "summary": "Export Bill of Materials",
+ "responses": {
+ "200": {
+ "description": "text/csv",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/v1/status": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "Application Info",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.APISummary"
+ }
+ }
+ }
+ }
+ },
+ "/v1/users/change-password": {
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "User"
+ ],
+ "summary": "Change Password",
+ "parameters": [
+ {
+ "description": "Password Payload",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.ChangePassword"
+ }
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/users/login": {
+ "post": {
+ "consumes": [
+ "application/x-www-form-urlencoded",
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Authentication"
+ ],
+ "summary": "User Login",
+ "parameters": [
+ {
+ "type": "string",
+ "example": "admin@admin.com",
+ "description": "string",
+ "name": "username",
+ "in": "formData"
+ },
+ {
+ "type": "string",
+ "example": "admin",
+ "description": "string",
+ "name": "password",
+ "in": "formData"
+ },
+ {
+ "description": "Login Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.LoginForm"
+ }
+ },
+ {
+ "type": "string",
+ "description": "auth provider",
+ "name": "provider",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.TokenResponse"
+ }
+ }
+ }
+ }
+ },
+ "/v1/users/logout": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Authentication"
+ ],
+ "summary": "User Logout",
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/users/refresh": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "handleAuthRefresh returns a handler that will issue a new token from an existing token.\nThis does not validate that the user still exists within the database.",
+ "tags": [
+ "Authentication"
+ ],
+ "summary": "User Token Refresh",
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/v1/users/register": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "User"
+ ],
+ "summary": "Register New User",
+ "parameters": [
+ {
+ "description": "User Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/services.UserRegistration"
+ }
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/users/self": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "User"
+ ],
+ "summary": "Get User Self",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/v1.Wrapped"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "item": {
+ "$ref": "#/definitions/repo.UserOut"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "put": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "User"
+ ],
+ "summary": "Update Account",
+ "parameters": [
+ {
+ "description": "User Data",
+ "name": "payload",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/repo.UserUpdate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/v1.Wrapped"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "item": {
+ "$ref": "#/definitions/repo.UserUpdate"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "User"
+ ],
+ "summary": "Delete Account",
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "currencies.Currency": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string"
+ },
+ "local": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "symbol": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.DocumentOut": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "path": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.Group": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "currency": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.GroupStatistics": {
+ "type": "object",
+ "properties": {
+ "totalItemPrice": {
+ "type": "number"
+ },
+ "totalItems": {
+ "type": "integer"
+ },
+ "totalLabels": {
+ "type": "integer"
+ },
+ "totalLocations": {
+ "type": "integer"
+ },
+ "totalUsers": {
+ "type": "integer"
+ },
+ "totalWithWarranty": {
+ "type": "integer"
+ }
+ }
+ },
+ "repo.GroupUpdate": {
+ "type": "object",
+ "properties": {
+ "currency": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemAttachment": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "document": {
+ "$ref": "#/definitions/repo.DocumentOut"
+ },
+ "id": {
+ "type": "string"
+ },
+ "primary": {
+ "type": "boolean"
+ },
+ "type": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemAttachmentUpdate": {
+ "type": "object",
+ "properties": {
+ "primary": {
+ "type": "boolean"
+ },
+ "title": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemCreate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "description": {
+ "type": "string",
+ "maxLength": 1000
+ },
+ "labelIds": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "locationId": {
+ "description": "Edges",
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
+ "repo.ItemField": {
+ "type": "object",
+ "properties": {
+ "booleanValue": {
+ "type": "boolean"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "numberValue": {
+ "type": "integer"
+ },
+ "textValue": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemOut": {
+ "type": "object",
+ "properties": {
+ "archived": {
+ "type": "boolean"
+ },
+ "assetId": {
+ "type": "string",
+ "example": "0"
+ },
+ "attachments": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemAttachment"
+ }
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "fields": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemField"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "imageId": {
+ "type": "string"
+ },
+ "insured": {
+ "type": "boolean"
+ },
+ "labels": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LabelSummary"
+ }
+ },
+ "lifetimeWarranty": {
+ "description": "Warranty",
+ "type": "boolean"
+ },
+ "location": {
+ "description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
+ "x-nullable": true,
+ "x-omitempty": true
+ },
+ "manufacturer": {
+ "type": "string"
+ },
+ "modelNumber": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "notes": {
+ "description": "Extras",
+ "type": "string"
+ },
+ "parent": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ ],
+ "x-nullable": true,
+ "x-omitempty": true
+ },
+ "purchaseFrom": {
+ "type": "string"
+ },
+ "purchasePrice": {
+ "type": "string",
+ "example": "0"
+ },
+ "purchaseTime": {
+ "description": "Purchase",
+ "type": "string"
+ },
+ "quantity": {
+ "type": "integer"
+ },
+ "serialNumber": {
+ "type": "string"
+ },
+ "soldNotes": {
+ "type": "string"
+ },
+ "soldPrice": {
+ "type": "string",
+ "example": "0"
+ },
+ "soldTime": {
+ "description": "Sold",
+ "type": "string"
+ },
+ "soldTo": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ },
+ "warrantyDetails": {
+ "type": "string"
+ },
+ "warrantyExpires": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemPatch": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "quantity": {
+ "type": "integer",
+ "x-nullable": true,
+ "x-omitempty": true
+ }
+ }
+ },
+ "repo.ItemPath": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "$ref": "#/definitions/repo.ItemType"
+ }
+ }
+ },
+ "repo.ItemSummary": {
+ "type": "object",
+ "properties": {
+ "archived": {
+ "type": "boolean"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "imageId": {
+ "type": "string"
+ },
+ "insured": {
+ "type": "boolean"
+ },
+ "labels": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LabelSummary"
+ }
+ },
+ "location": {
+ "description": "Edges",
+ "allOf": [
+ {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ ],
+ "x-nullable": true,
+ "x-omitempty": true
+ },
+ "name": {
+ "type": "string"
+ },
+ "purchasePrice": {
+ "type": "string",
+ "example": "0"
+ },
+ "quantity": {
+ "type": "integer"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ItemType": {
+ "type": "string",
+ "enum": [
+ "location",
+ "item"
+ ],
+ "x-enum-varnames": [
+ "ItemTypeLocation",
+ "ItemTypeItem"
+ ]
+ },
+ "repo.ItemUpdate": {
+ "type": "object",
+ "properties": {
+ "archived": {
+ "type": "boolean"
+ },
+ "assetId": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "fields": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemField"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "insured": {
+ "type": "boolean"
+ },
+ "labelIds": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "lifetimeWarranty": {
+ "description": "Warranty",
+ "type": "boolean"
+ },
+ "locationId": {
+ "description": "Edges",
+ "type": "string"
+ },
+ "manufacturer": {
+ "type": "string"
+ },
+ "modelNumber": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "notes": {
+ "description": "Extras",
+ "type": "string"
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true,
+ "x-omitempty": true
+ },
+ "purchaseFrom": {
+ "type": "string"
+ },
+ "purchasePrice": {
+ "type": "string",
+ "example": "0"
+ },
+ "purchaseTime": {
+ "description": "Purchase",
+ "type": "string"
+ },
+ "quantity": {
+ "type": "integer"
+ },
+ "serialNumber": {
+ "description": "Identifications",
+ "type": "string"
+ },
+ "soldNotes": {
+ "type": "string"
+ },
+ "soldPrice": {
+ "type": "string",
+ "example": "0"
+ },
+ "soldTime": {
+ "description": "Sold",
+ "type": "string"
+ },
+ "soldTo": {
+ "type": "string"
+ },
+ "warrantyDetails": {
+ "type": "string"
+ },
+ "warrantyExpires": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LabelCreate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "color": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string",
+ "maxLength": 255
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ }
+ }
+ },
+ "repo.LabelOut": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LabelSummary": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LocationCreate": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
+ "repo.LocationOut": {
+ "type": "object",
+ "properties": {
+ "children": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.LocationSummary"
+ }
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parent": {
+ "$ref": "#/definitions/repo.LocationSummary"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LocationOutCount": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "itemCount": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LocationSummary": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.LocationUpdate": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parentId": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
+ "repo.MaintenanceEntry": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryCreate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceEntryUpdate": {
+ "type": "object",
+ "properties": {
+ "completedDate": {
+ "type": "string"
+ },
+ "cost": {
+ "type": "string",
+ "example": "0"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "scheduledDate": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.MaintenanceLog": {
+ "type": "object",
+ "properties": {
+ "costAverage": {
+ "type": "number"
+ },
+ "costTotal": {
+ "type": "number"
+ },
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.MaintenanceEntry"
+ }
+ },
+ "itemId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierCreate": {
+ "type": "object",
+ "required": [
+ "name",
+ "url"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierOut": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ },
+ "userId": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.NotifierUpdate": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "isActive": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 255,
+ "minLength": 1
+ },
+ "url": {
+ "type": "string",
+ "x-nullable": true
+ }
+ }
+ },
+ "repo.PaginationResult-repo_ItemSummary": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ItemSummary"
+ }
+ },
+ "page": {
+ "type": "integer"
+ },
+ "pageSize": {
+ "type": "integer"
+ },
+ "total": {
+ "type": "integer"
+ }
+ }
+ },
+ "repo.TotalsByOrganizer": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "total": {
+ "type": "number"
+ }
+ }
+ },
+ "repo.TreeItem": {
+ "type": "object",
+ "properties": {
+ "children": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.TreeItem"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.UserOut": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "groupName": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "isOwner": {
+ "type": "boolean"
+ },
+ "isSuperuser": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.UserUpdate": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "repo.ValueOverTime": {
+ "type": "object",
+ "properties": {
+ "end": {
+ "type": "string"
+ },
+ "entries": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ValueOverTimeEntry"
+ }
+ },
+ "start": {
+ "type": "string"
+ },
+ "valueAtEnd": {
+ "type": "number"
+ },
+ "valueAtStart": {
+ "type": "number"
+ }
+ }
+ },
+ "repo.ValueOverTimeEntry": {
+ "type": "object",
+ "properties": {
+ "date": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "number"
+ }
+ }
+ },
+ "services.UserRegistration": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.APISummary": {
+ "type": "object",
+ "properties": {
+ "allowRegistration": {
+ "type": "boolean"
+ },
+ "build": {
+ "$ref": "#/definitions/v1.Build"
+ },
+ "demo": {
+ "type": "boolean"
+ },
+ "health": {
+ "type": "boolean"
+ },
+ "message": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "versions": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.ActionAmountResult": {
+ "type": "object",
+ "properties": {
+ "completed": {
+ "type": "integer"
+ }
+ }
+ },
+ "v1.Build": {
+ "type": "object",
+ "properties": {
+ "buildTime": {
+ "type": "string"
+ },
+ "commit": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.ChangePassword": {
+ "type": "object",
+ "properties": {
+ "current": {
+ "type": "string"
+ },
+ "new": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.GroupInvitation": {
+ "type": "object",
+ "properties": {
+ "expiresAt": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string"
+ },
+ "uses": {
+ "type": "integer"
+ }
+ }
+ },
+ "v1.GroupInvitationCreate": {
+ "type": "object",
+ "required": [
+ "uses"
+ ],
+ "properties": {
+ "expiresAt": {
+ "type": "string"
+ },
+ "uses": {
+ "type": "integer",
+ "maximum": 100,
+ "minimum": 1
+ }
+ }
+ },
+ "v1.ItemAttachmentToken": {
+ "type": "object",
+ "properties": {
+ "token": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.LoginForm": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "stayLoggedIn": {
+ "type": "boolean"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.TokenResponse": {
+ "type": "object",
+ "properties": {
+ "attachmentToken": {
+ "type": "string"
+ },
+ "expiresAt": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.Wrapped": {
+ "type": "object",
+ "properties": {
+ "item": {}
+ }
+ },
+ "validate.ErrorResponse": {
+ "type": "object",
+ "properties": {
+ "error": {
+ "type": "string"
+ },
+ "fields": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "securityDefinitions": {
+ "Bearer": {
+ "description": "\"Type 'Bearer TOKEN' to correctly set the API Key\"",
+ "type": "apiKey",
+ "name": "Authorization",
+ "in": "header"
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/docs/assets/img/homebox-email-banner.jpg b/docs/docs/assets/img/homebox-email-banner.jpg
new file mode 100644
index 0000000..e611c43
Binary files /dev/null and b/docs/docs/assets/img/homebox-email-banner.jpg differ
diff --git a/docs/docs/build.md b/docs/docs/build.md
index 8dbf5df..e9af902 100644
--- a/docs/docs/build.md
+++ b/docs/docs/build.md
@@ -4,12 +4,12 @@ This document describes how to build the project from source code.
## Prerequisites
-...
+TODO
## Building
-...
+TODO
## Running
-...
\ No newline at end of file
+TODO
\ No newline at end of file
diff --git a/docs/docs/import-csv.md b/docs/docs/import-csv.md
index 67b1e1d..6ed4f4b 100644
--- a/docs/docs/import-csv.md
+++ b/docs/docs/import-csv.md
@@ -2,55 +2,82 @@
## Quick Start
-Using the CSV import is the recommended way for adding items to the database. It is always going to be the fastest way to import any large amount of items and provides the most flexibility when it comes to adding items.
+Using the CSV import is the recommended way for adding items to the database. It is always going to be the fastest way to import any large number of items and provides the most flexibility when it comes to adding items.
-**Limitations**
+**Current Limitations**
- - Currently only supports importing items, locations, and labels
- - Does not support attachments. Attachments must be uploaded after import
-
-**Template**
-
-You can use this snippet as the headers for your CSV. Copy and paste it into your spreadsheet editor of choice and fill in the value.
-
-```csv
-Import RefLocation Labels Quantity Name Description Insured Serial Number Model Number Manufacturer Notes Purchase From Purchased Price Purchased Time Lifetime Warranty Warranty Expires Warranty Details Sold To Sold Price Sold Time Sold Notes
-```
-
-!!! tip "Column Order"
- Column headers are just there for reference, the important thing is that the order is correct. You can change the headers to anything you like, this behavior may change in the future.
+ - Imports only support importing items, locations, and labels
+ - Imports and Exports do not support attachments. Attachments must be uploaded after import
+ - CSV Exports do not support nested path exports (e.g. `Home / Office / Desk`) and will only export the Item's direct parent (though imports _do_ support nested paths)
+ - Cannot specify item-to-item relationships (e.g. `Item A` is a child of `Item B`)
+!!! tip "File Formats"
+ The CSV import supports both CSV and TSV files. The only difference is the delimiter used. CSV files use a comma `,` as the delimiter and TSV files use a tab `\t` as the delimiter. The file extension does not matter.
## CSV Reference
-| Column | Type | Description |
-| ----------------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| ImportRef | String (100) | Import Refs are unique strings that can be used to deduplicate imports. Before an item is imported, we check the database for a matching ref. If the ref exists, we skip that item. |
-| Location | String | This is the location of the item that will be created. These are de-duplicated and won't create another instance when reused. |
-| Labels | `;` Separated String | List of labels to apply to the item separated by a `;`, can be existing or new |
-| Quantity | Integer | The quantity of items to create |
-| Name | String | Name of the item |
-| Description | String | Description of the item |
-| Insured | Boolean | Whether or not the item is insured |
-| Serial Number | String | Serial number of the item |
-| Model Number | String | Model of the item |
-| Manufacturer | String | Manufacturer of the item |
-| Notes | String (1000) | General notes about the product |
-| Purchase From | String | Name of the place the item was purchased from |
-| Purchase Price | Float64 | |
-| Purchase At | Date | Date the item was purchased |
-| Lifetime Warranty | Boolean | true or false - case insensitive |
-| Warranty Expires | Date | Date in the format |
-| Warranty Details | String | Details about the warranty |
-| Sold To | String | Name of the person the item was sold to |
-| Sold At | Date | Date the item was sold |
-| Sold Price | Float64 | |
-| Sold Notes | String (1000) | |
+Below are the supported columns. They are case-sensitive, can be in any order, and can be omitted unless otherwise specified.
+
+### Special Syntax Columns
+
+`HB.import_ref`
+
+: Import Refs are unique strings that can be used to deduplicate imports. Before an item is imported, we check the database for a matching ref. If the ref exists, we skip the creation of that item.
+
+ * String Type
+ * Max 100 Characters
+
+ Import Refs are used to de-duplicate imports. It is HIGHLY recommended that you use them to manage your items if you intend to manage your inventory via CSV import/export. If you do not use import refs, you will end up with duplicate items in your database on subsequent imports.
+
+ !!! tip
+
+ Specifying import refs also allows you to update existing items via the CSV import. If you specify an import ref that already exists in the database, we will update the existing item instead of creating a new one.
+
+`HB.location`
+
+: This is the location of the item that will be created. These are de-duplicated and won't create another instance when reused.
+
+ * Supports Path Separators for nested locations (e.g. `Home / Office / Desk`)
+
+`HB.labels`
+
+: List of labels to apply to the item, separated by a `;`. Labels can be existing or new.
+
+`HB.field.{field_name}` (e.g. `HB.field.Serial Number`)
+
+: This is a special column that allows you to add custom fields to the item. The column name must start with `HB.field.` followed by the name of the field. The value of the column will be the value of the field.
+
+ - If the cell value is empty, it will be ignored.
+
+### Standard Columns
+
+| Column | Type | Description |
+|----------------------|---------------|-----------------------------------------------|
+| HB.quantity | Integer | The quantity of items to create |
+| HB.name | String | Name of the item |
+| HB.asset_id | AssetID | Asset ID for the item |
+| HB.description | String | Description of the item |
+| HB.insured | Boolean | Whether or not the item is insured |
+| HB.serial_number | String | Serial number of the item |
+| HB.model_number | String | Model of the item |
+| HB.manufacturer | String | Manufacturer of the item |
+| HB.notes | String (1000) | General notes about the product |
+| HB.purchase_from | String | Name of the place the item was purchased from |
+| HB.purchase_price | Float64 | |
+| HB.purchase_time | Date | Date the item was purchased |
+| HB.lifetime_warranty | Boolean | true or false - case insensitive |
+| HB.warranty_expires | Date | Date in the format |
+| HB.warranty_details | String | Details about the warranty |
+| HB.sold_to | String | Name of the person the item was sold to |
+| HB.sold_time | Date | Date the item was sold |
+| HB.sold_price | Float64 | |
+| HB.sold_notes | String (1000) | |
**Type Key**
| Type | Format |
-| ------- | --------------------------------------------------- |
+|---------|-----------------------------------------------------|
| String | Max 255 Characters unless otherwise specified |
-| Date | MM/DD/YYYY |
+| Date | YYYY-MM-DD |
| Boolean | true or false, yes or no, 1 or 0 - case insensitive |
+| AssetID | 000-000 |
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 8cc0a76..188dac5 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -15,39 +15,42 @@
-Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs. While developing this project I've tried to keep the following principles in mind:
+Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs. While developing this project, I've tried to keep the following principles in mind:
- _Simple_ - Homebox is designed to be simple and easy to use. No complicated setup or configuration required. Use either a single docker container, or deploy yourself by compiling the binary for your platform of choice.
-- _Blazingly Fast_ - Homebox is written in Go which makes it extremely fast and requires minimal resources to deploy. In general idle memory usage is less than 50MB for the whole container.
+- _Blazingly Fast_ - Homebox is written in Go, which makes it extremely fast and requires minimal resources to deploy. In general idle memory usage is less than 50MB for the whole container.
- _Portable_ - Homebox is designed to be portable and run on anywhere. We use SQLite and an embedded Web UI to make it easy to deploy, use, and backup.
## Project Status
-Homebox is currently in early-active development and is currently in **beta** stage. This means that the project may still be unstable and clunky. Overall we are striving to not introduce any breaking changes and have checks in place to ensure migrations and upgrades are smooth. However, we do not guarantee that there will be no breaking changes. We will try to keep the documentation up to date as we make changes.
+Homebox is currently in early active development and is currently in **beta** stage. This means that the project may still be unstable and clunky. Overall, we are striving to not introduce any breaking changes and have checks in place to ensure migrations and upgrades are smooth. However, we do not guarantee that there will be no breaking changes. We will try to keep the documentation up to date as we make changes.
## Features
-- Create and Manage _Items_ by provided a name and description - That's it! Homebox requires only a few details to be provided to create an item, after that you can specify as much detail as you want, or hide away some of the things you won't ever need.
+- Create and Manage _Items_ by providing a name and a description - That's it! Homebox requires only a few details to be provided to create an item, after that you can specify as much detail as you want, or hide away some of the things you won't ever need.
- Optional Details for Items include
- Warranty Information
- Sold To Information
- Purchased From Information
- Item Identifications (Serial, Model, etc)
- Categorized Attachments (Images, Manuals, General)
- - Arbitrary/Custom Fields - _Coming Soon!_
-- Csv Import for quickly creating and managing items - _Export Coming Soon!_
+ - Arbitrary/Custom Fields
+- CSV Import/Export for quickly creating and managing items
+- Custom Reporting
+ - Bill of Materials Export
+ - QR Code Label Generator
- Organize _Items_ by creating _Labels_ and _Locations_ and assigning them to items.
-- Multi-Tenant Support - All users are placed inside of a group and can only see items that are apart of their group. Invite family members to your group, or share an instance among friends!
+- Multi-Tenant Support - All users are placed in a group and can only see items in their group. Invite family members to your group, or share an instance among friends!
## Why Not Use Something Else?
-There are a lot of great inventory management systems out there, but none of them _really_ fit my needs as a home user. Snipe-IT is a fantastic product that has so many robust features and management options that it's easy to become overwhelmed and confused. I wanted something that was simple and easy to use that didn't require a lot of cognitive overhead to manage. I primarily built this to organize my IOT devices and save my warranty and documentation information in a central, searchable location.
+There are a lot of great inventory management systems out there, but none of them _really_ fit my needs as a home user. Snipe-IT is a fantastic product that has so many robust features and management options which makes it easy to become overwhelmed and confused. I wanted something that was simple and easy to use that didn't require a lot of cognitive overhead to manage. I primarily built this to organize my IOT devices and save my warranty and documentation information in a central, searchable location.
### Spreadsheet
-That's a fair point. If your needs can be fulfilled by a Spreadsheet, I'd suggest using that instead. I've found spreadsheets get pretty unwieldy when you have a lot of data and it's hard to keep track of what's where. I also wanted to be able to search and filter my data in a more robust way than a spreadsheet can provide. I also wanted to leave to door open for more advanced features in the future like maintenance logs, moving label generators, and more.
+That's a fair point. If your needs can be fulfilled by a Spreadsheet, I'd suggest using that instead. I've found spreadsheets get pretty unwieldy when you have a lot of data, and it's hard to keep track of what's where. I also wanted to be able to search and filter my data in a more robust way than a spreadsheet can provide. I also wanted to leave the door open for more advanced features in the future like maintenance logs, moving label generators, and more.
### Snipe-It?
-Snipe-It is the gold standard for IT management. If your use-case is to manage consumables and IT physical infrastructure I highly suggest you look at Snipe-It over Homebox, it's just more purpose built for that use case. Homebox is, in contrast, purpose built for the home user, which means that we try to focus on keeping things simple and easy to use. Lowering the friction for creating items and managing them is a key goal of Homebox which means you lose out on some of the more advanced features. In most cases this is a good trade-off.
\ No newline at end of file
+Snipe-It is the gold standard for IT management. If your use-case is to manage consumables and IT physical infrastructure, I highly suggest you look at Snipe-It over Homebox, it's just more purpose built for that use case. Homebox is, in contrast, purpose built for the home user, which means that we try to focus on keeping things simple and easy to use. Lowering the friction for creating items and managing them is a key goal of Homebox which means you lose out on some of the more advanced features. In most cases, this is a good trade-off.
\ No newline at end of file
diff --git a/docs/docs/quick-start.md b/docs/docs/quick-start.md
index f43df50..278b442 100644
--- a/docs/docs/quick-start.md
+++ b/docs/docs/quick-start.md
@@ -4,21 +4,33 @@
Great for testing out the application, but not recommended for stable use. Checkout the docker-compose for the recommended deployment.
+For each image there are two tags: the regular tag and `$TAG-rootless`, which uses a non-root user inside the container.
+
```sh
-docker run --name=homebox \
- --restart=always \
- --publish=3100:7745 \
- ghcr.io/hay-kot/homebox:latest
+# If using the rootless image, ensure data
+# folder has correct permissions
+$ mkdir -p /path/to/data/folder
+$ chown 65532:65532 -R /path/to/data/folder
+# ---------------------------------------
+# Run the image
+$ docker run -d \
+ --name homebox \
+ --restart unless-stopped \
+ --publish 3100:7745 \
+ --env TZ=Europe/Bucharest \
+ --volume /path/to/data/folder/:/data \
+ ghcr.io/hay-kot/homebox:latest
+# ghcr.io/hay-kot/homebox:latest-rootless
+
```
## Docker-Compose
```yaml
-version: "3.4"
-
services:
homebox:
image: ghcr.io/hay-kot/homebox:latest
+# image: ghcr.io/hay-kot/homebox:latest-rootless
container_name: homebox
restart: always
environment:
@@ -35,26 +47,34 @@ volumes:
driver: local
```
+!!! note
+ If you use the `rootless` image, and instead of using named volumes you would prefer using a hostMount directly (e.g., `volumes: [ /path/to/data/folder:/data ]`) you need to `chown` the chosen directory in advance to the `65532` user (as shown in the Docker example above).
+
## Env Variables & Configuration
-| Variable | Default | Description |
-| ------------------------ | ---------------------- | ---------------------------------------------------------------------------------- |
-| HBOX_MODE | production | application mode used for runtime behavior can be one of: development, production |
-| HBOX_WEB_PORT | 7745 | port to run the web server on, if you're using docker do not change this |
-| HBOX_WEB_HOST | | host to run the web server on, if you're using docker do not change this |
-| HBOX_ALLOW_REGISTRATION | true | allow users to register themselves |
-| HBOX_WEB_MAX_UPLOAD_SIZE | 10 | maximum file upload size supported in MB |
-| HBOX_STORAGE_DATA | /data/ | path to the data directory, do not change this if you're using docker |
-| HBOX_STORAGE_SQLITE_URL | /data/homebox.db?_fk=1 | sqlite database url, in you're using docker do not change this |
-| HBOX_LOG_LEVEL | info | log level to use, can be one of: trace, debug, info, warn, error, critical |
-| HBOX_LOG_FORMAT | text | log format to use, can be one of: text, json |
-| HBOX_MAILER_HOST | | email host to use, if not set no email provider will be used |
-| HBOX_MAILER_PORT | 587 | email port to use |
-| HBOX_MAILER_USERNAME | | email user to use |
-| HBOX_MAILER_PASSWORD | | email password to use |
-| HBOX_MAILER_FROM | | email from address to use |
-| HBOX_SWAGGER_HOST | 7745 | swagger host to use, if not set swagger will be disabled |
-| HBOX_SWAGGER_SCHEMA | http | swagger schema to use, can be one of: http, https |
+| Variable | Default | Description |
+| ------------------------------------ | ---------------------- | ---------------------------------------------------------------------------------- |
+| HBOX_MODE | production | application mode used for runtime behavior can be one of: development, production |
+| HBOX_WEB_PORT | 7745 | port to run the web server on, if you're using docker do not change this |
+| HBOX_WEB_HOST | | host to run the web server on, if you're using docker do not change this |
+| HBOX_OPTIONS_ALLOW_REGISTRATION | true | allow users to register themselves |
+| HBOX_OPTIONS_AUTO_INCREMENT_ASSET_ID | true | auto increments the asset_id field for new items |
+| HBOX_OPTIONS_CURRENCY_CONFIG         |                        | json configuration file containing additional currencies                           |
+| HBOX_WEB_MAX_UPLOAD_SIZE | 10 | maximum file upload size supported in MB |
+| HBOX_WEB_READ_TIMEOUT                | 10                     | Read timeout of HTTP server                                                        |
+| HBOX_WEB_WRITE_TIMEOUT | 10 | Write timeout of HTTP server |
+| HBOX_WEB_IDLE_TIMEOUT | 30 | Idle timeout of HTTP server |
+| HBOX_STORAGE_DATA | /data/ | path to the data directory, do not change this if you're using docker |
+| HBOX_STORAGE_SQLITE_URL | /data/homebox.db?_fk=1 | sqlite database url, if you're using docker do not change this |
+| HBOX_LOG_LEVEL | info | log level to use, can be one of: trace, debug, info, warn, error, critical |
+| HBOX_LOG_FORMAT | text | log format to use, can be one of: text, json |
+| HBOX_MAILER_HOST | | email host to use, if not set no email provider will be used |
+| HBOX_MAILER_PORT | 587 | email port to use |
+| HBOX_MAILER_USERNAME | | email user to use |
+| HBOX_MAILER_PASSWORD | | email password to use |
+| HBOX_MAILER_FROM | | email from address to use |
+| HBOX_SWAGGER_HOST | 7745 | swagger host to use, if not set swagger will be disabled |
+| HBOX_SWAGGER_SCHEMA | http | swagger schema to use, can be one of: http, https |
!!! tip "CLI Arguments"
If you're deploying without docker you can use command line arguments to configure the application. Run `homebox --help` for more information.
@@ -63,23 +83,27 @@ volumes:
Usage: api [options] [arguments]
OPTIONS
- --mode/$HBOX_MODE (default: development)
- --web-port/$HBOX_WEB_PORT (default: 7745)
- --web-host/$HBOX_WEB_HOST
- --web-max-upload-size/$HBOX_WEB_MAX_UPLOAD_SIZE (default: 10)
- --storage-data/$HBOX_STORAGE_DATA (default: ./.data)
- --storage-sqlite-url/$HBOX_STORAGE_SQLITE_URL (default: ./.data/homebox.db?_fk=1)
- --log-level/$HBOX_LOG_LEVEL (default: info)
- --log-format/$HBOX_LOG_FORMAT (default: text)
- --mailer-host/$HBOX_MAILER_HOST
- --mailer-port/$HBOX_MAILER_PORT
- --mailer-username/$HBOX_MAILER_USERNAME
- --mailer-password/$HBOX_MAILER_PASSWORD
- --mailer-from/$HBOX_MAILER_FROM
- --swagger-host/$HBOX_SWAGGER_HOST (default: localhost:7745)
- --swagger-scheme/$HBOX_SWAGGER_SCHEME (default: http)
- --demo/$HBOX_DEMO
- --allow-registration/$HBOX_ALLOW_REGISTRATION (default: true)
+ --mode/$HBOX_MODE (default: development)
+ --web-port/$HBOX_WEB_PORT (default: 7745)
+ --web-host/$HBOX_WEB_HOST
+ --web-max-upload-size/$HBOX_WEB_MAX_UPLOAD_SIZE (default: 10)
+ --storage-data/$HBOX_STORAGE_DATA (default: ./.data)
+ --storage-sqlite-url/$HBOX_STORAGE_SQLITE_URL (default: ./.data/homebox.db?_fk=1)
+ --log-level/$HBOX_LOG_LEVEL (default: info)
+ --log-format/$HBOX_LOG_FORMAT (default: text)
+ --mailer-host/$HBOX_MAILER_HOST
+ --mailer-port/$HBOX_MAILER_PORT
+ --mailer-username/$HBOX_MAILER_USERNAME
+ --mailer-password/$HBOX_MAILER_PASSWORD
+ --mailer-from/$HBOX_MAILER_FROM
+ --swagger-host/$HBOX_SWAGGER_HOST (default: localhost:7745)
+ --swagger-scheme/$HBOX_SWAGGER_SCHEME (default: http)
+ --demo/$HBOX_DEMO
+ --debug-enabled/$HBOX_DEBUG_ENABLED (default: false)
+ --debug-port/$HBOX_DEBUG_PORT (default: 4000)
+ --options-allow-registration/$HBOX_OPTIONS_ALLOW_REGISTRATION (default: true)
+ --options-auto-increment-asset-id/$HBOX_OPTIONS_AUTO_INCREMENT_ASSET_ID (default: true)
+ --options-currency-config/$HBOX_OPTIONS_CURRENCY_CONFIG
--help/-h
display this help message
```
diff --git a/docs/docs/tips-tricks.md b/docs/docs/tips-tricks.md
index 30f9f1c..a5ed05a 100644
--- a/docs/docs/tips-tricks.md
+++ b/docs/docs/tips-tricks.md
@@ -12,5 +12,69 @@ Custom fields are a great way to add any extra information to your item. The fol
Custom fields are appended to the main details section of your item.
!!! tip
- Homebox Custom Fields also have special support for URLs. Provide a URL (`https://google.com`) and it will be automatically converted to a clickable link in the UI. Optionally, you can also use markdown syntax to add a custom text to the button. `[Google](https://google.com)`
+ Homebox Custom Fields also have special support for URLs. Provide a URL (`https://google.com`) and it will be automatically converted to a clickable link in the UI. Optionally, you can also use Markdown syntax to add a custom text to the button. `[Google](https://google.com)`
+## Managing Asset IDs
+
+Homebox provides the option to auto-set asset IDs, this is the default behavior. These can be used for tracking assets with printable tags or labels. You can disable this behavior via a command line flag or ENV variable. See [configuration](../quick-start#env-variables-configuration) for more details.
+
+Example ID: `000-001`
+
+Asset IDs are partially managed by Homebox, but have a flexible implementation to allow for unique use cases. IDs are non-unique at the database level, so there is nothing stopping a user from manually setting duplicate IDs for various items. There are two recommended approaches to manage Asset IDs:
+
+### 1. Auto Incrementing IDs
+
+This is the default behavior and is likely to provide the most consistency. Whenever creating or importing an item, that item receives the next available ID. This is recommended for most users.
+
+### 2. Auto Incrementing IDs with Reset
+
+In some cases, you may want to skip some items such as consumables, or items that are loosely tracked. In this case, we recommend that you leave auto-incrementing IDs enabled _however_ when you create a new item that you want to skip, you can go to that item and reset the ID to 0. This will remove it from the auto-incrementing sequence, and the next item will receive the next available ID.
+
+!!! tip
+ If you're migrating from an older version, there is an action on the user's profile page to assign IDs to all items. This will assign the next available ID to all items in order of their creation. You should __only do this once__ during the migration process. You should be especially cautious with this if you're using the reset feature described in [option number 2](#2-auto-incrementing-ids-with-reset)
+
+## QR Codes
+
+:octicons-tag-24: 0.7.0
+
+Homebox has a built-in QR code generator that can be used to generate QR codes for your items. This is useful for tracking items with a mobile device. You can generate a QR code for any item by clicking the QR code icon in the top right of the item details page. The same can be done for the Labels and Locations page. Currently, support is limited to generating one-off QR Codes.
+
+However, the API endpoint is available for generating QR codes on the fly for any item (or any other data) if you provide a valid API key in the query parameters. An example url would look like `/api/v1/qrcode?data=https://homebox.fly.dev/item/{uuid}`. Currently, the easiest way to get an API token is to use one from an existing URL of the QR Code in the API key, but this will be improved in the future.
+
+:octicons-tag-24: v0.8.0
+
+In version 0.8.0, we've added custom label generation. On the tools page, there is now a link to the label-generator page where you can generate labels based on Asset ID for your inventory. These are still in early development, so please provide feedback. There's also more information on the implementation on the label generator page.
+
+[Demo](https://homebox.fly.dev/reports/label-generator)
+
+## Scheduled Maintenance Notifications
+
+:octicons-tag-24: v0.9.0
+
+Homebox uses [shoutrrr](https://containrrr.dev/shoutrrr/0.7/) to send notifications. This allows you to send notifications to a variety of services. On your profile page, you can add notification URLs to your profile which will be used to send notifications when a maintenance event is scheduled.
+
+**Notifications are sent on the day the maintenance is scheduled at or around 8am.**
+
+As of `v0.9.0` we have limited support for complex scheduling of maintenance events. If you have requests for extended functionality, please open an issue on GitHub or reach out on Discord. We're still gauging the demand for this feature.
+
+
+## Custom Currencies
+
+:octicons-tag-24: v0.11.0
+
+Homebox allows you to add additional currencies to your instance by specifying a JSON file containing the currencies you want to add.
+
+**Environment Variable:** `HBOX_OPTIONS_CURRENCY_CONFIG`
+
+### Example
+
+```json
+[
+ {
+ "code": "AED",
+ "local": "United Arab Emirates",
+ "symbol": "د.إ",
+ "name": "United Arab Emirates Dirham"
+  }
+]
+```
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 6d0924e..65bd2e1 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -24,9 +24,13 @@ theme:
- navigation.expand
- navigation.sections
- navigation.tabs.sticky
+ - navigation.tabs
favicon: assets/img/favicon.svg
logo: assets/img/favicon.svg
+plugins:
+ - tags
+
extra_css:
- assets/stylesheets/extras.css
@@ -41,12 +45,13 @@ markdown_extensions:
custom_checkbox: true
- admonition
- attr_list
- - pymdownx.tabbed
- pymdownx.superfences
nav:
- - Home: index.md
- - Quick Start: quick-start.md
- - Tips and Tricks: tips-tricks.md
- - Importing Data: import-csv.md
- - Building The Binary: build.md
+ - Home:
+ - Home: index.md
+ - Quick Start: quick-start.md
+ - Tips and Tricks: tips-tricks.md
+ - Import and Export: import-csv.md
+ - Building The Binary: build.md
+ - API: "https://redocly.github.io/redoc/?url=https://hay-kot.github.io/homebox/api/openapi-2.0.json"
diff --git a/docs/poetry.lock b/docs/poetry.lock
deleted file mode 100644
index 1ea487f..0000000
--- a/docs/poetry.lock
+++ /dev/null
@@ -1,424 +0,0 @@
-[[package]]
-name = "click"
-version = "8.1.3"
-description = "Composable command line interface toolkit"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[[package]]
-name = "colorama"
-version = "0.4.5"
-description = "Cross-platform colored terminal text."
-category = "main"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-
-[[package]]
-name = "ghp-import"
-version = "2.1.0"
-description = "Copy your docs directly to the gh-pages branch."
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-python-dateutil = ">=2.8.1"
-
-[package.extras]
-dev = ["flake8", "markdown", "twine", "wheel"]
-
-[[package]]
-name = "importlib-metadata"
-version = "4.12.0"
-description = "Read metadata from Python packages"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-zipp = ">=0.5"
-
-[package.extras]
-docs = ["jaraco.packaging (>=9)", "rst.linker (>=1.9)", "sphinx"]
-perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
-
-[[package]]
-name = "Jinja2"
-version = "3.1.2"
-description = "A very fast and expressive template engine."
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-MarkupSafe = ">=2.0"
-
-[package.extras]
-i18n = ["Babel (>=2.7)"]
-
-[[package]]
-name = "Markdown"
-version = "3.3.7"
-description = "Python implementation of Markdown."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-testing = ["coverage", "pyyaml"]
-
-[[package]]
-name = "MarkupSafe"
-version = "2.1.1"
-description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[[package]]
-name = "mergedeep"
-version = "1.3.4"
-description = "A deep merge function for 🐍."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[[package]]
-name = "mkdocs"
-version = "1.3.1"
-description = "Project documentation with Markdown."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-click = ">=3.3"
-ghp-import = ">=1.0"
-importlib-metadata = ">=4.3"
-Jinja2 = ">=2.10.2"
-Markdown = ">=3.2.1,<3.4"
-mergedeep = ">=1.3.4"
-packaging = ">=20.5"
-PyYAML = ">=3.10"
-pyyaml-env-tag = ">=0.1"
-watchdog = ">=2.0"
-
-[package.extras]
-i18n = ["babel (>=2.9.0)"]
-
-[[package]]
-name = "mkdocs-material"
-version = "8.4.3"
-description = "Documentation that simply works"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-jinja2 = ">=3.0.2"
-markdown = ">=3.2"
-mkdocs = ">=1.3.0"
-mkdocs-material-extensions = ">=1.0.3"
-pygments = ">=2.12"
-pymdown-extensions = ">=9.4"
-
-[[package]]
-name = "mkdocs-material-extensions"
-version = "1.0.3"
-description = "Extension pack for Python Markdown."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[[package]]
-name = "packaging"
-version = "21.3"
-description = "Core utilities for Python packages"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
-
-[[package]]
-name = "Pygments"
-version = "2.13.0"
-description = "Pygments is a syntax highlighting package written in Python."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-plugins = ["importlib-metadata"]
-
-[[package]]
-name = "pymdown-extensions"
-version = "9.5"
-description = "Extension pack for Python Markdown."
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-markdown = ">=3.2"
-
-[[package]]
-name = "pyparsing"
-version = "3.0.9"
-description = "pyparsing module - Classes and methods to define and execute parsing grammars"
-category = "main"
-optional = false
-python-versions = ">=3.6.8"
-
-[package.extras]
-diagrams = ["jinja2", "railroad-diagrams"]
-
-[[package]]
-name = "python-dateutil"
-version = "2.8.2"
-description = "Extensions to the standard Python datetime module"
-category = "main"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-
-[package.dependencies]
-six = ">=1.5"
-
-[[package]]
-name = "PyYAML"
-version = "6.0"
-description = "YAML parser and emitter for Python"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[[package]]
-name = "pyyaml_env_tag"
-version = "0.1"
-description = "A custom YAML tag for referencing environment variables in YAML files. "
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-pyyaml = "*"
-
-[[package]]
-name = "six"
-version = "1.16.0"
-description = "Python 2 and 3 compatibility utilities"
-category = "main"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
-
-[[package]]
-name = "watchdog"
-version = "2.1.9"
-description = "Filesystem events monitoring"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-watchmedo = ["PyYAML (>=3.10)"]
-
-[[package]]
-name = "zipp"
-version = "3.8.1"
-description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.extras]
-docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
-
-[metadata]
-lock-version = "1.1"
-python-versions = "^3.10"
-content-hash = "80651998c1a6a3d37af9c555486b1616042cb9c5858ea46cd34abfb3d0e25b4f"
-
-[metadata.files]
-click = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
-]
-colorama = [
- {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
- {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
-]
-ghp-import = [
- {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"},
- {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"},
-]
-importlib-metadata = [
- {file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"},
- {file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"},
-]
-Jinja2 = [
- {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
- {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
-]
-Markdown = [
- {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"},
- {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"},
-]
-MarkupSafe = [
- {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"},
- {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"},
-]
-mergedeep = [
- {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"},
- {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"},
-]
-mkdocs = [
- {file = "mkdocs-1.3.1-py3-none-any.whl", hash = "sha256:fda92466393127d2da830bc6edc3a625a14b436316d1caf347690648e774c4f0"},
- {file = "mkdocs-1.3.1.tar.gz", hash = "sha256:a41a2ff25ce3bbacc953f9844ba07d106233cd76c88bac1f59cb1564ac0d87ed"},
-]
-mkdocs-material = [
- {file = "mkdocs-material-8.4.3.tar.gz", hash = "sha256:f39af3234ce0b60024b7712995af0de5b5227ab6504f0b9c8709c9a770bd94bf"},
- {file = "mkdocs_material-8.4.3-py2.py3-none-any.whl", hash = "sha256:d5cc6f5023061a663514f61810052ad266f5199feafcf15ad23ea4891b21e6bc"},
-]
-mkdocs-material-extensions = [
- {file = "mkdocs-material-extensions-1.0.3.tar.gz", hash = "sha256:bfd24dfdef7b41c312ede42648f9eb83476ea168ec163b613f9abd12bbfddba2"},
- {file = "mkdocs_material_extensions-1.0.3-py3-none-any.whl", hash = "sha256:a82b70e533ce060b2a5d9eb2bc2e1be201cf61f901f93704b4acf6e3d5983a44"},
-]
-packaging = [
- {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
- {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
-]
-Pygments = [
- {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"},
- {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"},
-]
-pymdown-extensions = [
- {file = "pymdown_extensions-9.5-py3-none-any.whl", hash = "sha256:ec141c0f4983755349f0c8710416348d1a13753976c028186ed14f190c8061c4"},
- {file = "pymdown_extensions-9.5.tar.gz", hash = "sha256:3ef2d998c0d5fa7eb09291926d90d69391283561cf6306f85cd588a5eb5befa0"},
-]
-pyparsing = [
- {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
- {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
-]
-python-dateutil = [
- {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
- {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
-]
-PyYAML = [
- {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
- {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
- {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
- {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
- {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
- {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
- {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
- {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
- {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
- {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
- {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
- {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
- {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
- {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
- {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
- {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
-]
-pyyaml_env_tag = [
- {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"},
- {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"},
-]
-six = [
- {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
- {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
-]
-watchdog = [
- {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a735a990a1095f75ca4f36ea2ef2752c99e6ee997c46b0de507ba40a09bf7330"},
- {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b17d302850c8d412784d9246cfe8d7e3af6bcd45f958abb2d08a6f8bedf695d"},
- {file = "watchdog-2.1.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658"},
- {file = "watchdog-2.1.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64a27aed691408a6abd83394b38503e8176f69031ca25d64131d8d640a307591"},
- {file = "watchdog-2.1.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:195fc70c6e41237362ba720e9aaf394f8178bfc7fa68207f112d108edef1af33"},
- {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bfc4d351e6348d6ec51df007432e6fe80adb53fd41183716017026af03427846"},
- {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8250546a98388cbc00c3ee3cc5cf96799b5a595270dfcfa855491a64b86ef8c3"},
- {file = "watchdog-2.1.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:117ffc6ec261639a0209a3252546b12800670d4bf5f84fbd355957a0595fe654"},
- {file = "watchdog-2.1.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:97f9752208f5154e9e7b76acc8c4f5a58801b338de2af14e7e181ee3b28a5d39"},
- {file = "watchdog-2.1.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:247dcf1df956daa24828bfea5a138d0e7a7c98b1a47cf1fa5b0c3c16241fcbb7"},
- {file = "watchdog-2.1.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:226b3c6c468ce72051a4c15a4cc2ef317c32590d82ba0b330403cafd98a62cfd"},
- {file = "watchdog-2.1.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d9820fe47c20c13e3c9dd544d3706a2a26c02b2b43c993b62fcd8011bcc0adb3"},
- {file = "watchdog-2.1.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:70af927aa1613ded6a68089a9262a009fbdf819f46d09c1a908d4b36e1ba2b2d"},
- {file = "watchdog-2.1.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9f05a5f7c12452f6a27203f76779ae3f46fa30f1dd833037ea8cbc2887c60213"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_armv7l.whl", hash = "sha256:255bb5758f7e89b1a13c05a5bceccec2219f8995a3a4c4d6968fe1de6a3b2892"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_i686.whl", hash = "sha256:d3dda00aca282b26194bdd0adec21e4c21e916956d972369359ba63ade616153"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64.whl", hash = "sha256:186f6c55abc5e03872ae14c2f294a153ec7292f807af99f57611acc8caa75306"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:083171652584e1b8829581f965b9b7723ca5f9a2cd7e20271edf264cfd7c1412"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_s390x.whl", hash = "sha256:b530ae007a5f5d50b7fbba96634c7ee21abec70dc3e7f0233339c81943848dc1"},
- {file = "watchdog-2.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6"},
- {file = "watchdog-2.1.9-py3-none-win32.whl", hash = "sha256:5952135968519e2447a01875a6f5fc8c03190b24d14ee52b0f4b1682259520b1"},
- {file = "watchdog-2.1.9-py3-none-win_amd64.whl", hash = "sha256:7a833211f49143c3d336729b0020ffd1274078e94b0ae42e22f596999f50279c"},
- {file = "watchdog-2.1.9-py3-none-win_ia64.whl", hash = "sha256:ad576a565260d8f99d97f2e64b0f97a48228317095908568a9d5c786c829d428"},
- {file = "watchdog-2.1.9.tar.gz", hash = "sha256:43ce20ebb36a51f21fa376f76d1d4692452b2527ccd601950d69ed36b9e21609"},
-]
-zipp = [
- {file = "zipp-3.8.1-py3-none-any.whl", hash = "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"},
- {file = "zipp-3.8.1.tar.gz", hash = "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2"},
-]
diff --git a/docs/pyproject.toml b/docs/pyproject.toml
deleted file mode 100644
index dbf93cf..0000000
--- a/docs/pyproject.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[tool.poetry]
-name = "docs"
-version = "0.1.0"
-description = ""
-authors = ["Hayden <64056131+hay-kot@users.noreply.github.com>"]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "^3.10"
-mkdocs-material = "^8.4.3"
-
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..d7301ed
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1 @@
+mkdocs-material==9.5.12
\ No newline at end of file
diff --git a/frontend/.eslintrc.js b/frontend/.eslintrc.js
index fa57669..c567952 100644
--- a/frontend/.eslintrc.js
+++ b/frontend/.eslintrc.js
@@ -25,6 +25,7 @@ module.exports = {
"vue/no-setup-props-destructure": 0,
"vue/no-multiple-template-root": 0,
"vue/no-v-model-argument": 0,
+ "@typescript-eslint/consistent-type-imports": "error",
"@typescript-eslint/ban-ts-comment": 0,
"@typescript-eslint/no-unused-vars": [
"error",
diff --git a/frontend/.nuxtignore b/frontend/.nuxtignore
new file mode 100644
index 0000000..5e5ef76
--- /dev/null
+++ b/frontend/.nuxtignore
@@ -0,0 +1 @@
+pages/**/*.ts
\ No newline at end of file
diff --git a/frontend/app.vue b/frontend/app.vue
index ca390af..8c48eb8 100644
--- a/frontend/app.vue
+++ b/frontend/app.vue
@@ -1,6 +1,11 @@
-
+
+
+
+
+
+
diff --git a/frontend/assets/css/main.css b/frontend/assets/css/main.css
new file mode 100644
index 0000000..89b2e1c
--- /dev/null
+++ b/frontend/assets/css/main.css
@@ -0,0 +1,26 @@
+.text-no-transform {
+ text-transform: none !important;
+}
+
+.btn {
+ text-transform: none !important;
+}
+
+/* transparent subtle scrollbar */
+::-webkit-scrollbar {
+ width: 0.2em;
+ background-color: #F5F5F5;
+}
+
+::-webkit-scrollbar-thumb {
+ background-color: rgba(0,0,0,.2);
+}
+
+::-webkit-scrollbar-track {
+ -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,0.3);
+ background-color: #F5F5F5;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background-color: #9B9B9B;
+}
\ No newline at end of file
diff --git a/frontend/components/App/Header.vue b/frontend/components/App/Header.vue
index 1561c87..b42da7f 100644
--- a/frontend/components/App/Header.vue
+++ b/frontend/components/App/Header.vue
@@ -1,11 +1,11 @@
+
+
+
diff --git a/frontend/components/App/ImportDialog.vue b/frontend/components/App/ImportDialog.vue
new file mode 100644
index 0000000..3e4002c
--- /dev/null
+++ b/frontend/components/App/ImportDialog.vue
@@ -0,0 +1,115 @@
+
+
+ Import CSV File
+
+ Import a CSV file containing your items, labels, and locations. See documentation for more information on the
+ required format.
+
+
+
+
+
+ Behavior for imports with existing import_refs has changed. If an import_ref is present in the CSV file, the
+ item will be updated with the values in the CSV file.
+
+