chore: upgrade deps + code-gen (#249)

Hayden 2023-01-28 12:03:51 -09:00 committed by GitHub
parent 3d295b5132
commit 6ed1f3695a
42 changed files with 664 additions and 563 deletions
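
Most of this diff is regenerated ent code produced after bumping entgo.io/ent; the "code-gen" half of the title refers to re-running ent's generator. As a rough sketch of how that step is typically wired into a Go module (the file name and schema path here are illustrative, not taken from this repository):

// generate.go (illustrative placement; the real directive lives next to the
// project's ent schema package)
package ent

// After upgrading entgo.io/ent in go.mod, running "go generate ./..." re-runs
// this directive and rewrites the generated builders (the *_query.go,
// *_delete.go, and client.go changes shown below).
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema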

View file

@@ -3,9 +3,9 @@ module github.com/hay-kot/homebox/backend
 go 1.19

 require (
-    ariga.io/atlas v0.9.0
-    entgo.io/ent v0.11.5
-    github.com/ardanlabs/conf/v2 v2.2.0
+    ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb
+    entgo.io/ent v0.11.7
+    github.com/ardanlabs/conf/v3 v3.1.3
     github.com/go-chi/chi/v5 v5.0.8
     github.com/go-playground/validator/v10 v10.11.1
     github.com/google/uuid v1.3.0

View file

@@ -1,7 +1,11 @@
 ariga.io/atlas v0.9.0 h1:q0JMtqyA3X1YWtPcn+E/kVPwLDslb+jAC8Ejl/vW6d0=
 ariga.io/atlas v0.9.0/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU=
+ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb h1:mbsFtavDqGdYwdDpP50LGOOZ2hgyGoJcZeOpbgKMyu4=
+ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU=
 entgo.io/ent v0.11.5 h1:V2qhG91C4PMQTa82Q4StoESMQ4dzkMNeStCzszxi0jQ=
 entgo.io/ent v0.11.5/go.mod h1:u7eKwNWAo/VlHIKxgwbmsFy3J7cKDxwi3jyF5TW/okY=
+entgo.io/ent v0.11.7 h1:V+wKFh0jhAbY/FoU+PPbdMOf2Ma5vh07R/IdF+N/nFg=
+entgo.io/ent v0.11.7/go.mod h1:ericBi6Q8l3wBH1wEIDfKxw7rcQEuRPyBfbIzjtxJ18=
 github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
@@ -9,8 +13,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/ardanlabs/conf/v2 v2.2.0 h1:ar1+TYIYAh2Tdeg2DQroh7ruR56/vJR8BDfzDIrXgtk=
-github.com/ardanlabs/conf/v2 v2.2.0/go.mod h1:m37ZKdW9jwMUEhGX36jRNt8VzSQ/HVmSziLZH2p33nY=
+github.com/ardanlabs/conf/v3 v3.1.3 h1:16+Nzfc4PBd/ERtYERUFL/75eVKNyW15Y+vn3W1XZzQ=
+github.com/ardanlabs/conf/v3 v3.1.3/go.mod h1:bIacyuGeZjkTdtszdbvOcuq49VhHpV3+IPZ2ewOAK4I=
 github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -93,8 +97,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
 github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
 github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY=
-github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
 github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
 github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
@@ -114,8 +116,6 @@ github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4=
 github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc=
 github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc=
 github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo=
-github.com/swaggo/swag v1.8.9 h1:kHtaBe/Ob9AZzAANfcn5c6RyCke9gG9QpH0jky0I/sA=
-github.com/swaggo/swag v1.8.9/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
 github.com/swaggo/swag v1.8.10 h1:eExW4bFa52WOjqRzRD58bgWsWfdFJso50lpbeTcmTfo=
 github.com/swaggo/swag v1.8.10/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
 github.com/yeqown/go-qrcode/v2 v2.2.1 h1:Jc1Q916fwC05R8C7mpWDbrT9tyLPaLLKDABoC5XBCe8=

View file

@@ -144,19 +144,19 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
 // QueryItem queries the "item" edge of the Attachment entity.
 func (a *Attachment) QueryItem() *ItemQuery {
-    return (&AttachmentClient{config: a.config}).QueryItem(a)
+    return NewAttachmentClient(a.config).QueryItem(a)
 }

 // QueryDocument queries the "document" edge of the Attachment entity.
 func (a *Attachment) QueryDocument() *DocumentQuery {
-    return (&AttachmentClient{config: a.config}).QueryDocument(a)
+    return NewAttachmentClient(a.config).QueryDocument(a)
 }

 // Update returns a builder for updating this Attachment.
 // Note that you need to call Attachment.Unwrap() before calling this method if this Attachment
 // was returned from a transaction, and the transaction was committed or rolled back.
 func (a *Attachment) Update() *AttachmentUpdateOne {
-    return (&AttachmentClient{config: a.config}).UpdateOne(a)
+    return NewAttachmentClient(a.config).UpdateOne(a)
 }

 // Unwrap unwraps the Attachment entity that was returned from a transaction after it was closed,

View file

@@ -69,6 +69,12 @@ type AttachmentDeleteOne struct {
     ad *AttachmentDelete
 }

+// Where appends a list predicates to the AttachmentDelete builder.
+func (ado *AttachmentDeleteOne) Where(ps ...predicate.Attachment) *AttachmentDeleteOne {
+    ado.ad.mutation.Where(ps...)
+    return ado
+}
+
 // Exec executes the deletion query.
 func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error {
     n, err := ado.ad.Exec(ctx)
@@ -84,5 +90,7 @@ func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error {
 // ExecX is like Exec, but panics if an error occurs.
 func (ado *AttachmentDeleteOne) ExecX(ctx context.Context) {
-    ado.ad.ExecX(ctx)
+    if err := ado.Exec(ctx); err != nil {
+        panic(err)
+    }
 }

View file

@@ -20,11 +20,8 @@ import (
 // AttachmentQuery is the builder for querying Attachment entities.
 type AttachmentQuery struct {
     config
-    limit *int
-    offset *int
-    unique *bool
+    ctx *QueryContext
     order []OrderFunc
-    fields []string
     inters []Interceptor
     predicates []predicate.Attachment
     withItem *ItemQuery
@@ -43,20 +40,20 @@ func (aq *AttachmentQuery) Where(ps ...predicate.Attachment) *AttachmentQuery {
 // Limit the number of records to be returned by this query.
 func (aq *AttachmentQuery) Limit(limit int) *AttachmentQuery {
-    aq.limit = &limit
+    aq.ctx.Limit = &limit
     return aq
 }

 // Offset to start from.
 func (aq *AttachmentQuery) Offset(offset int) *AttachmentQuery {
-    aq.offset = &offset
+    aq.ctx.Offset = &offset
     return aq
 }

 // Unique configures the query builder to filter duplicate records on query.
 // By default, unique is set to true, and can be disabled using this method.
 func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery {
-    aq.unique = &unique
+    aq.ctx.Unique = &unique
     return aq
 }
@@ -113,7 +110,7 @@ func (aq *AttachmentQuery) QueryDocument() *DocumentQuery {
 // First returns the first Attachment entity from the query.
 // Returns a *NotFoundError when no Attachment was found.
 func (aq *AttachmentQuery) First(ctx context.Context) (*Attachment, error) {
-    nodes, err := aq.Limit(1).All(newQueryContext(ctx, TypeAttachment, "First"))
+    nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First"))
     if err != nil {
         return nil, err
     }
@@ -136,7 +133,7 @@ func (aq *AttachmentQuery) FirstX(ctx context.Context) *Attachment {
 // Returns a *NotFoundError when no Attachment ID was found.
 func (aq *AttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
     var ids []uuid.UUID
-    if ids, err = aq.Limit(1).IDs(newQueryContext(ctx, TypeAttachment, "FirstID")); err != nil {
+    if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil {
         return
     }
     if len(ids) == 0 {
@@ -159,7 +156,7 @@ func (aq *AttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID {
 // Returns a *NotSingularError when more than one Attachment entity is found.
 // Returns a *NotFoundError when no Attachment entities are found.
 func (aq *AttachmentQuery) Only(ctx context.Context) (*Attachment, error) {
-    nodes, err := aq.Limit(2).All(newQueryContext(ctx, TypeAttachment, "Only"))
+    nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only"))
     if err != nil {
         return nil, err
     }
@@ -187,7 +184,7 @@ func (aq *AttachmentQuery) OnlyX(ctx context.Context) *Attachment {
 // Returns a *NotFoundError when no entities are found.
 func (aq *AttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
     var ids []uuid.UUID
-    if ids, err = aq.Limit(2).IDs(newQueryContext(ctx, TypeAttachment, "OnlyID")); err != nil {
+    if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil {
         return
     }
     switch len(ids) {
@@ -212,7 +209,7 @@ func (aq *AttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID {
 // All executes the query and returns a list of Attachments.
 func (aq *AttachmentQuery) All(ctx context.Context) ([]*Attachment, error) {
-    ctx = newQueryContext(ctx, TypeAttachment, "All")
+    ctx = setContextOp(ctx, aq.ctx, "All")
     if err := aq.prepareQuery(ctx); err != nil {
         return nil, err
     }
@@ -232,7 +229,7 @@ func (aq *AttachmentQuery) AllX(ctx context.Context) []*Attachment {
 // IDs executes the query and returns a list of Attachment IDs.
 func (aq *AttachmentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
     var ids []uuid.UUID
-    ctx = newQueryContext(ctx, TypeAttachment, "IDs")
+    ctx = setContextOp(ctx, aq.ctx, "IDs")
     if err := aq.Select(attachment.FieldID).Scan(ctx, &ids); err != nil {
         return nil, err
     }
@@ -250,7 +247,7 @@ func (aq *AttachmentQuery) IDsX(ctx context.Context) []uuid.UUID {
 // Count returns the count of the given query.
 func (aq *AttachmentQuery) Count(ctx context.Context) (int, error) {
-    ctx = newQueryContext(ctx, TypeAttachment, "Count")
+    ctx = setContextOp(ctx, aq.ctx, "Count")
     if err := aq.prepareQuery(ctx); err != nil {
         return 0, err
     }
@@ -268,7 +265,7 @@ func (aq *AttachmentQuery) CountX(ctx context.Context) int {
 // Exist returns true if the query has elements in the graph.
 func (aq *AttachmentQuery) Exist(ctx context.Context) (bool, error) {
-    ctx = newQueryContext(ctx, TypeAttachment, "Exist")
+    ctx = setContextOp(ctx, aq.ctx, "Exist")
     switch _, err := aq.FirstID(ctx); {
     case IsNotFound(err):
         return false, nil
@@ -296,8 +293,7 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery {
     }
     return &AttachmentQuery{
         config: aq.config,
-        limit: aq.limit,
-        offset: aq.offset,
+        ctx: aq.ctx.Clone(),
         order: append([]OrderFunc{}, aq.order...),
         inters: append([]Interceptor{}, aq.inters...),
         predicates: append([]predicate.Attachment{}, aq.predicates...),
@@ -306,7 +302,6 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery {
         // clone intermediate query.
         sql: aq.sql.Clone(),
         path: aq.path,
-        unique: aq.unique,
     }
 }
@@ -347,9 +342,9 @@ func (aq *AttachmentQuery) WithDocument(opts ...func(*DocumentQuery)) *Attachmen
 // Aggregate(ent.Count()).
 // Scan(ctx, &v)
 func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGroupBy {
-    aq.fields = append([]string{field}, fields...)
+    aq.ctx.Fields = append([]string{field}, fields...)
     grbuild := &AttachmentGroupBy{build: aq}
-    grbuild.flds = &aq.fields
+    grbuild.flds = &aq.ctx.Fields
     grbuild.label = attachment.Label
     grbuild.scan = grbuild.Scan
     return grbuild
@@ -368,10 +363,10 @@ func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGr
 // Select(attachment.FieldCreatedAt).
 // Scan(ctx, &v)
 func (aq *AttachmentQuery) Select(fields ...string) *AttachmentSelect {
-    aq.fields = append(aq.fields, fields...)
+    aq.ctx.Fields = append(aq.ctx.Fields, fields...)
     sbuild := &AttachmentSelect{AttachmentQuery: aq}
     sbuild.label = attachment.Label
-    sbuild.flds, sbuild.scan = &aq.fields, sbuild.Scan
+    sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan
     return sbuild
 }
@@ -391,7 +386,7 @@ func (aq *AttachmentQuery) prepareQuery(ctx context.Context) error {
             }
         }
     }
-    for _, f := range aq.fields {
+    for _, f := range aq.ctx.Fields {
         if !attachment.ValidColumn(f) {
             return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
         }
@@ -468,6 +463,9 @@ func (aq *AttachmentQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
         }
         nodeids[fk] = append(nodeids[fk], nodes[i])
     }
+    if len(ids) == 0 {
+        return nil
+    }
     query.Where(item.IDIn(ids...))
     neighbors, err := query.All(ctx)
     if err != nil {
@@ -497,6 +495,9 @@ func (aq *AttachmentQuery) loadDocument(ctx context.Context, query *DocumentQuer
         }
         nodeids[fk] = append(nodeids[fk], nodes[i])
     }
+    if len(ids) == 0 {
+        return nil
+    }
     query.Where(document.IDIn(ids...))
     neighbors, err := query.All(ctx)
     if err != nil {
@@ -516,9 +517,9 @@ func (aq *AttachmentQuery) loadDocument(ctx context.Context, query *DocumentQuer
 func (aq *AttachmentQuery) sqlCount(ctx context.Context) (int, error) {
     _spec := aq.querySpec()
-    _spec.Node.Columns = aq.fields
-    if len(aq.fields) > 0 {
-        _spec.Unique = aq.unique != nil && *aq.unique
+    _spec.Node.Columns = aq.ctx.Fields
+    if len(aq.ctx.Fields) > 0 {
+        _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique
     }
     return sqlgraph.CountNodes(ctx, aq.driver, _spec)
 }
@@ -536,10 +537,10 @@ func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
         From: aq.sql,
         Unique: true,
     }
-    if unique := aq.unique; unique != nil {
+    if unique := aq.ctx.Unique; unique != nil {
         _spec.Unique = *unique
     }
-    if fields := aq.fields; len(fields) > 0 {
+    if fields := aq.ctx.Fields; len(fields) > 0 {
         _spec.Node.Columns = make([]string, 0, len(fields))
         _spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID)
         for i := range fields {
@@ -555,10 +556,10 @@ func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
             }
         }
     }
-    if limit := aq.limit; limit != nil {
+    if limit := aq.ctx.Limit; limit != nil {
        _spec.Limit = *limit
     }
-    if offset := aq.offset; offset != nil {
+    if offset := aq.ctx.Offset; offset != nil {
        _spec.Offset = *offset
     }
     if ps := aq.order; len(ps) > 0 {
@@ -574,7 +575,7 @@ func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec {
 func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
     builder := sql.Dialect(aq.driver.Dialect())
     t1 := builder.Table(attachment.Table)
-    columns := aq.fields
+    columns := aq.ctx.Fields
     if len(columns) == 0 {
         columns = attachment.Columns
     }
@@ -583,7 +584,7 @@ func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
         selector = aq.sql
         selector.Select(selector.Columns(columns...)...)
     }
-    if aq.unique != nil && *aq.unique {
+    if aq.ctx.Unique != nil && *aq.ctx.Unique {
         selector.Distinct()
     }
     for _, p := range aq.predicates {
@@ -592,12 +593,12 @@ func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector {
     for _, p := range aq.order {
         p(selector)
     }
-    if offset := aq.offset; offset != nil {
+    if offset := aq.ctx.Offset; offset != nil {
         // limit is mandatory for offset clause. We start
         // with default value, and override it below if needed.
         selector.Offset(*offset).Limit(math.MaxInt32)
     }
-    if limit := aq.limit; limit != nil {
+    if limit := aq.ctx.Limit; limit != nil {
         selector.Limit(*limit)
     }
     return selector
@@ -617,7 +618,7 @@ func (agb *AttachmentGroupBy) Aggregate(fns ...AggregateFunc) *AttachmentGroupBy
 // Scan applies the selector query and scans the result into the given value.
 func (agb *AttachmentGroupBy) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAttachment, "GroupBy")
+    ctx = setContextOp(ctx, agb.build.ctx, "GroupBy")
     if err := agb.build.prepareQuery(ctx); err != nil {
         return err
     }
@@ -665,7 +666,7 @@ func (as *AttachmentSelect) Aggregate(fns ...AggregateFunc) *AttachmentSelect {
 // Scan applies the selector query and scans the result into the given value.
 func (as *AttachmentSelect) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAttachment, "Select")
+    ctx = setContextOp(ctx, as.ctx, "Select")
     if err := as.prepareQuery(ctx); err != nil {
         return err
     }

View file

@@ -99,14 +99,14 @@ func (ar *AuthRoles) assignValues(columns []string, values []any) error {
 // QueryToken queries the "token" edge of the AuthRoles entity.
 func (ar *AuthRoles) QueryToken() *AuthTokensQuery {
-    return (&AuthRolesClient{config: ar.config}).QueryToken(ar)
+    return NewAuthRolesClient(ar.config).QueryToken(ar)
 }

 // Update returns a builder for updating this AuthRoles.
 // Note that you need to call AuthRoles.Unwrap() before calling this method if this AuthRoles
 // was returned from a transaction, and the transaction was committed or rolled back.
 func (ar *AuthRoles) Update() *AuthRolesUpdateOne {
-    return (&AuthRolesClient{config: ar.config}).UpdateOne(ar)
+    return NewAuthRolesClient(ar.config).UpdateOne(ar)
 }

 // Unwrap unwraps the AuthRoles entity that was returned from a transaction after it was closed,

View file

@@ -69,6 +69,12 @@ type AuthRolesDeleteOne struct {
     ard *AuthRolesDelete
 }

+// Where appends a list predicates to the AuthRolesDelete builder.
+func (ardo *AuthRolesDeleteOne) Where(ps ...predicate.AuthRoles) *AuthRolesDeleteOne {
+    ardo.ard.mutation.Where(ps...)
+    return ardo
+}
+
 // Exec executes the deletion query.
 func (ardo *AuthRolesDeleteOne) Exec(ctx context.Context) error {
     n, err := ardo.ard.Exec(ctx)
@@ -84,5 +90,7 @@ func (ardo *AuthRolesDeleteOne) Exec(ctx context.Context) error {
 // ExecX is like Exec, but panics if an error occurs.
 func (ardo *AuthRolesDeleteOne) ExecX(ctx context.Context) {
-    ardo.ard.ExecX(ctx)
+    if err := ardo.Exec(ctx); err != nil {
+        panic(err)
+    }
 }

View file

@@ -19,11 +19,8 @@ import (
 // AuthRolesQuery is the builder for querying AuthRoles entities.
 type AuthRolesQuery struct {
     config
-    limit *int
-    offset *int
-    unique *bool
+    ctx *QueryContext
     order []OrderFunc
-    fields []string
     inters []Interceptor
     predicates []predicate.AuthRoles
     withToken *AuthTokensQuery
@@ -41,20 +38,20 @@ func (arq *AuthRolesQuery) Where(ps ...predicate.AuthRoles) *AuthRolesQuery {
 // Limit the number of records to be returned by this query.
 func (arq *AuthRolesQuery) Limit(limit int) *AuthRolesQuery {
-    arq.limit = &limit
+    arq.ctx.Limit = &limit
     return arq
 }

 // Offset to start from.
 func (arq *AuthRolesQuery) Offset(offset int) *AuthRolesQuery {
-    arq.offset = &offset
+    arq.ctx.Offset = &offset
     return arq
 }

 // Unique configures the query builder to filter duplicate records on query.
 // By default, unique is set to true, and can be disabled using this method.
 func (arq *AuthRolesQuery) Unique(unique bool) *AuthRolesQuery {
-    arq.unique = &unique
+    arq.ctx.Unique = &unique
     return arq
 }
@@ -89,7 +86,7 @@ func (arq *AuthRolesQuery) QueryToken() *AuthTokensQuery {
 // First returns the first AuthRoles entity from the query.
 // Returns a *NotFoundError when no AuthRoles was found.
 func (arq *AuthRolesQuery) First(ctx context.Context) (*AuthRoles, error) {
-    nodes, err := arq.Limit(1).All(newQueryContext(ctx, TypeAuthRoles, "First"))
+    nodes, err := arq.Limit(1).All(setContextOp(ctx, arq.ctx, "First"))
     if err != nil {
         return nil, err
     }
@@ -112,7 +109,7 @@ func (arq *AuthRolesQuery) FirstX(ctx context.Context) *AuthRoles {
 // Returns a *NotFoundError when no AuthRoles ID was found.
 func (arq *AuthRolesQuery) FirstID(ctx context.Context) (id int, err error) {
     var ids []int
-    if ids, err = arq.Limit(1).IDs(newQueryContext(ctx, TypeAuthRoles, "FirstID")); err != nil {
+    if ids, err = arq.Limit(1).IDs(setContextOp(ctx, arq.ctx, "FirstID")); err != nil {
         return
     }
     if len(ids) == 0 {
@@ -135,7 +132,7 @@ func (arq *AuthRolesQuery) FirstIDX(ctx context.Context) int {
 // Returns a *NotSingularError when more than one AuthRoles entity is found.
 // Returns a *NotFoundError when no AuthRoles entities are found.
 func (arq *AuthRolesQuery) Only(ctx context.Context) (*AuthRoles, error) {
-    nodes, err := arq.Limit(2).All(newQueryContext(ctx, TypeAuthRoles, "Only"))
+    nodes, err := arq.Limit(2).All(setContextOp(ctx, arq.ctx, "Only"))
     if err != nil {
         return nil, err
     }
@@ -163,7 +160,7 @@ func (arq *AuthRolesQuery) OnlyX(ctx context.Context) *AuthRoles {
 // Returns a *NotFoundError when no entities are found.
 func (arq *AuthRolesQuery) OnlyID(ctx context.Context) (id int, err error) {
     var ids []int
-    if ids, err = arq.Limit(2).IDs(newQueryContext(ctx, TypeAuthRoles, "OnlyID")); err != nil {
+    if ids, err = arq.Limit(2).IDs(setContextOp(ctx, arq.ctx, "OnlyID")); err != nil {
         return
     }
     switch len(ids) {
@@ -188,7 +185,7 @@ func (arq *AuthRolesQuery) OnlyIDX(ctx context.Context) int {
 // All executes the query and returns a list of AuthRolesSlice.
 func (arq *AuthRolesQuery) All(ctx context.Context) ([]*AuthRoles, error) {
-    ctx = newQueryContext(ctx, TypeAuthRoles, "All")
+    ctx = setContextOp(ctx, arq.ctx, "All")
     if err := arq.prepareQuery(ctx); err != nil {
         return nil, err
     }
@@ -208,7 +205,7 @@ func (arq *AuthRolesQuery) AllX(ctx context.Context) []*AuthRoles {
 // IDs executes the query and returns a list of AuthRoles IDs.
 func (arq *AuthRolesQuery) IDs(ctx context.Context) ([]int, error) {
     var ids []int
-    ctx = newQueryContext(ctx, TypeAuthRoles, "IDs")
+    ctx = setContextOp(ctx, arq.ctx, "IDs")
     if err := arq.Select(authroles.FieldID).Scan(ctx, &ids); err != nil {
         return nil, err
     }
@@ -226,7 +223,7 @@ func (arq *AuthRolesQuery) IDsX(ctx context.Context) []int {
 // Count returns the count of the given query.
 func (arq *AuthRolesQuery) Count(ctx context.Context) (int, error) {
-    ctx = newQueryContext(ctx, TypeAuthRoles, "Count")
+    ctx = setContextOp(ctx, arq.ctx, "Count")
     if err := arq.prepareQuery(ctx); err != nil {
         return 0, err
     }
@@ -244,7 +241,7 @@ func (arq *AuthRolesQuery) CountX(ctx context.Context) int {
 // Exist returns true if the query has elements in the graph.
 func (arq *AuthRolesQuery) Exist(ctx context.Context) (bool, error) {
-    ctx = newQueryContext(ctx, TypeAuthRoles, "Exist")
+    ctx = setContextOp(ctx, arq.ctx, "Exist")
     switch _, err := arq.FirstID(ctx); {
     case IsNotFound(err):
         return false, nil
@@ -272,8 +269,7 @@ func (arq *AuthRolesQuery) Clone() *AuthRolesQuery {
     }
     return &AuthRolesQuery{
         config: arq.config,
-        limit: arq.limit,
-        offset: arq.offset,
+        ctx: arq.ctx.Clone(),
         order: append([]OrderFunc{}, arq.order...),
         inters: append([]Interceptor{}, arq.inters...),
         predicates: append([]predicate.AuthRoles{}, arq.predicates...),
@@ -281,7 +277,6 @@ func (arq *AuthRolesQuery) Clone() *AuthRolesQuery {
         // clone intermediate query.
         sql: arq.sql.Clone(),
         path: arq.path,
-        unique: arq.unique,
     }
 }
@@ -311,9 +306,9 @@ func (arq *AuthRolesQuery) WithToken(opts ...func(*AuthTokensQuery)) *AuthRolesQ
 // Aggregate(ent.Count()).
 // Scan(ctx, &v)
 func (arq *AuthRolesQuery) GroupBy(field string, fields ...string) *AuthRolesGroupBy {
-    arq.fields = append([]string{field}, fields...)
+    arq.ctx.Fields = append([]string{field}, fields...)
     grbuild := &AuthRolesGroupBy{build: arq}
-    grbuild.flds = &arq.fields
+    grbuild.flds = &arq.ctx.Fields
     grbuild.label = authroles.Label
     grbuild.scan = grbuild.Scan
     return grbuild
@@ -332,10 +327,10 @@ func (arq *AuthRolesQuery) GroupBy(field string, fields ...string) *AuthRolesGro
 // Select(authroles.FieldRole).
 // Scan(ctx, &v)
 func (arq *AuthRolesQuery) Select(fields ...string) *AuthRolesSelect {
-    arq.fields = append(arq.fields, fields...)
+    arq.ctx.Fields = append(arq.ctx.Fields, fields...)
     sbuild := &AuthRolesSelect{AuthRolesQuery: arq}
     sbuild.label = authroles.Label
-    sbuild.flds, sbuild.scan = &arq.fields, sbuild.Scan
+    sbuild.flds, sbuild.scan = &arq.ctx.Fields, sbuild.Scan
     return sbuild
 }
@@ -355,7 +350,7 @@ func (arq *AuthRolesQuery) prepareQuery(ctx context.Context) error {
             }
         }
     }
-    for _, f := range arq.fields {
+    for _, f := range arq.ctx.Fields {
         if !authroles.ValidColumn(f) {
             return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
         }
@@ -425,6 +420,9 @@ func (arq *AuthRolesQuery) loadToken(ctx context.Context, query *AuthTokensQuery
         }
         nodeids[fk] = append(nodeids[fk], nodes[i])
     }
+    if len(ids) == 0 {
+        return nil
+    }
     query.Where(authtokens.IDIn(ids...))
     neighbors, err := query.All(ctx)
     if err != nil {
@@ -444,9 +442,9 @@ func (arq *AuthRolesQuery) loadToken(ctx context.Context, query *AuthTokensQuery
 func (arq *AuthRolesQuery) sqlCount(ctx context.Context) (int, error) {
     _spec := arq.querySpec()
-    _spec.Node.Columns = arq.fields
-    if len(arq.fields) > 0 {
-        _spec.Unique = arq.unique != nil && *arq.unique
+    _spec.Node.Columns = arq.ctx.Fields
+    if len(arq.ctx.Fields) > 0 {
+        _spec.Unique = arq.ctx.Unique != nil && *arq.ctx.Unique
     }
     return sqlgraph.CountNodes(ctx, arq.driver, _spec)
 }
@@ -464,10 +462,10 @@ func (arq *AuthRolesQuery) querySpec() *sqlgraph.QuerySpec {
         From: arq.sql,
         Unique: true,
     }
-    if unique := arq.unique; unique != nil {
+    if unique := arq.ctx.Unique; unique != nil {
         _spec.Unique = *unique
     }
-    if fields := arq.fields; len(fields) > 0 {
+    if fields := arq.ctx.Fields; len(fields) > 0 {
         _spec.Node.Columns = make([]string, 0, len(fields))
         _spec.Node.Columns = append(_spec.Node.Columns, authroles.FieldID)
         for i := range fields {
@@ -483,10 +481,10 @@ func (arq *AuthRolesQuery) querySpec() *sqlgraph.QuerySpec {
             }
         }
     }
-    if limit := arq.limit; limit != nil {
+    if limit := arq.ctx.Limit; limit != nil {
        _spec.Limit = *limit
     }
-    if offset := arq.offset; offset != nil {
+    if offset := arq.ctx.Offset; offset != nil {
        _spec.Offset = *offset
     }
     if ps := arq.order; len(ps) > 0 {
@@ -502,7 +500,7 @@ func (arq *AuthRolesQuery) sqlQuery(ctx context.Context) *sql.Selector {
 func (arq *AuthRolesQuery) sqlQuery(ctx context.Context) *sql.Selector {
     builder := sql.Dialect(arq.driver.Dialect())
     t1 := builder.Table(authroles.Table)
-    columns := arq.fields
+    columns := arq.ctx.Fields
     if len(columns) == 0 {
         columns = authroles.Columns
     }
@@ -511,7 +509,7 @@ func (arq *AuthRolesQuery) sqlQuery(ctx context.Context) *sql.Selector {
         selector = arq.sql
         selector.Select(selector.Columns(columns...)...)
     }
-    if arq.unique != nil && *arq.unique {
+    if arq.ctx.Unique != nil && *arq.ctx.Unique {
         selector.Distinct()
     }
     for _, p := range arq.predicates {
@@ -520,12 +518,12 @@ func (arq *AuthRolesQuery) sqlQuery(ctx context.Context) *sql.Selector {
     for _, p := range arq.order {
         p(selector)
     }
-    if offset := arq.offset; offset != nil {
+    if offset := arq.ctx.Offset; offset != nil {
         // limit is mandatory for offset clause. We start
         // with default value, and override it below if needed.
         selector.Offset(*offset).Limit(math.MaxInt32)
     }
-    if limit := arq.limit; limit != nil {
+    if limit := arq.ctx.Limit; limit != nil {
         selector.Limit(*limit)
     }
     return selector
@@ -545,7 +543,7 @@ func (argb *AuthRolesGroupBy) Aggregate(fns ...AggregateFunc) *AuthRolesGroupBy
 // Scan applies the selector query and scans the result into the given value.
 func (argb *AuthRolesGroupBy) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAuthRoles, "GroupBy")
+    ctx = setContextOp(ctx, argb.build.ctx, "GroupBy")
     if err := argb.build.prepareQuery(ctx); err != nil {
         return err
     }
@@ -593,7 +591,7 @@ func (ars *AuthRolesSelect) Aggregate(fns ...AggregateFunc) *AuthRolesSelect {
 // Scan applies the selector query and scans the result into the given value.
 func (ars *AuthRolesSelect) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAuthRoles, "Select")
+    ctx = setContextOp(ctx, ars.ctx, "Select")
     if err := ars.prepareQuery(ctx); err != nil {
         return err
     }

View file

@@ -142,19 +142,19 @@ func (at *AuthTokens) assignValues(columns []string, values []any) error {
 // QueryUser queries the "user" edge of the AuthTokens entity.
 func (at *AuthTokens) QueryUser() *UserQuery {
-    return (&AuthTokensClient{config: at.config}).QueryUser(at)
+    return NewAuthTokensClient(at.config).QueryUser(at)
 }

 // QueryRoles queries the "roles" edge of the AuthTokens entity.
 func (at *AuthTokens) QueryRoles() *AuthRolesQuery {
-    return (&AuthTokensClient{config: at.config}).QueryRoles(at)
+    return NewAuthTokensClient(at.config).QueryRoles(at)
 }

 // Update returns a builder for updating this AuthTokens.
 // Note that you need to call AuthTokens.Unwrap() before calling this method if this AuthTokens
 // was returned from a transaction, and the transaction was committed or rolled back.
 func (at *AuthTokens) Update() *AuthTokensUpdateOne {
-    return (&AuthTokensClient{config: at.config}).UpdateOne(at)
+    return NewAuthTokensClient(at.config).UpdateOne(at)
 }

 // Unwrap unwraps the AuthTokens entity that was returned from a transaction after it was closed,

View file

@@ -69,6 +69,12 @@ type AuthTokensDeleteOne struct {
     atd *AuthTokensDelete
 }

+// Where appends a list predicates to the AuthTokensDelete builder.
+func (atdo *AuthTokensDeleteOne) Where(ps ...predicate.AuthTokens) *AuthTokensDeleteOne {
+    atdo.atd.mutation.Where(ps...)
+    return atdo
+}
+
 // Exec executes the deletion query.
 func (atdo *AuthTokensDeleteOne) Exec(ctx context.Context) error {
     n, err := atdo.atd.Exec(ctx)
@@ -84,5 +90,7 @@ func (atdo *AuthTokensDeleteOne) Exec(ctx context.Context) error {
 // ExecX is like Exec, but panics if an error occurs.
 func (atdo *AuthTokensDeleteOne) ExecX(ctx context.Context) {
-    atdo.atd.ExecX(ctx)
+    if err := atdo.Exec(ctx); err != nil {
+        panic(err)
+    }
 }

View file

@@ -21,11 +21,8 @@ import (
 // AuthTokensQuery is the builder for querying AuthTokens entities.
 type AuthTokensQuery struct {
     config
-    limit *int
-    offset *int
-    unique *bool
+    ctx *QueryContext
     order []OrderFunc
-    fields []string
     inters []Interceptor
     predicates []predicate.AuthTokens
     withUser *UserQuery
@@ -44,20 +41,20 @@ func (atq *AuthTokensQuery) Where(ps ...predicate.AuthTokens) *AuthTokensQuery {
 // Limit the number of records to be returned by this query.
 func (atq *AuthTokensQuery) Limit(limit int) *AuthTokensQuery {
-    atq.limit = &limit
+    atq.ctx.Limit = &limit
     return atq
 }

 // Offset to start from.
 func (atq *AuthTokensQuery) Offset(offset int) *AuthTokensQuery {
-    atq.offset = &offset
+    atq.ctx.Offset = &offset
     return atq
 }

 // Unique configures the query builder to filter duplicate records on query.
 // By default, unique is set to true, and can be disabled using this method.
 func (atq *AuthTokensQuery) Unique(unique bool) *AuthTokensQuery {
-    atq.unique = &unique
+    atq.ctx.Unique = &unique
     return atq
 }
@@ -114,7 +111,7 @@ func (atq *AuthTokensQuery) QueryRoles() *AuthRolesQuery {
 // First returns the first AuthTokens entity from the query.
 // Returns a *NotFoundError when no AuthTokens was found.
 func (atq *AuthTokensQuery) First(ctx context.Context) (*AuthTokens, error) {
-    nodes, err := atq.Limit(1).All(newQueryContext(ctx, TypeAuthTokens, "First"))
+    nodes, err := atq.Limit(1).All(setContextOp(ctx, atq.ctx, "First"))
     if err != nil {
         return nil, err
     }
@@ -137,7 +134,7 @@ func (atq *AuthTokensQuery) FirstX(ctx context.Context) *AuthTokens {
 // Returns a *NotFoundError when no AuthTokens ID was found.
 func (atq *AuthTokensQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
     var ids []uuid.UUID
-    if ids, err = atq.Limit(1).IDs(newQueryContext(ctx, TypeAuthTokens, "FirstID")); err != nil {
+    if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, "FirstID")); err != nil {
         return
     }
     if len(ids) == 0 {
@@ -160,7 +157,7 @@ func (atq *AuthTokensQuery) FirstIDX(ctx context.Context) uuid.UUID {
 // Returns a *NotSingularError when more than one AuthTokens entity is found.
 // Returns a *NotFoundError when no AuthTokens entities are found.
 func (atq *AuthTokensQuery) Only(ctx context.Context) (*AuthTokens, error) {
-    nodes, err := atq.Limit(2).All(newQueryContext(ctx, TypeAuthTokens, "Only"))
+    nodes, err := atq.Limit(2).All(setContextOp(ctx, atq.ctx, "Only"))
     if err != nil {
         return nil, err
     }
@@ -188,7 +185,7 @@ func (atq *AuthTokensQuery) OnlyX(ctx context.Context) *AuthTokens {
 // Returns a *NotFoundError when no entities are found.
 func (atq *AuthTokensQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
     var ids []uuid.UUID
-    if ids, err = atq.Limit(2).IDs(newQueryContext(ctx, TypeAuthTokens, "OnlyID")); err != nil {
+    if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, "OnlyID")); err != nil {
         return
     }
     switch len(ids) {
@@ -213,7 +210,7 @@ func (atq *AuthTokensQuery) OnlyIDX(ctx context.Context) uuid.UUID {
 // All executes the query and returns a list of AuthTokensSlice.
 func (atq *AuthTokensQuery) All(ctx context.Context) ([]*AuthTokens, error) {
-    ctx = newQueryContext(ctx, TypeAuthTokens, "All")
+    ctx = setContextOp(ctx, atq.ctx, "All")
     if err := atq.prepareQuery(ctx); err != nil {
         return nil, err
     }
@@ -233,7 +230,7 @@ func (atq *AuthTokensQuery) AllX(ctx context.Context) []*AuthTokens {
 // IDs executes the query and returns a list of AuthTokens IDs.
 func (atq *AuthTokensQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
     var ids []uuid.UUID
-    ctx = newQueryContext(ctx, TypeAuthTokens, "IDs")
+    ctx = setContextOp(ctx, atq.ctx, "IDs")
     if err := atq.Select(authtokens.FieldID).Scan(ctx, &ids); err != nil {
         return nil, err
     }
@@ -251,7 +248,7 @@ func (atq *AuthTokensQuery) IDsX(ctx context.Context) []uuid.UUID {
 // Count returns the count of the given query.
 func (atq *AuthTokensQuery) Count(ctx context.Context) (int, error) {
-    ctx = newQueryContext(ctx, TypeAuthTokens, "Count")
+    ctx = setContextOp(ctx, atq.ctx, "Count")
     if err := atq.prepareQuery(ctx); err != nil {
         return 0, err
     }
@@ -269,7 +266,7 @@ func (atq *AuthTokensQuery) CountX(ctx context.Context) int {
 // Exist returns true if the query has elements in the graph.
 func (atq *AuthTokensQuery) Exist(ctx context.Context) (bool, error) {
-    ctx = newQueryContext(ctx, TypeAuthTokens, "Exist")
+    ctx = setContextOp(ctx, atq.ctx, "Exist")
     switch _, err := atq.FirstID(ctx); {
     case IsNotFound(err):
         return false, nil
@@ -297,8 +294,7 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery {
     }
     return &AuthTokensQuery{
         config: atq.config,
-        limit: atq.limit,
-        offset: atq.offset,
+        ctx: atq.ctx.Clone(),
         order: append([]OrderFunc{}, atq.order...),
         inters: append([]Interceptor{}, atq.inters...),
         predicates: append([]predicate.AuthTokens{}, atq.predicates...),
@@ -307,7 +303,6 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery {
         // clone intermediate query.
         sql: atq.sql.Clone(),
         path: atq.path,
-        unique: atq.unique,
     }
 }
@@ -348,9 +343,9 @@ func (atq *AuthTokensQuery) WithRoles(opts ...func(*AuthRolesQuery)) *AuthTokens
 // Aggregate(ent.Count()).
 // Scan(ctx, &v)
 func (atq *AuthTokensQuery) GroupBy(field string, fields ...string) *AuthTokensGroupBy {
-    atq.fields = append([]string{field}, fields...)
+    atq.ctx.Fields = append([]string{field}, fields...)
     grbuild := &AuthTokensGroupBy{build: atq}
-    grbuild.flds = &atq.fields
+    grbuild.flds = &atq.ctx.Fields
     grbuild.label = authtokens.Label
     grbuild.scan = grbuild.Scan
     return grbuild
@@ -369,10 +364,10 @@ func (atq *AuthTokensQuery) GroupBy(field string, fields ...string) *AuthTokensG
 // Select(authtokens.FieldCreatedAt).
 // Scan(ctx, &v)
 func (atq *AuthTokensQuery) Select(fields ...string) *AuthTokensSelect {
-    atq.fields = append(atq.fields, fields...)
+    atq.ctx.Fields = append(atq.ctx.Fields, fields...)
     sbuild := &AuthTokensSelect{AuthTokensQuery: atq}
     sbuild.label = authtokens.Label
-    sbuild.flds, sbuild.scan = &atq.fields, sbuild.Scan
+    sbuild.flds, sbuild.scan = &atq.ctx.Fields, sbuild.Scan
     return sbuild
 }
@@ -392,7 +387,7 @@ func (atq *AuthTokensQuery) prepareQuery(ctx context.Context) error {
             }
         }
     }
-    for _, f := range atq.fields {
+    for _, f := range atq.ctx.Fields {
         if !authtokens.ValidColumn(f) {
             return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
         }
@@ -469,6 +464,9 @@ func (atq *AuthTokensQuery) loadUser(ctx context.Context, query *UserQuery, node
         }
         nodeids[fk] = append(nodeids[fk], nodes[i])
     }
+    if len(ids) == 0 {
+        return nil
+    }
     query.Where(user.IDIn(ids...))
     neighbors, err := query.All(ctx)
     if err != nil {
@@ -516,9 +514,9 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery
 func (atq *AuthTokensQuery) sqlCount(ctx context.Context) (int, error) {
     _spec := atq.querySpec()
-    _spec.Node.Columns = atq.fields
-    if len(atq.fields) > 0 {
-        _spec.Unique = atq.unique != nil && *atq.unique
+    _spec.Node.Columns = atq.ctx.Fields
+    if len(atq.ctx.Fields) > 0 {
+        _spec.Unique = atq.ctx.Unique != nil && *atq.ctx.Unique
     }
     return sqlgraph.CountNodes(ctx, atq.driver, _spec)
 }
@@ -536,10 +534,10 @@ func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
         From: atq.sql,
         Unique: true,
     }
-    if unique := atq.unique; unique != nil {
+    if unique := atq.ctx.Unique; unique != nil {
         _spec.Unique = *unique
     }
-    if fields := atq.fields; len(fields) > 0 {
+    if fields := atq.ctx.Fields; len(fields) > 0 {
         _spec.Node.Columns = make([]string, 0, len(fields))
         _spec.Node.Columns = append(_spec.Node.Columns, authtokens.FieldID)
         for i := range fields {
@@ -555,10 +553,10 @@ func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
             }
         }
     }
-    if limit := atq.limit; limit != nil {
+    if limit := atq.ctx.Limit; limit != nil {
        _spec.Limit = *limit
     }
-    if offset := atq.offset; offset != nil {
+    if offset := atq.ctx.Offset; offset != nil {
        _spec.Offset = *offset
     }
     if ps := atq.order; len(ps) > 0 {
@@ -574,7 +572,7 @@ func (atq *AuthTokensQuery) querySpec() *sqlgraph.QuerySpec {
 func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
     builder := sql.Dialect(atq.driver.Dialect())
     t1 := builder.Table(authtokens.Table)
-    columns := atq.fields
+    columns := atq.ctx.Fields
     if len(columns) == 0 {
         columns = authtokens.Columns
     }
@@ -583,7 +581,7 @@ func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
         selector = atq.sql
         selector.Select(selector.Columns(columns...)...)
     }
-    if atq.unique != nil && *atq.unique {
+    if atq.ctx.Unique != nil && *atq.ctx.Unique {
         selector.Distinct()
     }
     for _, p := range atq.predicates {
@@ -592,12 +590,12 @@ func (atq *AuthTokensQuery) sqlQuery(ctx context.Context) *sql.Selector {
     for _, p := range atq.order {
         p(selector)
     }
-    if offset := atq.offset; offset != nil {
+    if offset := atq.ctx.Offset; offset != nil {
         // limit is mandatory for offset clause. We start
         // with default value, and override it below if needed.
         selector.Offset(*offset).Limit(math.MaxInt32)
     }
-    if limit := atq.limit; limit != nil {
+    if limit := atq.ctx.Limit; limit != nil {
         selector.Limit(*limit)
     }
     return selector
@@ -617,7 +615,7 @@ func (atgb *AuthTokensGroupBy) Aggregate(fns ...AggregateFunc) *AuthTokensGroupB
 // Scan applies the selector query and scans the result into the given value.
 func (atgb *AuthTokensGroupBy) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAuthTokens, "GroupBy")
+    ctx = setContextOp(ctx, atgb.build.ctx, "GroupBy")
     if err := atgb.build.prepareQuery(ctx); err != nil {
         return err
     }
@@ -665,7 +663,7 @@ func (ats *AuthTokensSelect) Aggregate(fns ...AggregateFunc) *AuthTokensSelect {
 // Scan applies the selector query and scans the result into the given value.
 func (ats *AuthTokensSelect) Scan(ctx context.Context, v any) error {
-    ctx = newQueryContext(ctx, TypeAuthTokens, "Select")
+    ctx = setContextOp(ctx, ats.ctx, "Select")
     if err := ats.prepareQuery(ctx); err != nil {
         return err
     }


@ -324,6 +324,7 @@ func (c *AttachmentClient) DeleteOneID(id uuid.UUID) *AttachmentDeleteOne {
func (c *AttachmentClient) Query() *AttachmentQuery { func (c *AttachmentClient) Query() *AttachmentQuery {
return &AttachmentQuery{ return &AttachmentQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeAttachment},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -473,6 +474,7 @@ func (c *AuthRolesClient) DeleteOneID(id int) *AuthRolesDeleteOne {
func (c *AuthRolesClient) Query() *AuthRolesQuery { func (c *AuthRolesClient) Query() *AuthRolesQuery {
return &AuthRolesQuery{ return &AuthRolesQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeAuthRoles},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -606,6 +608,7 @@ func (c *AuthTokensClient) DeleteOneID(id uuid.UUID) *AuthTokensDeleteOne {
func (c *AuthTokensClient) Query() *AuthTokensQuery { func (c *AuthTokensClient) Query() *AuthTokensQuery {
return &AuthTokensQuery{ return &AuthTokensQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeAuthTokens},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -755,6 +758,7 @@ func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne {
func (c *DocumentClient) Query() *DocumentQuery { func (c *DocumentClient) Query() *DocumentQuery {
return &DocumentQuery{ return &DocumentQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeDocument},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -904,6 +908,7 @@ func (c *GroupClient) DeleteOneID(id uuid.UUID) *GroupDeleteOne {
func (c *GroupClient) Query() *GroupQuery { func (c *GroupClient) Query() *GroupQuery {
return &GroupQuery{ return &GroupQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeGroup},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1117,6 +1122,7 @@ func (c *GroupInvitationTokenClient) DeleteOneID(id uuid.UUID) *GroupInvitationT
func (c *GroupInvitationTokenClient) Query() *GroupInvitationTokenQuery { func (c *GroupInvitationTokenClient) Query() *GroupInvitationTokenQuery {
return &GroupInvitationTokenQuery{ return &GroupInvitationTokenQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeGroupInvitationToken},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1250,6 +1256,7 @@ func (c *ItemClient) DeleteOneID(id uuid.UUID) *ItemDeleteOne {
func (c *ItemClient) Query() *ItemQuery { func (c *ItemClient) Query() *ItemQuery {
return &ItemQuery{ return &ItemQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeItem},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1495,6 +1502,7 @@ func (c *ItemFieldClient) DeleteOneID(id uuid.UUID) *ItemFieldDeleteOne {
func (c *ItemFieldClient) Query() *ItemFieldQuery { func (c *ItemFieldClient) Query() *ItemFieldQuery {
return &ItemFieldQuery{ return &ItemFieldQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeItemField},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1628,6 +1636,7 @@ func (c *LabelClient) DeleteOneID(id uuid.UUID) *LabelDeleteOne {
func (c *LabelClient) Query() *LabelQuery { func (c *LabelClient) Query() *LabelQuery {
return &LabelQuery{ return &LabelQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeLabel},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1777,6 +1786,7 @@ func (c *LocationClient) DeleteOneID(id uuid.UUID) *LocationDeleteOne {
func (c *LocationClient) Query() *LocationQuery { func (c *LocationClient) Query() *LocationQuery {
return &LocationQuery{ return &LocationQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeLocation},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -1958,6 +1968,7 @@ func (c *MaintenanceEntryClient) DeleteOneID(id uuid.UUID) *MaintenanceEntryDele
func (c *MaintenanceEntryClient) Query() *MaintenanceEntryQuery { func (c *MaintenanceEntryClient) Query() *MaintenanceEntryQuery {
return &MaintenanceEntryQuery{ return &MaintenanceEntryQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeMaintenanceEntry},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
@ -2091,6 +2102,7 @@ func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne {
func (c *UserClient) Query() *UserQuery { func (c *UserClient) Query() *UserQuery {
return &UserQuery{ return &UserQuery{
config: c.config, config: c.config,
ctx: &QueryContext{Type: TypeUser},
inters: c.Interceptors(), inters: c.Interceptors(),
} }
} }
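Since every regenerated Query() constructor now seeds a typed QueryContext, an interceptor can read the entity type and, once a terminal method runs, the operation straight from the context. A minimal logging sketch, assuming the generated package lives at github.com/hay-kot/homebox/backend/internal/data/ent and that the generated Client exposes Intercept and re-exports InterceptFunc/QuerierFunc (the Interceptors() calls above suggest interceptor support is generated):

package entexample

import (
	"context"
	"log"

	entsdk "entgo.io/ent"

	"github.com/hay-kot/homebox/backend/internal/data/ent" // assumed import path
)

// attachQueryLogger logs the type and op of every query the client executes.
func attachQueryLogger(client *ent.Client) {
	client.Intercept(ent.InterceptFunc(func(next ent.Querier) ent.Querier {
		return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
			// Query() attached &QueryContext{Type: ...}; setContextOp fills in Op
			// ("All", "Count", ...) at the terminal call.
			if qc := entsdk.QueryFromContext(ctx); qc != nil {
				log.Printf("ent query: type=%s op=%s", qc.Type, qc.Op)
			}
			return next.Query(ctx, q)
		})
	}))
}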


@ -137,19 +137,19 @@ func (d *Document) assignValues(columns []string, values []any) error {
// QueryGroup queries the "group" edge of the Document entity. // QueryGroup queries the "group" edge of the Document entity.
func (d *Document) QueryGroup() *GroupQuery { func (d *Document) QueryGroup() *GroupQuery {
return (&DocumentClient{config: d.config}).QueryGroup(d) return NewDocumentClient(d.config).QueryGroup(d)
} }
// QueryAttachments queries the "attachments" edge of the Document entity. // QueryAttachments queries the "attachments" edge of the Document entity.
func (d *Document) QueryAttachments() *AttachmentQuery { func (d *Document) QueryAttachments() *AttachmentQuery {
return (&DocumentClient{config: d.config}).QueryAttachments(d) return NewDocumentClient(d.config).QueryAttachments(d)
} }
// Update returns a builder for updating this Document. // Update returns a builder for updating this Document.
// Note that you need to call Document.Unwrap() before calling this method if this Document // Note that you need to call Document.Unwrap() before calling this method if this Document
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (d *Document) Update() *DocumentUpdateOne { func (d *Document) Update() *DocumentUpdateOne {
return (&DocumentClient{config: d.config}).UpdateOne(d) return NewDocumentClient(d.config).UpdateOne(d)
} }
// Unwrap unwraps the Document entity that was returned from a transaction after it was closed, // Unwrap unwraps the Document entity that was returned from a transaction after it was closed,


@ -69,6 +69,12 @@ type DocumentDeleteOne struct {
dd *DocumentDelete dd *DocumentDelete
} }
// Where appends a list of predicates to the DocumentDelete builder.
func (ddo *DocumentDeleteOne) Where(ps ...predicate.Document) *DocumentDeleteOne {
ddo.dd.mutation.Where(ps...)
return ddo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error { func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error {
n, err := ddo.dd.Exec(ctx) n, err := ddo.dd.Exec(ctx)
@ -84,5 +90,7 @@ func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) { func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) {
ddo.dd.ExecX(ctx) if err := ddo.Exec(ctx); err != nil {
panic(err)
}
} }
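The Where method now generated on the DeleteOne builders (here for Document, and likewise for the other entities further down) lets a single-row delete carry extra guards. A usage sketch; the import paths, field, and predicate names are illustrative:

package entexample

import (
	"context"

	"github.com/google/uuid"

	"github.com/hay-kot/homebox/backend/internal/data/ent"          // assumed import path
	"github.com/hay-kot/homebox/backend/internal/data/ent/document" // assumed import path
)

// deleteIfStillTitled removes the document only if the extra predicate still matches.
func deleteIfStillTitled(ctx context.Context, client *ent.Client, id uuid.UUID) error {
	err := client.Document.
		DeleteOneID(id).
		Where(document.TitleEQ("scanned-receipt")). // illustrative field/predicate
		Exec(ctx)
	if ent.IsNotFound(err) {
		// No row was deleted: it was already gone or the predicate filtered it out.
		return nil
	}
	return err
}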


@ -21,11 +21,8 @@ import (
// DocumentQuery is the builder for querying Document entities. // DocumentQuery is the builder for querying Document entities.
type DocumentQuery struct { type DocumentQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.Document predicates []predicate.Document
withGroup *GroupQuery withGroup *GroupQuery
@ -44,20 +41,20 @@ func (dq *DocumentQuery) Where(ps ...predicate.Document) *DocumentQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (dq *DocumentQuery) Limit(limit int) *DocumentQuery { func (dq *DocumentQuery) Limit(limit int) *DocumentQuery {
dq.limit = &limit dq.ctx.Limit = &limit
return dq return dq
} }
// Offset to start from. // Offset to start from.
func (dq *DocumentQuery) Offset(offset int) *DocumentQuery { func (dq *DocumentQuery) Offset(offset int) *DocumentQuery {
dq.offset = &offset dq.ctx.Offset = &offset
return dq return dq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery { func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery {
dq.unique = &unique dq.ctx.Unique = &unique
return dq return dq
} }
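From the caller's side, Limit, Offset, and Unique behave exactly as before; only the storage moved from dedicated struct fields onto dq.ctx. For example (ctx and client assumed to be a context.Context and the generated *ent.Client):

// Second page of 10 documents; Unique(false) keeps duplicate rows that can
// appear when querying through edges.
docs, err := client.Document.Query().
	Unique(false).
	Offset(10).
	Limit(10).
	All(ctx)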
@ -114,7 +111,7 @@ func (dq *DocumentQuery) QueryAttachments() *AttachmentQuery {
// First returns the first Document entity from the query. // First returns the first Document entity from the query.
// Returns a *NotFoundError when no Document was found. // Returns a *NotFoundError when no Document was found.
func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) { func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) {
nodes, err := dq.Limit(1).All(newQueryContext(ctx, TypeDocument, "First")) nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -137,7 +134,7 @@ func (dq *DocumentQuery) FirstX(ctx context.Context) *Document {
// Returns a *NotFoundError when no Document ID was found. // Returns a *NotFoundError when no Document ID was found.
func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = dq.Limit(1).IDs(newQueryContext(ctx, TypeDocument, "FirstID")); err != nil { if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -160,7 +157,7 @@ func (dq *DocumentQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Document entity is found. // Returns a *NotSingularError when more than one Document entity is found.
// Returns a *NotFoundError when no Document entities are found. // Returns a *NotFoundError when no Document entities are found.
func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) { func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) {
nodes, err := dq.Limit(2).All(newQueryContext(ctx, TypeDocument, "Only")) nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -188,7 +185,7 @@ func (dq *DocumentQuery) OnlyX(ctx context.Context) *Document {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = dq.Limit(2).IDs(newQueryContext(ctx, TypeDocument, "OnlyID")); err != nil { if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -213,7 +210,7 @@ func (dq *DocumentQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Documents. // All executes the query and returns a list of Documents.
func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) { func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) {
ctx = newQueryContext(ctx, TypeDocument, "All") ctx = setContextOp(ctx, dq.ctx, "All")
if err := dq.prepareQuery(ctx); err != nil { if err := dq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -233,7 +230,7 @@ func (dq *DocumentQuery) AllX(ctx context.Context) []*Document {
// IDs executes the query and returns a list of Document IDs. // IDs executes the query and returns a list of Document IDs.
func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeDocument, "IDs") ctx = setContextOp(ctx, dq.ctx, "IDs")
if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil { if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -251,7 +248,7 @@ func (dq *DocumentQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (dq *DocumentQuery) Count(ctx context.Context) (int, error) { func (dq *DocumentQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeDocument, "Count") ctx = setContextOp(ctx, dq.ctx, "Count")
if err := dq.prepareQuery(ctx); err != nil { if err := dq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -269,7 +266,7 @@ func (dq *DocumentQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) { func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeDocument, "Exist") ctx = setContextOp(ctx, dq.ctx, "Exist")
switch _, err := dq.FirstID(ctx); { switch _, err := dq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -297,8 +294,7 @@ func (dq *DocumentQuery) Clone() *DocumentQuery {
} }
return &DocumentQuery{ return &DocumentQuery{
config: dq.config, config: dq.config,
limit: dq.limit, ctx: dq.ctx.Clone(),
offset: dq.offset,
order: append([]OrderFunc{}, dq.order...), order: append([]OrderFunc{}, dq.order...),
inters: append([]Interceptor{}, dq.inters...), inters: append([]Interceptor{}, dq.inters...),
predicates: append([]predicate.Document{}, dq.predicates...), predicates: append([]predicate.Document{}, dq.predicates...),
@ -307,7 +303,6 @@ func (dq *DocumentQuery) Clone() *DocumentQuery {
// clone intermediate query. // clone intermediate query.
sql: dq.sql.Clone(), sql: dq.sql.Clone(),
path: dq.path, path: dq.path,
unique: dq.unique,
} }
} }
@ -348,9 +343,9 @@ func (dq *DocumentQuery) WithAttachments(opts ...func(*AttachmentQuery)) *Docume
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy { func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy {
dq.fields = append([]string{field}, fields...) dq.ctx.Fields = append([]string{field}, fields...)
grbuild := &DocumentGroupBy{build: dq} grbuild := &DocumentGroupBy{build: dq}
grbuild.flds = &dq.fields grbuild.flds = &dq.ctx.Fields
grbuild.label = document.Label grbuild.label = document.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -369,10 +364,10 @@ func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupB
// Select(document.FieldCreatedAt). // Select(document.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect { func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect {
dq.fields = append(dq.fields, fields...) dq.ctx.Fields = append(dq.ctx.Fields, fields...)
sbuild := &DocumentSelect{DocumentQuery: dq} sbuild := &DocumentSelect{DocumentQuery: dq}
sbuild.label = document.Label sbuild.label = document.Label
sbuild.flds, sbuild.scan = &dq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
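The GroupBy and Select builders now alias their field list to dq.ctx.Fields, but the documented call pattern is unchanged. A sketch matching the doc-comment above (the time import, ctx, and client are assumed):

var v []struct {
	CreatedAt time.Time `json:"created_at"`
	Count     int       `json:"count"`
}
err := client.Document.Query().
	GroupBy(document.FieldCreatedAt).
	Aggregate(ent.Count()).
	Scan(ctx, &v)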
@ -392,7 +387,7 @@ func (dq *DocumentQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range dq.fields { for _, f := range dq.ctx.Fields {
if !document.ValidColumn(f) { if !document.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -470,6 +465,9 @@ func (dq *DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -520,9 +518,9 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) { func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dq.querySpec() _spec := dq.querySpec()
_spec.Node.Columns = dq.fields _spec.Node.Columns = dq.ctx.Fields
if len(dq.fields) > 0 { if len(dq.ctx.Fields) > 0 {
_spec.Unique = dq.unique != nil && *dq.unique _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, dq.driver, _spec) return sqlgraph.CountNodes(ctx, dq.driver, _spec)
} }
@ -540,10 +538,10 @@ func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
From: dq.sql, From: dq.sql,
Unique: true, Unique: true,
} }
if unique := dq.unique; unique != nil { if unique := dq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := dq.fields; len(fields) > 0 { if fields := dq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, document.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, document.FieldID)
for i := range fields { for i := range fields {
@ -559,10 +557,10 @@ func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := dq.limit; limit != nil { if limit := dq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := dq.offset; offset != nil { if offset := dq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := dq.order; len(ps) > 0 { if ps := dq.order; len(ps) > 0 {
@ -578,7 +576,7 @@ func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector { func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect()) builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(document.Table) t1 := builder.Table(document.Table)
columns := dq.fields columns := dq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = document.Columns columns = document.Columns
} }
@ -587,7 +585,7 @@ func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = dq.sql selector = dq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if dq.unique != nil && *dq.unique { if dq.ctx.Unique != nil && *dq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range dq.predicates { for _, p := range dq.predicates {
@ -596,12 +594,12 @@ func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range dq.order { for _, p := range dq.order {
p(selector) p(selector)
} }
if offset := dq.offset; offset != nil { if offset := dq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := dq.limit; limit != nil { if limit := dq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -621,7 +619,7 @@ func (dgb *DocumentGroupBy) Aggregate(fns ...AggregateFunc) *DocumentGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (dgb *DocumentGroupBy) Scan(ctx context.Context, v any) error { func (dgb *DocumentGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeDocument, "GroupBy") ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy")
if err := dgb.build.prepareQuery(ctx); err != nil { if err := dgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -669,7 +667,7 @@ func (ds *DocumentSelect) Aggregate(fns ...AggregateFunc) *DocumentSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ds *DocumentSelect) Scan(ctx context.Context, v any) error { func (ds *DocumentSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeDocument, "Select") ctx = setContextOp(ctx, ds.ctx, "Select")
if err := ds.prepareQuery(ctx); err != nil { if err := ds.prepareQuery(ctx); err != nil {
return err return err
} }


@ -31,6 +31,7 @@ type (
Hook = ent.Hook Hook = ent.Hook
Value = ent.Value Value = ent.Value
Query = ent.Query Query = ent.Query
QueryContext = ent.QueryContext
Querier = ent.Querier Querier = ent.Querier
QuerierFunc = ent.QuerierFunc QuerierFunc = ent.QuerierFunc
Interceptor = ent.Interceptor Interceptor = ent.Interceptor
@ -525,10 +526,11 @@ func withHooks[V Value, M any, PM interface {
return nv, nil return nv, nil
} }
// newQueryContext returns a new context with the given QueryContext attached in case it does not exist. // setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
func newQueryContext(ctx context.Context, typ, op string) context.Context { func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
if ent.QueryFromContext(ctx) == nil { if ent.QueryFromContext(ctx) == nil {
ctx = ent.NewQueryContext(ctx, &ent.QueryContext{Type: typ, Op: op}) qc.Op = op
ctx = ent.NewQueryContext(ctx, qc)
} }
return ctx return ctx
} }
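Behavioural note on the replacement helper: setContextOp stamps the operation onto the builder's own QueryContext and attaches it, but only when the incoming context does not already carry one, so a nested query (for example an eager-load running under an outer All) keeps the outer context. A conceptual walk-through, written as if inside the generated package because setContextOp and TypeDocument are package-level identifiers there:

ctx := context.Background()
qc := &QueryContext{Type: TypeDocument}
ctx = setContextOp(ctx, qc, "Count")
// ent.QueryFromContext(ctx) now returns qc, and qc.Op == "Count".
// Had ctx already carried a QueryContext, both ctx and qc would be left untouched.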


@ -166,39 +166,39 @@ func (gr *Group) assignValues(columns []string, values []any) error {
// QueryUsers queries the "users" edge of the Group entity. // QueryUsers queries the "users" edge of the Group entity.
func (gr *Group) QueryUsers() *UserQuery { func (gr *Group) QueryUsers() *UserQuery {
return (&GroupClient{config: gr.config}).QueryUsers(gr) return NewGroupClient(gr.config).QueryUsers(gr)
} }
// QueryLocations queries the "locations" edge of the Group entity. // QueryLocations queries the "locations" edge of the Group entity.
func (gr *Group) QueryLocations() *LocationQuery { func (gr *Group) QueryLocations() *LocationQuery {
return (&GroupClient{config: gr.config}).QueryLocations(gr) return NewGroupClient(gr.config).QueryLocations(gr)
} }
// QueryItems queries the "items" edge of the Group entity. // QueryItems queries the "items" edge of the Group entity.
func (gr *Group) QueryItems() *ItemQuery { func (gr *Group) QueryItems() *ItemQuery {
return (&GroupClient{config: gr.config}).QueryItems(gr) return NewGroupClient(gr.config).QueryItems(gr)
} }
// QueryLabels queries the "labels" edge of the Group entity. // QueryLabels queries the "labels" edge of the Group entity.
func (gr *Group) QueryLabels() *LabelQuery { func (gr *Group) QueryLabels() *LabelQuery {
return (&GroupClient{config: gr.config}).QueryLabels(gr) return NewGroupClient(gr.config).QueryLabels(gr)
} }
// QueryDocuments queries the "documents" edge of the Group entity. // QueryDocuments queries the "documents" edge of the Group entity.
func (gr *Group) QueryDocuments() *DocumentQuery { func (gr *Group) QueryDocuments() *DocumentQuery {
return (&GroupClient{config: gr.config}).QueryDocuments(gr) return NewGroupClient(gr.config).QueryDocuments(gr)
} }
// QueryInvitationTokens queries the "invitation_tokens" edge of the Group entity. // QueryInvitationTokens queries the "invitation_tokens" edge of the Group entity.
func (gr *Group) QueryInvitationTokens() *GroupInvitationTokenQuery { func (gr *Group) QueryInvitationTokens() *GroupInvitationTokenQuery {
return (&GroupClient{config: gr.config}).QueryInvitationTokens(gr) return NewGroupClient(gr.config).QueryInvitationTokens(gr)
} }
// Update returns a builder for updating this Group. // Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group // Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (gr *Group) Update() *GroupUpdateOne { func (gr *Group) Update() *GroupUpdateOne {
return (&GroupClient{config: gr.config}).UpdateOne(gr) return NewGroupClient(gr.config).UpdateOne(gr)
} }
// Unwrap unwraps the Group entity that was returned from a transaction after it was closed, // Unwrap unwraps the Group entity that was returned from a transaction after it was closed,


@ -69,6 +69,12 @@ type GroupDeleteOne struct {
gd *GroupDelete gd *GroupDelete
} }
// Where appends a list of predicates to the GroupDelete builder.
func (gdo *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
gdo.gd.mutation.Where(ps...)
return gdo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (gdo *GroupDeleteOne) Exec(ctx context.Context) error { func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
n, err := gdo.gd.Exec(ctx) n, err := gdo.gd.Exec(ctx)
@ -84,5 +90,7 @@ func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (gdo *GroupDeleteOne) ExecX(ctx context.Context) { func (gdo *GroupDeleteOne) ExecX(ctx context.Context) {
gdo.gd.ExecX(ctx) if err := gdo.Exec(ctx); err != nil {
panic(err)
}
} }


@ -25,11 +25,8 @@ import (
// GroupQuery is the builder for querying Group entities. // GroupQuery is the builder for querying Group entities.
type GroupQuery struct { type GroupQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.Group predicates []predicate.Group
withUsers *UserQuery withUsers *UserQuery
@ -51,20 +48,20 @@ func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (gq *GroupQuery) Limit(limit int) *GroupQuery { func (gq *GroupQuery) Limit(limit int) *GroupQuery {
gq.limit = &limit gq.ctx.Limit = &limit
return gq return gq
} }
// Offset to start from. // Offset to start from.
func (gq *GroupQuery) Offset(offset int) *GroupQuery { func (gq *GroupQuery) Offset(offset int) *GroupQuery {
gq.offset = &offset gq.ctx.Offset = &offset
return gq return gq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (gq *GroupQuery) Unique(unique bool) *GroupQuery { func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
gq.unique = &unique gq.ctx.Unique = &unique
return gq return gq
} }
@ -209,7 +206,7 @@ func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery {
// First returns the first Group entity from the query. // First returns the first Group entity from the query.
// Returns a *NotFoundError when no Group was found. // Returns a *NotFoundError when no Group was found.
func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
nodes, err := gq.Limit(1).All(newQueryContext(ctx, TypeGroup, "First")) nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -232,7 +229,7 @@ func (gq *GroupQuery) FirstX(ctx context.Context) *Group {
// Returns a *NotFoundError when no Group ID was found. // Returns a *NotFoundError when no Group ID was found.
func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = gq.Limit(1).IDs(newQueryContext(ctx, TypeGroup, "FirstID")); err != nil { if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -255,7 +252,7 @@ func (gq *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Group entity is found. // Returns a *NotSingularError when more than one Group entity is found.
// Returns a *NotFoundError when no Group entities are found. // Returns a *NotFoundError when no Group entities are found.
func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) { func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) {
nodes, err := gq.Limit(2).All(newQueryContext(ctx, TypeGroup, "Only")) nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -283,7 +280,7 @@ func (gq *GroupQuery) OnlyX(ctx context.Context) *Group {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = gq.Limit(2).IDs(newQueryContext(ctx, TypeGroup, "OnlyID")); err != nil { if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -308,7 +305,7 @@ func (gq *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Groups. // All executes the query and returns a list of Groups.
func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) { func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) {
ctx = newQueryContext(ctx, TypeGroup, "All") ctx = setContextOp(ctx, gq.ctx, "All")
if err := gq.prepareQuery(ctx); err != nil { if err := gq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -328,7 +325,7 @@ func (gq *GroupQuery) AllX(ctx context.Context) []*Group {
// IDs executes the query and returns a list of Group IDs. // IDs executes the query and returns a list of Group IDs.
func (gq *GroupQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (gq *GroupQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeGroup, "IDs") ctx = setContextOp(ctx, gq.ctx, "IDs")
if err := gq.Select(group.FieldID).Scan(ctx, &ids); err != nil { if err := gq.Select(group.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -346,7 +343,7 @@ func (gq *GroupQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (gq *GroupQuery) Count(ctx context.Context) (int, error) { func (gq *GroupQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeGroup, "Count") ctx = setContextOp(ctx, gq.ctx, "Count")
if err := gq.prepareQuery(ctx); err != nil { if err := gq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -364,7 +361,7 @@ func (gq *GroupQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) { func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeGroup, "Exist") ctx = setContextOp(ctx, gq.ctx, "Exist")
switch _, err := gq.FirstID(ctx); { switch _, err := gq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -392,8 +389,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
} }
return &GroupQuery{ return &GroupQuery{
config: gq.config, config: gq.config,
limit: gq.limit, ctx: gq.ctx.Clone(),
offset: gq.offset,
order: append([]OrderFunc{}, gq.order...), order: append([]OrderFunc{}, gq.order...),
inters: append([]Interceptor{}, gq.inters...), inters: append([]Interceptor{}, gq.inters...),
predicates: append([]predicate.Group{}, gq.predicates...), predicates: append([]predicate.Group{}, gq.predicates...),
@ -406,7 +402,6 @@ func (gq *GroupQuery) Clone() *GroupQuery {
// clone intermediate query. // clone intermediate query.
sql: gq.sql.Clone(), sql: gq.sql.Clone(),
path: gq.path, path: gq.path,
unique: gq.unique,
} }
} }
@ -491,9 +486,9 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQue
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
gq.fields = append([]string{field}, fields...) gq.ctx.Fields = append([]string{field}, fields...)
grbuild := &GroupGroupBy{build: gq} grbuild := &GroupGroupBy{build: gq}
grbuild.flds = &gq.fields grbuild.flds = &gq.ctx.Fields
grbuild.label = group.Label grbuild.label = group.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -512,10 +507,10 @@ func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
// Select(group.FieldCreatedAt). // Select(group.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (gq *GroupQuery) Select(fields ...string) *GroupSelect { func (gq *GroupQuery) Select(fields ...string) *GroupSelect {
gq.fields = append(gq.fields, fields...) gq.ctx.Fields = append(gq.ctx.Fields, fields...)
sbuild := &GroupSelect{GroupQuery: gq} sbuild := &GroupSelect{GroupQuery: gq}
sbuild.label = group.Label sbuild.label = group.Label
sbuild.flds, sbuild.scan = &gq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -535,7 +530,7 @@ func (gq *GroupQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range gq.fields { for _, f := range gq.ctx.Fields {
if !group.ValidColumn(f) { if !group.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -817,9 +812,9 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
_spec := gq.querySpec() _spec := gq.querySpec()
_spec.Node.Columns = gq.fields _spec.Node.Columns = gq.ctx.Fields
if len(gq.fields) > 0 { if len(gq.ctx.Fields) > 0 {
_spec.Unique = gq.unique != nil && *gq.unique _spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, gq.driver, _spec) return sqlgraph.CountNodes(ctx, gq.driver, _spec)
} }
@ -837,10 +832,10 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
From: gq.sql, From: gq.sql,
Unique: true, Unique: true,
} }
if unique := gq.unique; unique != nil { if unique := gq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := gq.fields; len(fields) > 0 { if fields := gq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
for i := range fields { for i := range fields {
@ -856,10 +851,10 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := gq.limit; limit != nil { if limit := gq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := gq.offset; offset != nil { if offset := gq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := gq.order; len(ps) > 0 { if ps := gq.order; len(ps) > 0 {
@ -875,7 +870,7 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(gq.driver.Dialect()) builder := sql.Dialect(gq.driver.Dialect())
t1 := builder.Table(group.Table) t1 := builder.Table(group.Table)
columns := gq.fields columns := gq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = group.Columns columns = group.Columns
} }
@ -884,7 +879,7 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = gq.sql selector = gq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if gq.unique != nil && *gq.unique { if gq.ctx.Unique != nil && *gq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range gq.predicates { for _, p := range gq.predicates {
@ -893,12 +888,12 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range gq.order { for _, p := range gq.order {
p(selector) p(selector)
} }
if offset := gq.offset; offset != nil { if offset := gq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := gq.limit; limit != nil { if limit := gq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -918,7 +913,7 @@ func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error { func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeGroup, "GroupBy") ctx = setContextOp(ctx, ggb.build.ctx, "GroupBy")
if err := ggb.build.prepareQuery(ctx); err != nil { if err := ggb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -966,7 +961,7 @@ func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (gs *GroupSelect) Scan(ctx context.Context, v any) error { func (gs *GroupSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeGroup, "Select") ctx = setContextOp(ctx, gs.ctx, "Select")
if err := gs.prepareQuery(ctx); err != nil { if err := gs.prepareQuery(ctx); err != nil {
return err return err
} }


@ -136,14 +136,14 @@ func (git *GroupInvitationToken) assignValues(columns []string, values []any) er
// QueryGroup queries the "group" edge of the GroupInvitationToken entity. // QueryGroup queries the "group" edge of the GroupInvitationToken entity.
func (git *GroupInvitationToken) QueryGroup() *GroupQuery { func (git *GroupInvitationToken) QueryGroup() *GroupQuery {
return (&GroupInvitationTokenClient{config: git.config}).QueryGroup(git) return NewGroupInvitationTokenClient(git.config).QueryGroup(git)
} }
// Update returns a builder for updating this GroupInvitationToken. // Update returns a builder for updating this GroupInvitationToken.
// Note that you need to call GroupInvitationToken.Unwrap() before calling this method if this GroupInvitationToken // Note that you need to call GroupInvitationToken.Unwrap() before calling this method if this GroupInvitationToken
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (git *GroupInvitationToken) Update() *GroupInvitationTokenUpdateOne { func (git *GroupInvitationToken) Update() *GroupInvitationTokenUpdateOne {
return (&GroupInvitationTokenClient{config: git.config}).UpdateOne(git) return NewGroupInvitationTokenClient(git.config).UpdateOne(git)
} }
// Unwrap unwraps the GroupInvitationToken entity that was returned from a transaction after it was closed, // Unwrap unwraps the GroupInvitationToken entity that was returned from a transaction after it was closed,


@ -69,6 +69,12 @@ type GroupInvitationTokenDeleteOne struct {
gitd *GroupInvitationTokenDelete gitd *GroupInvitationTokenDelete
} }
// Where appends a list of predicates to the GroupInvitationTokenDelete builder.
func (gitdo *GroupInvitationTokenDeleteOne) Where(ps ...predicate.GroupInvitationToken) *GroupInvitationTokenDeleteOne {
gitdo.gitd.mutation.Where(ps...)
return gitdo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (gitdo *GroupInvitationTokenDeleteOne) Exec(ctx context.Context) error { func (gitdo *GroupInvitationTokenDeleteOne) Exec(ctx context.Context) error {
n, err := gitdo.gitd.Exec(ctx) n, err := gitdo.gitd.Exec(ctx)
@ -84,5 +90,7 @@ func (gitdo *GroupInvitationTokenDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (gitdo *GroupInvitationTokenDeleteOne) ExecX(ctx context.Context) { func (gitdo *GroupInvitationTokenDeleteOne) ExecX(ctx context.Context) {
gitdo.gitd.ExecX(ctx) if err := gitdo.Exec(ctx); err != nil {
panic(err)
}
} }


@ -19,11 +19,8 @@ import (
// GroupInvitationTokenQuery is the builder for querying GroupInvitationToken entities. // GroupInvitationTokenQuery is the builder for querying GroupInvitationToken entities.
type GroupInvitationTokenQuery struct { type GroupInvitationTokenQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.GroupInvitationToken predicates []predicate.GroupInvitationToken
withGroup *GroupQuery withGroup *GroupQuery
@ -41,20 +38,20 @@ func (gitq *GroupInvitationTokenQuery) Where(ps ...predicate.GroupInvitationToke
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (gitq *GroupInvitationTokenQuery) Limit(limit int) *GroupInvitationTokenQuery { func (gitq *GroupInvitationTokenQuery) Limit(limit int) *GroupInvitationTokenQuery {
gitq.limit = &limit gitq.ctx.Limit = &limit
return gitq return gitq
} }
// Offset to start from. // Offset to start from.
func (gitq *GroupInvitationTokenQuery) Offset(offset int) *GroupInvitationTokenQuery { func (gitq *GroupInvitationTokenQuery) Offset(offset int) *GroupInvitationTokenQuery {
gitq.offset = &offset gitq.ctx.Offset = &offset
return gitq return gitq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationTokenQuery { func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationTokenQuery {
gitq.unique = &unique gitq.ctx.Unique = &unique
return gitq return gitq
} }
@ -89,7 +86,7 @@ func (gitq *GroupInvitationTokenQuery) QueryGroup() *GroupQuery {
// First returns the first GroupInvitationToken entity from the query. // First returns the first GroupInvitationToken entity from the query.
// Returns a *NotFoundError when no GroupInvitationToken was found. // Returns a *NotFoundError when no GroupInvitationToken was found.
func (gitq *GroupInvitationTokenQuery) First(ctx context.Context) (*GroupInvitationToken, error) { func (gitq *GroupInvitationTokenQuery) First(ctx context.Context) (*GroupInvitationToken, error) {
nodes, err := gitq.Limit(1).All(newQueryContext(ctx, TypeGroupInvitationToken, "First")) nodes, err := gitq.Limit(1).All(setContextOp(ctx, gitq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -112,7 +109,7 @@ func (gitq *GroupInvitationTokenQuery) FirstX(ctx context.Context) *GroupInvitat
// Returns a *NotFoundError when no GroupInvitationToken ID was found. // Returns a *NotFoundError when no GroupInvitationToken ID was found.
func (gitq *GroupInvitationTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (gitq *GroupInvitationTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = gitq.Limit(1).IDs(newQueryContext(ctx, TypeGroupInvitationToken, "FirstID")); err != nil { if ids, err = gitq.Limit(1).IDs(setContextOp(ctx, gitq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -135,7 +132,7 @@ func (gitq *GroupInvitationTokenQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one GroupInvitationToken entity is found. // Returns a *NotSingularError when more than one GroupInvitationToken entity is found.
// Returns a *NotFoundError when no GroupInvitationToken entities are found. // Returns a *NotFoundError when no GroupInvitationToken entities are found.
func (gitq *GroupInvitationTokenQuery) Only(ctx context.Context) (*GroupInvitationToken, error) { func (gitq *GroupInvitationTokenQuery) Only(ctx context.Context) (*GroupInvitationToken, error) {
nodes, err := gitq.Limit(2).All(newQueryContext(ctx, TypeGroupInvitationToken, "Only")) nodes, err := gitq.Limit(2).All(setContextOp(ctx, gitq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -163,7 +160,7 @@ func (gitq *GroupInvitationTokenQuery) OnlyX(ctx context.Context) *GroupInvitati
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (gitq *GroupInvitationTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (gitq *GroupInvitationTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = gitq.Limit(2).IDs(newQueryContext(ctx, TypeGroupInvitationToken, "OnlyID")); err != nil { if ids, err = gitq.Limit(2).IDs(setContextOp(ctx, gitq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -188,7 +185,7 @@ func (gitq *GroupInvitationTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of GroupInvitationTokens. // All executes the query and returns a list of GroupInvitationTokens.
func (gitq *GroupInvitationTokenQuery) All(ctx context.Context) ([]*GroupInvitationToken, error) { func (gitq *GroupInvitationTokenQuery) All(ctx context.Context) ([]*GroupInvitationToken, error) {
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "All") ctx = setContextOp(ctx, gitq.ctx, "All")
if err := gitq.prepareQuery(ctx); err != nil { if err := gitq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -208,7 +205,7 @@ func (gitq *GroupInvitationTokenQuery) AllX(ctx context.Context) []*GroupInvitat
// IDs executes the query and returns a list of GroupInvitationToken IDs. // IDs executes the query and returns a list of GroupInvitationToken IDs.
func (gitq *GroupInvitationTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (gitq *GroupInvitationTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "IDs") ctx = setContextOp(ctx, gitq.ctx, "IDs")
if err := gitq.Select(groupinvitationtoken.FieldID).Scan(ctx, &ids); err != nil { if err := gitq.Select(groupinvitationtoken.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -226,7 +223,7 @@ func (gitq *GroupInvitationTokenQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (gitq *GroupInvitationTokenQuery) Count(ctx context.Context) (int, error) { func (gitq *GroupInvitationTokenQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "Count") ctx = setContextOp(ctx, gitq.ctx, "Count")
if err := gitq.prepareQuery(ctx); err != nil { if err := gitq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -244,7 +241,7 @@ func (gitq *GroupInvitationTokenQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (gitq *GroupInvitationTokenQuery) Exist(ctx context.Context) (bool, error) { func (gitq *GroupInvitationTokenQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "Exist") ctx = setContextOp(ctx, gitq.ctx, "Exist")
switch _, err := gitq.FirstID(ctx); { switch _, err := gitq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -272,8 +269,7 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery {
} }
return &GroupInvitationTokenQuery{ return &GroupInvitationTokenQuery{
config: gitq.config, config: gitq.config,
limit: gitq.limit, ctx: gitq.ctx.Clone(),
offset: gitq.offset,
order: append([]OrderFunc{}, gitq.order...), order: append([]OrderFunc{}, gitq.order...),
inters: append([]Interceptor{}, gitq.inters...), inters: append([]Interceptor{}, gitq.inters...),
predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...), predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...),
@ -281,7 +277,6 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery {
// clone intermediate query. // clone intermediate query.
sql: gitq.sql.Clone(), sql: gitq.sql.Clone(),
path: gitq.path, path: gitq.path,
unique: gitq.unique,
} }
} }
@ -311,9 +306,9 @@ func (gitq *GroupInvitationTokenQuery) WithGroup(opts ...func(*GroupQuery)) *Gro
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (gitq *GroupInvitationTokenQuery) GroupBy(field string, fields ...string) *GroupInvitationTokenGroupBy { func (gitq *GroupInvitationTokenQuery) GroupBy(field string, fields ...string) *GroupInvitationTokenGroupBy {
gitq.fields = append([]string{field}, fields...) gitq.ctx.Fields = append([]string{field}, fields...)
grbuild := &GroupInvitationTokenGroupBy{build: gitq} grbuild := &GroupInvitationTokenGroupBy{build: gitq}
grbuild.flds = &gitq.fields grbuild.flds = &gitq.ctx.Fields
grbuild.label = groupinvitationtoken.Label grbuild.label = groupinvitationtoken.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -332,10 +327,10 @@ func (gitq *GroupInvitationTokenQuery) GroupBy(field string, fields ...string) *
// Select(groupinvitationtoken.FieldCreatedAt). // Select(groupinvitationtoken.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (gitq *GroupInvitationTokenQuery) Select(fields ...string) *GroupInvitationTokenSelect { func (gitq *GroupInvitationTokenQuery) Select(fields ...string) *GroupInvitationTokenSelect {
gitq.fields = append(gitq.fields, fields...) gitq.ctx.Fields = append(gitq.ctx.Fields, fields...)
sbuild := &GroupInvitationTokenSelect{GroupInvitationTokenQuery: gitq} sbuild := &GroupInvitationTokenSelect{GroupInvitationTokenQuery: gitq}
sbuild.label = groupinvitationtoken.Label sbuild.label = groupinvitationtoken.Label
sbuild.flds, sbuild.scan = &gitq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &gitq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -355,7 +350,7 @@ func (gitq *GroupInvitationTokenQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range gitq.fields { for _, f := range gitq.ctx.Fields {
if !groupinvitationtoken.ValidColumn(f) { if !groupinvitationtoken.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -425,6 +420,9 @@ func (gitq *GroupInvitationTokenQuery) loadGroup(ctx context.Context, query *Gro
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -444,9 +442,9 @@ func (gitq *GroupInvitationTokenQuery) loadGroup(ctx context.Context, query *Gro
func (gitq *GroupInvitationTokenQuery) sqlCount(ctx context.Context) (int, error) { func (gitq *GroupInvitationTokenQuery) sqlCount(ctx context.Context) (int, error) {
_spec := gitq.querySpec() _spec := gitq.querySpec()
_spec.Node.Columns = gitq.fields _spec.Node.Columns = gitq.ctx.Fields
if len(gitq.fields) > 0 { if len(gitq.ctx.Fields) > 0 {
_spec.Unique = gitq.unique != nil && *gitq.unique _spec.Unique = gitq.ctx.Unique != nil && *gitq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, gitq.driver, _spec) return sqlgraph.CountNodes(ctx, gitq.driver, _spec)
} }
@ -464,10 +462,10 @@ func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
From: gitq.sql, From: gitq.sql,
Unique: true, Unique: true,
} }
if unique := gitq.unique; unique != nil { if unique := gitq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := gitq.fields; len(fields) > 0 { if fields := gitq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, groupinvitationtoken.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, groupinvitationtoken.FieldID)
for i := range fields { for i := range fields {
@ -483,10 +481,10 @@ func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := gitq.limit; limit != nil { if limit := gitq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := gitq.offset; offset != nil { if offset := gitq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := gitq.order; len(ps) > 0 { if ps := gitq.order; len(ps) > 0 {
@ -502,7 +500,7 @@ func (gitq *GroupInvitationTokenQuery) querySpec() *sqlgraph.QuerySpec {
func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(gitq.driver.Dialect()) builder := sql.Dialect(gitq.driver.Dialect())
t1 := builder.Table(groupinvitationtoken.Table) t1 := builder.Table(groupinvitationtoken.Table)
columns := gitq.fields columns := gitq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = groupinvitationtoken.Columns columns = groupinvitationtoken.Columns
} }
@ -511,7 +509,7 @@ func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Select
selector = gitq.sql selector = gitq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if gitq.unique != nil && *gitq.unique { if gitq.ctx.Unique != nil && *gitq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range gitq.predicates { for _, p := range gitq.predicates {
@ -520,12 +518,12 @@ func (gitq *GroupInvitationTokenQuery) sqlQuery(ctx context.Context) *sql.Select
for _, p := range gitq.order { for _, p := range gitq.order {
p(selector) p(selector)
} }
if offset := gitq.offset; offset != nil { if offset := gitq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := gitq.limit; limit != nil { if limit := gitq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -545,7 +543,7 @@ func (gitgb *GroupInvitationTokenGroupBy) Aggregate(fns ...AggregateFunc) *Group
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (gitgb *GroupInvitationTokenGroupBy) Scan(ctx context.Context, v any) error { func (gitgb *GroupInvitationTokenGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "GroupBy") ctx = setContextOp(ctx, gitgb.build.ctx, "GroupBy")
if err := gitgb.build.prepareQuery(ctx); err != nil { if err := gitgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -593,7 +591,7 @@ func (gits *GroupInvitationTokenSelect) Aggregate(fns ...AggregateFunc) *GroupIn
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (gits *GroupInvitationTokenSelect) Scan(ctx context.Context, v any) error { func (gits *GroupInvitationTokenSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeGroupInvitationToken, "Select") ctx = setContextOp(ctx, gits.ctx, "Select")
if err := gits.prepareQuery(ctx); err != nil { if err := gits.prepareQuery(ctx); err != nil {
return err return err
} }
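
Across the regenerated query builders in this commit, the per-builder limit/offset/unique/fields members are consolidated into a single ctx *QueryContext, tracing moves from newQueryContext to setContextOp, and the edge loaders return early when no foreign keys were collected. The exported builder API is unchanged; the following is a minimal caller-side sketch, assuming a generated *ent.Client value named client (imports and the surrounding function omitted):

    // Limit/Offset are now stored on the shared QueryContext internally,
    // but callers keep using the same chained builder methods.
    tokens, err := client.GroupInvitationToken.Query().
        Limit(20).
        Offset(40).
        All(ctx)
    if err != nil {
        return err
    }
    _ = tokens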


@ -390,49 +390,49 @@ func (i *Item) assignValues(columns []string, values []any) error {
// QueryParent queries the "parent" edge of the Item entity. // QueryParent queries the "parent" edge of the Item entity.
func (i *Item) QueryParent() *ItemQuery { func (i *Item) QueryParent() *ItemQuery {
return (&ItemClient{config: i.config}).QueryParent(i) return NewItemClient(i.config).QueryParent(i)
} }
// QueryChildren queries the "children" edge of the Item entity. // QueryChildren queries the "children" edge of the Item entity.
func (i *Item) QueryChildren() *ItemQuery { func (i *Item) QueryChildren() *ItemQuery {
return (&ItemClient{config: i.config}).QueryChildren(i) return NewItemClient(i.config).QueryChildren(i)
} }
// QueryGroup queries the "group" edge of the Item entity. // QueryGroup queries the "group" edge of the Item entity.
func (i *Item) QueryGroup() *GroupQuery { func (i *Item) QueryGroup() *GroupQuery {
return (&ItemClient{config: i.config}).QueryGroup(i) return NewItemClient(i.config).QueryGroup(i)
} }
// QueryLabel queries the "label" edge of the Item entity. // QueryLabel queries the "label" edge of the Item entity.
func (i *Item) QueryLabel() *LabelQuery { func (i *Item) QueryLabel() *LabelQuery {
return (&ItemClient{config: i.config}).QueryLabel(i) return NewItemClient(i.config).QueryLabel(i)
} }
// QueryLocation queries the "location" edge of the Item entity. // QueryLocation queries the "location" edge of the Item entity.
func (i *Item) QueryLocation() *LocationQuery { func (i *Item) QueryLocation() *LocationQuery {
return (&ItemClient{config: i.config}).QueryLocation(i) return NewItemClient(i.config).QueryLocation(i)
} }
// QueryFields queries the "fields" edge of the Item entity. // QueryFields queries the "fields" edge of the Item entity.
func (i *Item) QueryFields() *ItemFieldQuery { func (i *Item) QueryFields() *ItemFieldQuery {
return (&ItemClient{config: i.config}).QueryFields(i) return NewItemClient(i.config).QueryFields(i)
} }
// QueryMaintenanceEntries queries the "maintenance_entries" edge of the Item entity. // QueryMaintenanceEntries queries the "maintenance_entries" edge of the Item entity.
func (i *Item) QueryMaintenanceEntries() *MaintenanceEntryQuery { func (i *Item) QueryMaintenanceEntries() *MaintenanceEntryQuery {
return (&ItemClient{config: i.config}).QueryMaintenanceEntries(i) return NewItemClient(i.config).QueryMaintenanceEntries(i)
} }
// QueryAttachments queries the "attachments" edge of the Item entity. // QueryAttachments queries the "attachments" edge of the Item entity.
func (i *Item) QueryAttachments() *AttachmentQuery { func (i *Item) QueryAttachments() *AttachmentQuery {
return (&ItemClient{config: i.config}).QueryAttachments(i) return NewItemClient(i.config).QueryAttachments(i)
} }
// Update returns a builder for updating this Item. // Update returns a builder for updating this Item.
// Note that you need to call Item.Unwrap() before calling this method if this Item // Note that you need to call Item.Unwrap() before calling this method if this Item
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (i *Item) Update() *ItemUpdateOne { func (i *Item) Update() *ItemUpdateOne {
return (&ItemClient{config: i.config}).UpdateOne(i) return NewItemClient(i.config).UpdateOne(i)
} }
// Unwrap unwraps the Item entity that was returned from a transaction after it was closed, // Unwrap unwraps the Item entity that was returned from a transaction after it was closed,
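
The entity edge helpers now delegate to the exported NewItemClient constructor instead of building an ItemClient literal inline. A minimal sketch of edge traversal from a loaded entity, assuming an *ent.Item value named it obtained elsewhere (imports omitted); caller-side behavior is unchanged:

    // Edge traversal still reads the same; the generated helpers simply
    // route through NewItemClient(i.config) under the hood.
    loc, err := it.QueryLocation().Only(ctx)
    if err != nil {
        return err
    }
    fields, err := it.QueryFields().All(ctx)
    if err != nil {
        return err
    }
    _, _ = loc, fields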


@ -69,6 +69,12 @@ type ItemDeleteOne struct {
id *ItemDelete id *ItemDelete
} }
// Where appends a list predicates to the ItemDelete builder.
func (ido *ItemDeleteOne) Where(ps ...predicate.Item) *ItemDeleteOne {
ido.id.mutation.Where(ps...)
return ido
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (ido *ItemDeleteOne) Exec(ctx context.Context) error { func (ido *ItemDeleteOne) Exec(ctx context.Context) error {
n, err := ido.id.Exec(ctx) n, err := ido.id.Exec(ctx)
@ -84,5 +90,7 @@ func (ido *ItemDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (ido *ItemDeleteOne) ExecX(ctx context.Context) { func (ido *ItemDeleteOne) ExecX(ctx context.Context) {
ido.id.ExecX(ctx) if err := ido.Exec(ctx); err != nil {
panic(err)
}
} }
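
ItemDeleteOne gains a Where method, and ExecX now panics via Exec, so appended predicates are honored on both paths. A hedged sketch of a guarded single-row delete; client, id, knownCreatedAt, DeleteOneID, and the item.CreatedAtEQ predicate are assumed from the usual ent code generation rather than shown in this diff:

    // Guarded delete: if the extra predicate no longer matches, no row is
    // deleted and Exec reports a not-found error.
    err := client.Item.DeleteOneID(id).
        Where(item.CreatedAtEQ(knownCreatedAt)).
        Exec(ctx)
    if err != nil {
        return err
    }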


@ -25,11 +25,8 @@ import (
// ItemQuery is the builder for querying Item entities. // ItemQuery is the builder for querying Item entities.
type ItemQuery struct { type ItemQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.Item predicates []predicate.Item
withParent *ItemQuery withParent *ItemQuery
@ -54,20 +51,20 @@ func (iq *ItemQuery) Where(ps ...predicate.Item) *ItemQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (iq *ItemQuery) Limit(limit int) *ItemQuery { func (iq *ItemQuery) Limit(limit int) *ItemQuery {
iq.limit = &limit iq.ctx.Limit = &limit
return iq return iq
} }
// Offset to start from. // Offset to start from.
func (iq *ItemQuery) Offset(offset int) *ItemQuery { func (iq *ItemQuery) Offset(offset int) *ItemQuery {
iq.offset = &offset iq.ctx.Offset = &offset
return iq return iq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (iq *ItemQuery) Unique(unique bool) *ItemQuery { func (iq *ItemQuery) Unique(unique bool) *ItemQuery {
iq.unique = &unique iq.ctx.Unique = &unique
return iq return iq
} }
@ -256,7 +253,7 @@ func (iq *ItemQuery) QueryAttachments() *AttachmentQuery {
// First returns the first Item entity from the query. // First returns the first Item entity from the query.
// Returns a *NotFoundError when no Item was found. // Returns a *NotFoundError when no Item was found.
func (iq *ItemQuery) First(ctx context.Context) (*Item, error) { func (iq *ItemQuery) First(ctx context.Context) (*Item, error) {
nodes, err := iq.Limit(1).All(newQueryContext(ctx, TypeItem, "First")) nodes, err := iq.Limit(1).All(setContextOp(ctx, iq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -279,7 +276,7 @@ func (iq *ItemQuery) FirstX(ctx context.Context) *Item {
// Returns a *NotFoundError when no Item ID was found. // Returns a *NotFoundError when no Item ID was found.
func (iq *ItemQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (iq *ItemQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = iq.Limit(1).IDs(newQueryContext(ctx, TypeItem, "FirstID")); err != nil { if ids, err = iq.Limit(1).IDs(setContextOp(ctx, iq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -302,7 +299,7 @@ func (iq *ItemQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Item entity is found. // Returns a *NotSingularError when more than one Item entity is found.
// Returns a *NotFoundError when no Item entities are found. // Returns a *NotFoundError when no Item entities are found.
func (iq *ItemQuery) Only(ctx context.Context) (*Item, error) { func (iq *ItemQuery) Only(ctx context.Context) (*Item, error) {
nodes, err := iq.Limit(2).All(newQueryContext(ctx, TypeItem, "Only")) nodes, err := iq.Limit(2).All(setContextOp(ctx, iq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -330,7 +327,7 @@ func (iq *ItemQuery) OnlyX(ctx context.Context) *Item {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (iq *ItemQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (iq *ItemQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = iq.Limit(2).IDs(newQueryContext(ctx, TypeItem, "OnlyID")); err != nil { if ids, err = iq.Limit(2).IDs(setContextOp(ctx, iq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -355,7 +352,7 @@ func (iq *ItemQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Items. // All executes the query and returns a list of Items.
func (iq *ItemQuery) All(ctx context.Context) ([]*Item, error) { func (iq *ItemQuery) All(ctx context.Context) ([]*Item, error) {
ctx = newQueryContext(ctx, TypeItem, "All") ctx = setContextOp(ctx, iq.ctx, "All")
if err := iq.prepareQuery(ctx); err != nil { if err := iq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -375,7 +372,7 @@ func (iq *ItemQuery) AllX(ctx context.Context) []*Item {
// IDs executes the query and returns a list of Item IDs. // IDs executes the query and returns a list of Item IDs.
func (iq *ItemQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (iq *ItemQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeItem, "IDs") ctx = setContextOp(ctx, iq.ctx, "IDs")
if err := iq.Select(item.FieldID).Scan(ctx, &ids); err != nil { if err := iq.Select(item.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -393,7 +390,7 @@ func (iq *ItemQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (iq *ItemQuery) Count(ctx context.Context) (int, error) { func (iq *ItemQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeItem, "Count") ctx = setContextOp(ctx, iq.ctx, "Count")
if err := iq.prepareQuery(ctx); err != nil { if err := iq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -411,7 +408,7 @@ func (iq *ItemQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (iq *ItemQuery) Exist(ctx context.Context) (bool, error) { func (iq *ItemQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeItem, "Exist") ctx = setContextOp(ctx, iq.ctx, "Exist")
switch _, err := iq.FirstID(ctx); { switch _, err := iq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -439,8 +436,7 @@ func (iq *ItemQuery) Clone() *ItemQuery {
} }
return &ItemQuery{ return &ItemQuery{
config: iq.config, config: iq.config,
limit: iq.limit, ctx: iq.ctx.Clone(),
offset: iq.offset,
order: append([]OrderFunc{}, iq.order...), order: append([]OrderFunc{}, iq.order...),
inters: append([]Interceptor{}, iq.inters...), inters: append([]Interceptor{}, iq.inters...),
predicates: append([]predicate.Item{}, iq.predicates...), predicates: append([]predicate.Item{}, iq.predicates...),
@ -455,7 +451,6 @@ func (iq *ItemQuery) Clone() *ItemQuery {
// clone intermediate query. // clone intermediate query.
sql: iq.sql.Clone(), sql: iq.sql.Clone(),
path: iq.path, path: iq.path,
unique: iq.unique,
} }
} }
@ -562,9 +557,9 @@ func (iq *ItemQuery) WithAttachments(opts ...func(*AttachmentQuery)) *ItemQuery
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (iq *ItemQuery) GroupBy(field string, fields ...string) *ItemGroupBy { func (iq *ItemQuery) GroupBy(field string, fields ...string) *ItemGroupBy {
iq.fields = append([]string{field}, fields...) iq.ctx.Fields = append([]string{field}, fields...)
grbuild := &ItemGroupBy{build: iq} grbuild := &ItemGroupBy{build: iq}
grbuild.flds = &iq.fields grbuild.flds = &iq.ctx.Fields
grbuild.label = item.Label grbuild.label = item.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -583,10 +578,10 @@ func (iq *ItemQuery) GroupBy(field string, fields ...string) *ItemGroupBy {
// Select(item.FieldCreatedAt). // Select(item.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (iq *ItemQuery) Select(fields ...string) *ItemSelect { func (iq *ItemQuery) Select(fields ...string) *ItemSelect {
iq.fields = append(iq.fields, fields...) iq.ctx.Fields = append(iq.ctx.Fields, fields...)
sbuild := &ItemSelect{ItemQuery: iq} sbuild := &ItemSelect{ItemQuery: iq}
sbuild.label = item.Label sbuild.label = item.Label
sbuild.flds, sbuild.scan = &iq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &iq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -606,7 +601,7 @@ func (iq *ItemQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range iq.fields { for _, f := range iq.ctx.Fields {
if !item.ValidColumn(f) { if !item.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -730,6 +725,9 @@ func (iq *ItemQuery) loadParent(ctx context.Context, query *ItemQuery, nodes []*
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(item.IDIn(ids...)) query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -790,6 +788,9 @@ func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -829,7 +830,8 @@ func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*
if err := query.prepareQuery(ctx); err != nil { if err := query.prepareQuery(ctx); err != nil {
return err return err
} }
neighbors, err := query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
assign := spec.Assign assign := spec.Assign
values := spec.ScanValues values := spec.ScanValues
spec.ScanValues = func(columns []string) ([]any, error) { spec.ScanValues = func(columns []string) ([]any, error) {
@ -850,6 +852,8 @@ func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*
return nil return nil
} }
}) })
})
neighbors, err := withInterceptors[[]*Label](ctx, query, qr, query.inters)
if err != nil { if err != nil {
return err return err
} }
@ -877,6 +881,9 @@ func (iq *ItemQuery) loadLocation(ctx context.Context, query *LocationQuery, nod
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(location.IDIn(ids...)) query.Where(location.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -985,9 +992,9 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
func (iq *ItemQuery) sqlCount(ctx context.Context) (int, error) { func (iq *ItemQuery) sqlCount(ctx context.Context) (int, error) {
_spec := iq.querySpec() _spec := iq.querySpec()
_spec.Node.Columns = iq.fields _spec.Node.Columns = iq.ctx.Fields
if len(iq.fields) > 0 { if len(iq.ctx.Fields) > 0 {
_spec.Unique = iq.unique != nil && *iq.unique _spec.Unique = iq.ctx.Unique != nil && *iq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, iq.driver, _spec) return sqlgraph.CountNodes(ctx, iq.driver, _spec)
} }
@ -1005,10 +1012,10 @@ func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
From: iq.sql, From: iq.sql,
Unique: true, Unique: true,
} }
if unique := iq.unique; unique != nil { if unique := iq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := iq.fields; len(fields) > 0 { if fields := iq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, item.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, item.FieldID)
for i := range fields { for i := range fields {
@ -1024,10 +1031,10 @@ func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := iq.limit; limit != nil { if limit := iq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := iq.offset; offset != nil { if offset := iq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := iq.order; len(ps) > 0 { if ps := iq.order; len(ps) > 0 {
@ -1043,7 +1050,7 @@ func (iq *ItemQuery) querySpec() *sqlgraph.QuerySpec {
func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector { func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(iq.driver.Dialect()) builder := sql.Dialect(iq.driver.Dialect())
t1 := builder.Table(item.Table) t1 := builder.Table(item.Table)
columns := iq.fields columns := iq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = item.Columns columns = item.Columns
} }
@ -1052,7 +1059,7 @@ func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = iq.sql selector = iq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if iq.unique != nil && *iq.unique { if iq.ctx.Unique != nil && *iq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range iq.predicates { for _, p := range iq.predicates {
@ -1061,12 +1068,12 @@ func (iq *ItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range iq.order { for _, p := range iq.order {
p(selector) p(selector)
} }
if offset := iq.offset; offset != nil { if offset := iq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := iq.limit; limit != nil { if limit := iq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -1086,7 +1093,7 @@ func (igb *ItemGroupBy) Aggregate(fns ...AggregateFunc) *ItemGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (igb *ItemGroupBy) Scan(ctx context.Context, v any) error { func (igb *ItemGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeItem, "GroupBy") ctx = setContextOp(ctx, igb.build.ctx, "GroupBy")
if err := igb.build.prepareQuery(ctx); err != nil { if err := igb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -1134,7 +1141,7 @@ func (is *ItemSelect) Aggregate(fns ...AggregateFunc) *ItemSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (is *ItemSelect) Scan(ctx context.Context, v any) error { func (is *ItemSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeItem, "Select") ctx = setContextOp(ctx, is.ctx, "Select")
if err := is.prepareQuery(ctx); err != nil { if err := is.prepareQuery(ctx); err != nil {
return err return err
} }
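
With the query state folded into QueryContext, Clone now copies that context as a unit (ctx: iq.ctx.Clone()), and the label eager-loader is wrapped in a QuerierFunc and executed through withInterceptors, so query interceptors also observe edge loads. A small sketch of reusing a cloned base query, assuming the same hypothetical client and a since time value (imports omitted):

    // Clone copies the QueryContext, so the base query can be re-run with
    // independent pagination and counting.
    base := client.Item.Query().Where(item.CreatedAtGT(since))
    total, err := base.Clone().Count(ctx) // unaffected by the Limit below
    if err != nil {
        return err
    }
    page, err := base.Clone().Limit(50).Offset(0).All(ctx)
    if err != nil {
        return err
    }
    _, _ = total, page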


@ -170,14 +170,14 @@ func (_if *ItemField) assignValues(columns []string, values []any) error {
// QueryItem queries the "item" edge of the ItemField entity. // QueryItem queries the "item" edge of the ItemField entity.
func (_if *ItemField) QueryItem() *ItemQuery { func (_if *ItemField) QueryItem() *ItemQuery {
return (&ItemFieldClient{config: _if.config}).QueryItem(_if) return NewItemFieldClient(_if.config).QueryItem(_if)
} }
// Update returns a builder for updating this ItemField. // Update returns a builder for updating this ItemField.
// Note that you need to call ItemField.Unwrap() before calling this method if this ItemField // Note that you need to call ItemField.Unwrap() before calling this method if this ItemField
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (_if *ItemField) Update() *ItemFieldUpdateOne { func (_if *ItemField) Update() *ItemFieldUpdateOne {
return (&ItemFieldClient{config: _if.config}).UpdateOne(_if) return NewItemFieldClient(_if.config).UpdateOne(_if)
} }
// Unwrap unwraps the ItemField entity that was returned from a transaction after it was closed, // Unwrap unwraps the ItemField entity that was returned from a transaction after it was closed,


@ -69,6 +69,12 @@ type ItemFieldDeleteOne struct {
ifd *ItemFieldDelete ifd *ItemFieldDelete
} }
// Where appends a list predicates to the ItemFieldDelete builder.
func (ifdo *ItemFieldDeleteOne) Where(ps ...predicate.ItemField) *ItemFieldDeleteOne {
ifdo.ifd.mutation.Where(ps...)
return ifdo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (ifdo *ItemFieldDeleteOne) Exec(ctx context.Context) error { func (ifdo *ItemFieldDeleteOne) Exec(ctx context.Context) error {
n, err := ifdo.ifd.Exec(ctx) n, err := ifdo.ifd.Exec(ctx)
@ -84,5 +90,7 @@ func (ifdo *ItemFieldDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (ifdo *ItemFieldDeleteOne) ExecX(ctx context.Context) { func (ifdo *ItemFieldDeleteOne) ExecX(ctx context.Context) {
ifdo.ifd.ExecX(ctx) if err := ifdo.Exec(ctx); err != nil {
panic(err)
}
} }


@ -19,11 +19,8 @@ import (
// ItemFieldQuery is the builder for querying ItemField entities. // ItemFieldQuery is the builder for querying ItemField entities.
type ItemFieldQuery struct { type ItemFieldQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.ItemField predicates []predicate.ItemField
withItem *ItemQuery withItem *ItemQuery
@ -41,20 +38,20 @@ func (ifq *ItemFieldQuery) Where(ps ...predicate.ItemField) *ItemFieldQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (ifq *ItemFieldQuery) Limit(limit int) *ItemFieldQuery { func (ifq *ItemFieldQuery) Limit(limit int) *ItemFieldQuery {
ifq.limit = &limit ifq.ctx.Limit = &limit
return ifq return ifq
} }
// Offset to start from. // Offset to start from.
func (ifq *ItemFieldQuery) Offset(offset int) *ItemFieldQuery { func (ifq *ItemFieldQuery) Offset(offset int) *ItemFieldQuery {
ifq.offset = &offset ifq.ctx.Offset = &offset
return ifq return ifq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (ifq *ItemFieldQuery) Unique(unique bool) *ItemFieldQuery { func (ifq *ItemFieldQuery) Unique(unique bool) *ItemFieldQuery {
ifq.unique = &unique ifq.ctx.Unique = &unique
return ifq return ifq
} }
@ -89,7 +86,7 @@ func (ifq *ItemFieldQuery) QueryItem() *ItemQuery {
// First returns the first ItemField entity from the query. // First returns the first ItemField entity from the query.
// Returns a *NotFoundError when no ItemField was found. // Returns a *NotFoundError when no ItemField was found.
func (ifq *ItemFieldQuery) First(ctx context.Context) (*ItemField, error) { func (ifq *ItemFieldQuery) First(ctx context.Context) (*ItemField, error) {
nodes, err := ifq.Limit(1).All(newQueryContext(ctx, TypeItemField, "First")) nodes, err := ifq.Limit(1).All(setContextOp(ctx, ifq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -112,7 +109,7 @@ func (ifq *ItemFieldQuery) FirstX(ctx context.Context) *ItemField {
// Returns a *NotFoundError when no ItemField ID was found. // Returns a *NotFoundError when no ItemField ID was found.
func (ifq *ItemFieldQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (ifq *ItemFieldQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = ifq.Limit(1).IDs(newQueryContext(ctx, TypeItemField, "FirstID")); err != nil { if ids, err = ifq.Limit(1).IDs(setContextOp(ctx, ifq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -135,7 +132,7 @@ func (ifq *ItemFieldQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one ItemField entity is found. // Returns a *NotSingularError when more than one ItemField entity is found.
// Returns a *NotFoundError when no ItemField entities are found. // Returns a *NotFoundError when no ItemField entities are found.
func (ifq *ItemFieldQuery) Only(ctx context.Context) (*ItemField, error) { func (ifq *ItemFieldQuery) Only(ctx context.Context) (*ItemField, error) {
nodes, err := ifq.Limit(2).All(newQueryContext(ctx, TypeItemField, "Only")) nodes, err := ifq.Limit(2).All(setContextOp(ctx, ifq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -163,7 +160,7 @@ func (ifq *ItemFieldQuery) OnlyX(ctx context.Context) *ItemField {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (ifq *ItemFieldQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (ifq *ItemFieldQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = ifq.Limit(2).IDs(newQueryContext(ctx, TypeItemField, "OnlyID")); err != nil { if ids, err = ifq.Limit(2).IDs(setContextOp(ctx, ifq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -188,7 +185,7 @@ func (ifq *ItemFieldQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of ItemFields. // All executes the query and returns a list of ItemFields.
func (ifq *ItemFieldQuery) All(ctx context.Context) ([]*ItemField, error) { func (ifq *ItemFieldQuery) All(ctx context.Context) ([]*ItemField, error) {
ctx = newQueryContext(ctx, TypeItemField, "All") ctx = setContextOp(ctx, ifq.ctx, "All")
if err := ifq.prepareQuery(ctx); err != nil { if err := ifq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -208,7 +205,7 @@ func (ifq *ItemFieldQuery) AllX(ctx context.Context) []*ItemField {
// IDs executes the query and returns a list of ItemField IDs. // IDs executes the query and returns a list of ItemField IDs.
func (ifq *ItemFieldQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (ifq *ItemFieldQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeItemField, "IDs") ctx = setContextOp(ctx, ifq.ctx, "IDs")
if err := ifq.Select(itemfield.FieldID).Scan(ctx, &ids); err != nil { if err := ifq.Select(itemfield.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -226,7 +223,7 @@ func (ifq *ItemFieldQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (ifq *ItemFieldQuery) Count(ctx context.Context) (int, error) { func (ifq *ItemFieldQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeItemField, "Count") ctx = setContextOp(ctx, ifq.ctx, "Count")
if err := ifq.prepareQuery(ctx); err != nil { if err := ifq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -244,7 +241,7 @@ func (ifq *ItemFieldQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (ifq *ItemFieldQuery) Exist(ctx context.Context) (bool, error) { func (ifq *ItemFieldQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeItemField, "Exist") ctx = setContextOp(ctx, ifq.ctx, "Exist")
switch _, err := ifq.FirstID(ctx); { switch _, err := ifq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -272,8 +269,7 @@ func (ifq *ItemFieldQuery) Clone() *ItemFieldQuery {
} }
return &ItemFieldQuery{ return &ItemFieldQuery{
config: ifq.config, config: ifq.config,
limit: ifq.limit, ctx: ifq.ctx.Clone(),
offset: ifq.offset,
order: append([]OrderFunc{}, ifq.order...), order: append([]OrderFunc{}, ifq.order...),
inters: append([]Interceptor{}, ifq.inters...), inters: append([]Interceptor{}, ifq.inters...),
predicates: append([]predicate.ItemField{}, ifq.predicates...), predicates: append([]predicate.ItemField{}, ifq.predicates...),
@ -281,7 +277,6 @@ func (ifq *ItemFieldQuery) Clone() *ItemFieldQuery {
// clone intermediate query. // clone intermediate query.
sql: ifq.sql.Clone(), sql: ifq.sql.Clone(),
path: ifq.path, path: ifq.path,
unique: ifq.unique,
} }
} }
@ -311,9 +306,9 @@ func (ifq *ItemFieldQuery) WithItem(opts ...func(*ItemQuery)) *ItemFieldQuery {
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (ifq *ItemFieldQuery) GroupBy(field string, fields ...string) *ItemFieldGroupBy { func (ifq *ItemFieldQuery) GroupBy(field string, fields ...string) *ItemFieldGroupBy {
ifq.fields = append([]string{field}, fields...) ifq.ctx.Fields = append([]string{field}, fields...)
grbuild := &ItemFieldGroupBy{build: ifq} grbuild := &ItemFieldGroupBy{build: ifq}
grbuild.flds = &ifq.fields grbuild.flds = &ifq.ctx.Fields
grbuild.label = itemfield.Label grbuild.label = itemfield.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -332,10 +327,10 @@ func (ifq *ItemFieldQuery) GroupBy(field string, fields ...string) *ItemFieldGro
// Select(itemfield.FieldCreatedAt). // Select(itemfield.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (ifq *ItemFieldQuery) Select(fields ...string) *ItemFieldSelect { func (ifq *ItemFieldQuery) Select(fields ...string) *ItemFieldSelect {
ifq.fields = append(ifq.fields, fields...) ifq.ctx.Fields = append(ifq.ctx.Fields, fields...)
sbuild := &ItemFieldSelect{ItemFieldQuery: ifq} sbuild := &ItemFieldSelect{ItemFieldQuery: ifq}
sbuild.label = itemfield.Label sbuild.label = itemfield.Label
sbuild.flds, sbuild.scan = &ifq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &ifq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -355,7 +350,7 @@ func (ifq *ItemFieldQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range ifq.fields { for _, f := range ifq.ctx.Fields {
if !itemfield.ValidColumn(f) { if !itemfield.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -425,6 +420,9 @@ func (ifq *ItemFieldQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(item.IDIn(ids...)) query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -444,9 +442,9 @@ func (ifq *ItemFieldQuery) loadItem(ctx context.Context, query *ItemQuery, nodes
func (ifq *ItemFieldQuery) sqlCount(ctx context.Context) (int, error) { func (ifq *ItemFieldQuery) sqlCount(ctx context.Context) (int, error) {
_spec := ifq.querySpec() _spec := ifq.querySpec()
_spec.Node.Columns = ifq.fields _spec.Node.Columns = ifq.ctx.Fields
if len(ifq.fields) > 0 { if len(ifq.ctx.Fields) > 0 {
_spec.Unique = ifq.unique != nil && *ifq.unique _spec.Unique = ifq.ctx.Unique != nil && *ifq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, ifq.driver, _spec) return sqlgraph.CountNodes(ctx, ifq.driver, _spec)
} }
@ -464,10 +462,10 @@ func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
From: ifq.sql, From: ifq.sql,
Unique: true, Unique: true,
} }
if unique := ifq.unique; unique != nil { if unique := ifq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := ifq.fields; len(fields) > 0 { if fields := ifq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, itemfield.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, itemfield.FieldID)
for i := range fields { for i := range fields {
@ -483,10 +481,10 @@ func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := ifq.limit; limit != nil { if limit := ifq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := ifq.offset; offset != nil { if offset := ifq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := ifq.order; len(ps) > 0 { if ps := ifq.order; len(ps) > 0 {
@ -502,7 +500,7 @@ func (ifq *ItemFieldQuery) querySpec() *sqlgraph.QuerySpec {
func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector { func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(ifq.driver.Dialect()) builder := sql.Dialect(ifq.driver.Dialect())
t1 := builder.Table(itemfield.Table) t1 := builder.Table(itemfield.Table)
columns := ifq.fields columns := ifq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = itemfield.Columns columns = itemfield.Columns
} }
@ -511,7 +509,7 @@ func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = ifq.sql selector = ifq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if ifq.unique != nil && *ifq.unique { if ifq.ctx.Unique != nil && *ifq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range ifq.predicates { for _, p := range ifq.predicates {
@ -520,12 +518,12 @@ func (ifq *ItemFieldQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range ifq.order { for _, p := range ifq.order {
p(selector) p(selector)
} }
if offset := ifq.offset; offset != nil { if offset := ifq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := ifq.limit; limit != nil { if limit := ifq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -545,7 +543,7 @@ func (ifgb *ItemFieldGroupBy) Aggregate(fns ...AggregateFunc) *ItemFieldGroupBy
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ifgb *ItemFieldGroupBy) Scan(ctx context.Context, v any) error { func (ifgb *ItemFieldGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeItemField, "GroupBy") ctx = setContextOp(ctx, ifgb.build.ctx, "GroupBy")
if err := ifgb.build.prepareQuery(ctx); err != nil { if err := ifgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -593,7 +591,7 @@ func (ifs *ItemFieldSelect) Aggregate(fns ...AggregateFunc) *ItemFieldSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ifs *ItemFieldSelect) Scan(ctx context.Context, v any) error { func (ifs *ItemFieldSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeItemField, "Select") ctx = setContextOp(ctx, ifs.ctx, "Select")
if err := ifs.prepareQuery(ctx); err != nil { if err := ifs.prepareQuery(ctx); err != nil {
return err return err
} }
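
The GroupBy and Select scan paths now call setContextOp with the builder's own QueryContext instead of allocating a fresh one, so interceptors see the originating query's context. A sketch of the aggregation usage described by the generated doc comments, assuming the same hypothetical client (imports omitted):

    // Group item fields by creation time and count them; only the context
    // plumbing behind Scan changed, not the call itself.
    var rows []struct {
        CreatedAt time.Time `json:"created_at"`
        Count     int       `json:"count"`
    }
    err := client.ItemField.Query().
        GroupBy(itemfield.FieldCreatedAt).
        Aggregate(ent.Count()).
        Scan(ctx, &rows)
    if err != nil {
        return err
    }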


@ -145,19 +145,19 @@ func (l *Label) assignValues(columns []string, values []any) error {
// QueryGroup queries the "group" edge of the Label entity. // QueryGroup queries the "group" edge of the Label entity.
func (l *Label) QueryGroup() *GroupQuery { func (l *Label) QueryGroup() *GroupQuery {
return (&LabelClient{config: l.config}).QueryGroup(l) return NewLabelClient(l.config).QueryGroup(l)
} }
// QueryItems queries the "items" edge of the Label entity. // QueryItems queries the "items" edge of the Label entity.
func (l *Label) QueryItems() *ItemQuery { func (l *Label) QueryItems() *ItemQuery {
return (&LabelClient{config: l.config}).QueryItems(l) return NewLabelClient(l.config).QueryItems(l)
} }
// Update returns a builder for updating this Label. // Update returns a builder for updating this Label.
// Note that you need to call Label.Unwrap() before calling this method if this Label // Note that you need to call Label.Unwrap() before calling this method if this Label
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (l *Label) Update() *LabelUpdateOne { func (l *Label) Update() *LabelUpdateOne {
return (&LabelClient{config: l.config}).UpdateOne(l) return NewLabelClient(l.config).UpdateOne(l)
} }
// Unwrap unwraps the Label entity that was returned from a transaction after it was closed, // Unwrap unwraps the Label entity that was returned from a transaction after it was closed,


@ -69,6 +69,12 @@ type LabelDeleteOne struct {
ld *LabelDelete ld *LabelDelete
} }
// Where appends a list predicates to the LabelDelete builder.
func (ldo *LabelDeleteOne) Where(ps ...predicate.Label) *LabelDeleteOne {
ldo.ld.mutation.Where(ps...)
return ldo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (ldo *LabelDeleteOne) Exec(ctx context.Context) error { func (ldo *LabelDeleteOne) Exec(ctx context.Context) error {
n, err := ldo.ld.Exec(ctx) n, err := ldo.ld.Exec(ctx)
@ -84,5 +90,7 @@ func (ldo *LabelDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (ldo *LabelDeleteOne) ExecX(ctx context.Context) { func (ldo *LabelDeleteOne) ExecX(ctx context.Context) {
ldo.ld.ExecX(ctx) if err := ldo.Exec(ctx); err != nil {
panic(err)
}
} }


@ -21,11 +21,8 @@ import (
// LabelQuery is the builder for querying Label entities. // LabelQuery is the builder for querying Label entities.
type LabelQuery struct { type LabelQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.Label predicates []predicate.Label
withGroup *GroupQuery withGroup *GroupQuery
@ -44,20 +41,20 @@ func (lq *LabelQuery) Where(ps ...predicate.Label) *LabelQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (lq *LabelQuery) Limit(limit int) *LabelQuery { func (lq *LabelQuery) Limit(limit int) *LabelQuery {
lq.limit = &limit lq.ctx.Limit = &limit
return lq return lq
} }
// Offset to start from. // Offset to start from.
func (lq *LabelQuery) Offset(offset int) *LabelQuery { func (lq *LabelQuery) Offset(offset int) *LabelQuery {
lq.offset = &offset lq.ctx.Offset = &offset
return lq return lq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (lq *LabelQuery) Unique(unique bool) *LabelQuery { func (lq *LabelQuery) Unique(unique bool) *LabelQuery {
lq.unique = &unique lq.ctx.Unique = &unique
return lq return lq
} }
@ -114,7 +111,7 @@ func (lq *LabelQuery) QueryItems() *ItemQuery {
// First returns the first Label entity from the query. // First returns the first Label entity from the query.
// Returns a *NotFoundError when no Label was found. // Returns a *NotFoundError when no Label was found.
func (lq *LabelQuery) First(ctx context.Context) (*Label, error) { func (lq *LabelQuery) First(ctx context.Context) (*Label, error) {
nodes, err := lq.Limit(1).All(newQueryContext(ctx, TypeLabel, "First")) nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -137,7 +134,7 @@ func (lq *LabelQuery) FirstX(ctx context.Context) *Label {
// Returns a *NotFoundError when no Label ID was found. // Returns a *NotFoundError when no Label ID was found.
func (lq *LabelQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (lq *LabelQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = lq.Limit(1).IDs(newQueryContext(ctx, TypeLabel, "FirstID")); err != nil { if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -160,7 +157,7 @@ func (lq *LabelQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Label entity is found. // Returns a *NotSingularError when more than one Label entity is found.
// Returns a *NotFoundError when no Label entities are found. // Returns a *NotFoundError when no Label entities are found.
func (lq *LabelQuery) Only(ctx context.Context) (*Label, error) { func (lq *LabelQuery) Only(ctx context.Context) (*Label, error) {
nodes, err := lq.Limit(2).All(newQueryContext(ctx, TypeLabel, "Only")) nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -188,7 +185,7 @@ func (lq *LabelQuery) OnlyX(ctx context.Context) *Label {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (lq *LabelQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (lq *LabelQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = lq.Limit(2).IDs(newQueryContext(ctx, TypeLabel, "OnlyID")); err != nil { if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -213,7 +210,7 @@ func (lq *LabelQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Labels. // All executes the query and returns a list of Labels.
func (lq *LabelQuery) All(ctx context.Context) ([]*Label, error) { func (lq *LabelQuery) All(ctx context.Context) ([]*Label, error) {
ctx = newQueryContext(ctx, TypeLabel, "All") ctx = setContextOp(ctx, lq.ctx, "All")
if err := lq.prepareQuery(ctx); err != nil { if err := lq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -233,7 +230,7 @@ func (lq *LabelQuery) AllX(ctx context.Context) []*Label {
// IDs executes the query and returns a list of Label IDs. // IDs executes the query and returns a list of Label IDs.
func (lq *LabelQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (lq *LabelQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeLabel, "IDs") ctx = setContextOp(ctx, lq.ctx, "IDs")
if err := lq.Select(label.FieldID).Scan(ctx, &ids); err != nil { if err := lq.Select(label.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -251,7 +248,7 @@ func (lq *LabelQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (lq *LabelQuery) Count(ctx context.Context) (int, error) { func (lq *LabelQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeLabel, "Count") ctx = setContextOp(ctx, lq.ctx, "Count")
if err := lq.prepareQuery(ctx); err != nil { if err := lq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -269,7 +266,7 @@ func (lq *LabelQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (lq *LabelQuery) Exist(ctx context.Context) (bool, error) { func (lq *LabelQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeLabel, "Exist") ctx = setContextOp(ctx, lq.ctx, "Exist")
switch _, err := lq.FirstID(ctx); { switch _, err := lq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -297,8 +294,7 @@ func (lq *LabelQuery) Clone() *LabelQuery {
} }
return &LabelQuery{ return &LabelQuery{
config: lq.config, config: lq.config,
limit: lq.limit, ctx: lq.ctx.Clone(),
offset: lq.offset,
order: append([]OrderFunc{}, lq.order...), order: append([]OrderFunc{}, lq.order...),
inters: append([]Interceptor{}, lq.inters...), inters: append([]Interceptor{}, lq.inters...),
predicates: append([]predicate.Label{}, lq.predicates...), predicates: append([]predicate.Label{}, lq.predicates...),
@ -307,7 +303,6 @@ func (lq *LabelQuery) Clone() *LabelQuery {
// clone intermediate query. // clone intermediate query.
sql: lq.sql.Clone(), sql: lq.sql.Clone(),
path: lq.path, path: lq.path,
unique: lq.unique,
} }
} }
@ -348,9 +343,9 @@ func (lq *LabelQuery) WithItems(opts ...func(*ItemQuery)) *LabelQuery {
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (lq *LabelQuery) GroupBy(field string, fields ...string) *LabelGroupBy { func (lq *LabelQuery) GroupBy(field string, fields ...string) *LabelGroupBy {
lq.fields = append([]string{field}, fields...) lq.ctx.Fields = append([]string{field}, fields...)
grbuild := &LabelGroupBy{build: lq} grbuild := &LabelGroupBy{build: lq}
grbuild.flds = &lq.fields grbuild.flds = &lq.ctx.Fields
grbuild.label = label.Label grbuild.label = label.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -369,10 +364,10 @@ func (lq *LabelQuery) GroupBy(field string, fields ...string) *LabelGroupBy {
// Select(label.FieldCreatedAt). // Select(label.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (lq *LabelQuery) Select(fields ...string) *LabelSelect { func (lq *LabelQuery) Select(fields ...string) *LabelSelect {
lq.fields = append(lq.fields, fields...) lq.ctx.Fields = append(lq.ctx.Fields, fields...)
sbuild := &LabelSelect{LabelQuery: lq} sbuild := &LabelSelect{LabelQuery: lq}
sbuild.label = label.Label sbuild.label = label.Label
sbuild.flds, sbuild.scan = &lq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -392,7 +387,7 @@ func (lq *LabelQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range lq.fields { for _, f := range lq.ctx.Fields {
if !label.ValidColumn(f) { if !label.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -470,6 +465,9 @@ func (lq *LabelQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -509,7 +507,8 @@ func (lq *LabelQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
if err := query.prepareQuery(ctx); err != nil { if err := query.prepareQuery(ctx); err != nil {
return err return err
} }
neighbors, err := query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
assign := spec.Assign assign := spec.Assign
values := spec.ScanValues values := spec.ScanValues
spec.ScanValues = func(columns []string) ([]any, error) { spec.ScanValues = func(columns []string) ([]any, error) {
@ -530,6 +529,8 @@ func (lq *LabelQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
return nil return nil
} }
}) })
})
neighbors, err := withInterceptors[[]*Item](ctx, query, qr, query.inters)
if err != nil { if err != nil {
return err return err
} }
@ -547,9 +548,9 @@ func (lq *LabelQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
func (lq *LabelQuery) sqlCount(ctx context.Context) (int, error) { func (lq *LabelQuery) sqlCount(ctx context.Context) (int, error) {
_spec := lq.querySpec() _spec := lq.querySpec()
_spec.Node.Columns = lq.fields _spec.Node.Columns = lq.ctx.Fields
if len(lq.fields) > 0 { if len(lq.ctx.Fields) > 0 {
_spec.Unique = lq.unique != nil && *lq.unique _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, lq.driver, _spec) return sqlgraph.CountNodes(ctx, lq.driver, _spec)
} }
@ -567,10 +568,10 @@ func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
From: lq.sql, From: lq.sql,
Unique: true, Unique: true,
} }
if unique := lq.unique; unique != nil { if unique := lq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := lq.fields; len(fields) > 0 { if fields := lq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, label.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, label.FieldID)
for i := range fields { for i := range fields {
@ -586,10 +587,10 @@ func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := lq.limit; limit != nil { if limit := lq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := lq.offset; offset != nil { if offset := lq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := lq.order; len(ps) > 0 { if ps := lq.order; len(ps) > 0 {
@ -605,7 +606,7 @@ func (lq *LabelQuery) querySpec() *sqlgraph.QuerySpec {
func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector { func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(lq.driver.Dialect()) builder := sql.Dialect(lq.driver.Dialect())
t1 := builder.Table(label.Table) t1 := builder.Table(label.Table)
columns := lq.fields columns := lq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = label.Columns columns = label.Columns
} }
@ -614,7 +615,7 @@ func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = lq.sql selector = lq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if lq.unique != nil && *lq.unique { if lq.ctx.Unique != nil && *lq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range lq.predicates { for _, p := range lq.predicates {
@ -623,12 +624,12 @@ func (lq *LabelQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range lq.order { for _, p := range lq.order {
p(selector) p(selector)
} }
if offset := lq.offset; offset != nil { if offset := lq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := lq.limit; limit != nil { if limit := lq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -648,7 +649,7 @@ func (lgb *LabelGroupBy) Aggregate(fns ...AggregateFunc) *LabelGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (lgb *LabelGroupBy) Scan(ctx context.Context, v any) error { func (lgb *LabelGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeLabel, "GroupBy") ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy")
if err := lgb.build.prepareQuery(ctx); err != nil { if err := lgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -696,7 +697,7 @@ func (ls *LabelSelect) Aggregate(fns ...AggregateFunc) *LabelSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ls *LabelSelect) Scan(ctx context.Context, v any) error { func (ls *LabelSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeLabel, "Select") ctx = setContextOp(ctx, ls.ctx, "Select")
if err := ls.prepareQuery(ctx); err != nil { if err := ls.prepareQuery(ctx); err != nil {
return err return err
} }
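
The many-to-many items loader is likewise rebuilt as a QuerierFunc and run through withInterceptors, so interceptors registered for Item queries also fire when labels eager-load their items. A sketch of that eager load, assuming the same hypothetical client; the WithItems option signature is taken from this diff:

    // Eager-load up to five items per label; the edge query now passes
    // through the interceptor chain like any other Item query.
    labels, err := client.Label.Query().
        WithItems(func(q *ent.ItemQuery) {
            q.Limit(5)
        }).
        All(ctx)
    if err != nil {
        return err
    }
    _ = labels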


@ -173,29 +173,29 @@ func (l *Location) assignValues(columns []string, values []any) error {
// QueryParent queries the "parent" edge of the Location entity. // QueryParent queries the "parent" edge of the Location entity.
func (l *Location) QueryParent() *LocationQuery { func (l *Location) QueryParent() *LocationQuery {
return (&LocationClient{config: l.config}).QueryParent(l) return NewLocationClient(l.config).QueryParent(l)
} }
// QueryChildren queries the "children" edge of the Location entity. // QueryChildren queries the "children" edge of the Location entity.
func (l *Location) QueryChildren() *LocationQuery { func (l *Location) QueryChildren() *LocationQuery {
return (&LocationClient{config: l.config}).QueryChildren(l) return NewLocationClient(l.config).QueryChildren(l)
} }
// QueryGroup queries the "group" edge of the Location entity. // QueryGroup queries the "group" edge of the Location entity.
func (l *Location) QueryGroup() *GroupQuery { func (l *Location) QueryGroup() *GroupQuery {
return (&LocationClient{config: l.config}).QueryGroup(l) return NewLocationClient(l.config).QueryGroup(l)
} }
// QueryItems queries the "items" edge of the Location entity. // QueryItems queries the "items" edge of the Location entity.
func (l *Location) QueryItems() *ItemQuery { func (l *Location) QueryItems() *ItemQuery {
return (&LocationClient{config: l.config}).QueryItems(l) return NewLocationClient(l.config).QueryItems(l)
} }
// Update returns a builder for updating this Location. // Update returns a builder for updating this Location.
// Note that you need to call Location.Unwrap() before calling this method if this Location // Note that you need to call Location.Unwrap() before calling this method if this Location
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (l *Location) Update() *LocationUpdateOne { func (l *Location) Update() *LocationUpdateOne {
return (&LocationClient{config: l.config}).UpdateOne(l) return NewLocationClient(l.config).UpdateOne(l)
} }
// Unwrap unwraps the Location entity that was returned from a transaction after it was closed, // Unwrap unwraps the Location entity that was returned from a transaction after it was closed,
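For callers, swapping the (&LocationClient{config: l.config}) literal for NewLocationClient(l.config) changes nothing; edge traversal still hangs off a loaded entity. A minimal sketch assuming ctx and an *ent.Client named client exist elsewhere:

func firstLocationItems(ctx context.Context, client *ent.Client) ([]*ent.Item, error) {
	loc, err := client.Location.Query().First(ctx)
	if err != nil {
		return nil, err
	}
	// QueryItems now delegates to NewLocationClient(loc.config).QueryItems(loc).
	return loc.QueryItems().All(ctx)
}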

View file

@ -69,6 +69,12 @@ type LocationDeleteOne struct {
ld *LocationDelete ld *LocationDelete
} }
// Where appends a list of predicates to the LocationDelete builder.
func (ldo *LocationDeleteOne) Where(ps ...predicate.Location) *LocationDeleteOne {
ldo.ld.mutation.Where(ps...)
return ldo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (ldo *LocationDeleteOne) Exec(ctx context.Context) error { func (ldo *LocationDeleteOne) Exec(ctx context.Context) error {
n, err := ldo.ld.Exec(ctx) n, err := ldo.ld.Exec(ctx)
@ -84,5 +90,7 @@ func (ldo *LocationDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (ldo *LocationDeleteOne) ExecX(ctx context.Context) { func (ldo *LocationDeleteOne) ExecX(ctx context.Context) {
ldo.ld.ExecX(ctx) if err := ldo.Exec(ctx); err != nil {
panic(err)
}
} }
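The new Where method lets a single-row delete carry extra predicates, and ExecX now funnels through Exec so the affected-row check runs before any panic. A rough usage sketch; DeleteOneID, the uuid argument, and the location.NameEQ predicate are assumptions based on the generated API for this schema, not lines from this commit:

func deleteLocationNamed(ctx context.Context, client *ent.Client, id uuid.UUID) error {
	err := client.Location.
		DeleteOneID(id).
		Where(location.NameEQ("Garage")). // extra guard on top of the ID match
		Exec(ctx)
	if ent.IsNotFound(err) {
		// Either the ID does not exist or the Where predicate filtered it out.
		return nil
	}
	return err
}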

View file

@ -21,11 +21,8 @@ import (
// LocationQuery is the builder for querying Location entities. // LocationQuery is the builder for querying Location entities.
type LocationQuery struct { type LocationQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.Location predicates []predicate.Location
withParent *LocationQuery withParent *LocationQuery
@ -46,20 +43,20 @@ func (lq *LocationQuery) Where(ps ...predicate.Location) *LocationQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (lq *LocationQuery) Limit(limit int) *LocationQuery { func (lq *LocationQuery) Limit(limit int) *LocationQuery {
lq.limit = &limit lq.ctx.Limit = &limit
return lq return lq
} }
// Offset to start from. // Offset to start from.
func (lq *LocationQuery) Offset(offset int) *LocationQuery { func (lq *LocationQuery) Offset(offset int) *LocationQuery {
lq.offset = &offset lq.ctx.Offset = &offset
return lq return lq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (lq *LocationQuery) Unique(unique bool) *LocationQuery { func (lq *LocationQuery) Unique(unique bool) *LocationQuery {
lq.unique = &unique lq.ctx.Unique = &unique
return lq return lq
} }
@ -160,7 +157,7 @@ func (lq *LocationQuery) QueryItems() *ItemQuery {
// First returns the first Location entity from the query. // First returns the first Location entity from the query.
// Returns a *NotFoundError when no Location was found. // Returns a *NotFoundError when no Location was found.
func (lq *LocationQuery) First(ctx context.Context) (*Location, error) { func (lq *LocationQuery) First(ctx context.Context) (*Location, error) {
nodes, err := lq.Limit(1).All(newQueryContext(ctx, TypeLocation, "First")) nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -183,7 +180,7 @@ func (lq *LocationQuery) FirstX(ctx context.Context) *Location {
// Returns a *NotFoundError when no Location ID was found. // Returns a *NotFoundError when no Location ID was found.
func (lq *LocationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (lq *LocationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = lq.Limit(1).IDs(newQueryContext(ctx, TypeLocation, "FirstID")); err != nil { if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -206,7 +203,7 @@ func (lq *LocationQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one Location entity is found. // Returns a *NotSingularError when more than one Location entity is found.
// Returns a *NotFoundError when no Location entities are found. // Returns a *NotFoundError when no Location entities are found.
func (lq *LocationQuery) Only(ctx context.Context) (*Location, error) { func (lq *LocationQuery) Only(ctx context.Context) (*Location, error) {
nodes, err := lq.Limit(2).All(newQueryContext(ctx, TypeLocation, "Only")) nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -234,7 +231,7 @@ func (lq *LocationQuery) OnlyX(ctx context.Context) *Location {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (lq *LocationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (lq *LocationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = lq.Limit(2).IDs(newQueryContext(ctx, TypeLocation, "OnlyID")); err != nil { if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -259,7 +256,7 @@ func (lq *LocationQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Locations. // All executes the query and returns a list of Locations.
func (lq *LocationQuery) All(ctx context.Context) ([]*Location, error) { func (lq *LocationQuery) All(ctx context.Context) ([]*Location, error) {
ctx = newQueryContext(ctx, TypeLocation, "All") ctx = setContextOp(ctx, lq.ctx, "All")
if err := lq.prepareQuery(ctx); err != nil { if err := lq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -279,7 +276,7 @@ func (lq *LocationQuery) AllX(ctx context.Context) []*Location {
// IDs executes the query and returns a list of Location IDs. // IDs executes the query and returns a list of Location IDs.
func (lq *LocationQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (lq *LocationQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeLocation, "IDs") ctx = setContextOp(ctx, lq.ctx, "IDs")
if err := lq.Select(location.FieldID).Scan(ctx, &ids); err != nil { if err := lq.Select(location.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -297,7 +294,7 @@ func (lq *LocationQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (lq *LocationQuery) Count(ctx context.Context) (int, error) { func (lq *LocationQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeLocation, "Count") ctx = setContextOp(ctx, lq.ctx, "Count")
if err := lq.prepareQuery(ctx); err != nil { if err := lq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -315,7 +312,7 @@ func (lq *LocationQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (lq *LocationQuery) Exist(ctx context.Context) (bool, error) { func (lq *LocationQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeLocation, "Exist") ctx = setContextOp(ctx, lq.ctx, "Exist")
switch _, err := lq.FirstID(ctx); { switch _, err := lq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -343,8 +340,7 @@ func (lq *LocationQuery) Clone() *LocationQuery {
} }
return &LocationQuery{ return &LocationQuery{
config: lq.config, config: lq.config,
limit: lq.limit, ctx: lq.ctx.Clone(),
offset: lq.offset,
order: append([]OrderFunc{}, lq.order...), order: append([]OrderFunc{}, lq.order...),
inters: append([]Interceptor{}, lq.inters...), inters: append([]Interceptor{}, lq.inters...),
predicates: append([]predicate.Location{}, lq.predicates...), predicates: append([]predicate.Location{}, lq.predicates...),
@ -355,7 +351,6 @@ func (lq *LocationQuery) Clone() *LocationQuery {
// clone intermediate query. // clone intermediate query.
sql: lq.sql.Clone(), sql: lq.sql.Clone(),
path: lq.path, path: lq.path,
unique: lq.unique,
} }
} }
@ -418,9 +413,9 @@ func (lq *LocationQuery) WithItems(opts ...func(*ItemQuery)) *LocationQuery {
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (lq *LocationQuery) GroupBy(field string, fields ...string) *LocationGroupBy { func (lq *LocationQuery) GroupBy(field string, fields ...string) *LocationGroupBy {
lq.fields = append([]string{field}, fields...) lq.ctx.Fields = append([]string{field}, fields...)
grbuild := &LocationGroupBy{build: lq} grbuild := &LocationGroupBy{build: lq}
grbuild.flds = &lq.fields grbuild.flds = &lq.ctx.Fields
grbuild.label = location.Label grbuild.label = location.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -439,10 +434,10 @@ func (lq *LocationQuery) GroupBy(field string, fields ...string) *LocationGroupB
// Select(location.FieldCreatedAt). // Select(location.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (lq *LocationQuery) Select(fields ...string) *LocationSelect { func (lq *LocationQuery) Select(fields ...string) *LocationSelect {
lq.fields = append(lq.fields, fields...) lq.ctx.Fields = append(lq.ctx.Fields, fields...)
sbuild := &LocationSelect{LocationQuery: lq} sbuild := &LocationSelect{LocationQuery: lq}
sbuild.label = location.Label sbuild.label = location.Label
sbuild.flds, sbuild.scan = &lq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -462,7 +457,7 @@ func (lq *LocationQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range lq.fields { for _, f := range lq.ctx.Fields {
if !location.ValidColumn(f) { if !location.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -555,6 +550,9 @@ func (lq *LocationQuery) loadParent(ctx context.Context, query *LocationQuery, n
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(location.IDIn(ids...)) query.Where(location.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -615,6 +613,9 @@ func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -665,9 +666,9 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes
func (lq *LocationQuery) sqlCount(ctx context.Context) (int, error) { func (lq *LocationQuery) sqlCount(ctx context.Context) (int, error) {
_spec := lq.querySpec() _spec := lq.querySpec()
_spec.Node.Columns = lq.fields _spec.Node.Columns = lq.ctx.Fields
if len(lq.fields) > 0 { if len(lq.ctx.Fields) > 0 {
_spec.Unique = lq.unique != nil && *lq.unique _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, lq.driver, _spec) return sqlgraph.CountNodes(ctx, lq.driver, _spec)
} }
@ -685,10 +686,10 @@ func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
From: lq.sql, From: lq.sql,
Unique: true, Unique: true,
} }
if unique := lq.unique; unique != nil { if unique := lq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := lq.fields; len(fields) > 0 { if fields := lq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, location.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, location.FieldID)
for i := range fields { for i := range fields {
@ -704,10 +705,10 @@ func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := lq.limit; limit != nil { if limit := lq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := lq.offset; offset != nil { if offset := lq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := lq.order; len(ps) > 0 { if ps := lq.order; len(ps) > 0 {
@ -723,7 +724,7 @@ func (lq *LocationQuery) querySpec() *sqlgraph.QuerySpec {
func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector { func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(lq.driver.Dialect()) builder := sql.Dialect(lq.driver.Dialect())
t1 := builder.Table(location.Table) t1 := builder.Table(location.Table)
columns := lq.fields columns := lq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = location.Columns columns = location.Columns
} }
@ -732,7 +733,7 @@ func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = lq.sql selector = lq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if lq.unique != nil && *lq.unique { if lq.ctx.Unique != nil && *lq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range lq.predicates { for _, p := range lq.predicates {
@ -741,12 +742,12 @@ func (lq *LocationQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range lq.order { for _, p := range lq.order {
p(selector) p(selector)
} }
if offset := lq.offset; offset != nil { if offset := lq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := lq.limit; limit != nil { if limit := lq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -766,7 +767,7 @@ func (lgb *LocationGroupBy) Aggregate(fns ...AggregateFunc) *LocationGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (lgb *LocationGroupBy) Scan(ctx context.Context, v any) error { func (lgb *LocationGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeLocation, "GroupBy") ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy")
if err := lgb.build.prepareQuery(ctx); err != nil { if err := lgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -814,7 +815,7 @@ func (ls *LocationSelect) Aggregate(fns ...AggregateFunc) *LocationSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ls *LocationSelect) Scan(ctx context.Context, v any) error { func (ls *LocationSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeLocation, "Select") ctx = setContextOp(ctx, ls.ctx, "Select")
if err := ls.prepareQuery(ctx); err != nil { if err := ls.prepareQuery(ctx); err != nil {
return err return err
} }
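The separate limit/offset/unique/fields members are folded into the single ctx *QueryContext, and Clone copies it with ctx.Clone(); none of the exported builder methods change shape. A small sketch of the pagination path that now reads those options back out of lq.ctx in querySpec and sqlQuery; the predicate and ordering are illustrative:

func pageLocations(ctx context.Context, client *ent.Client) ([]*ent.Location, error) {
	// Offset, Limit, and Unique below land on the query's QueryContext and
	// resurface in querySpec()/sqlQuery() exactly as shown above.
	return client.Location.Query().
		Where(location.HasItems()).
		Order(ent.Asc(location.FieldName)).
		Offset(20).
		Limit(10).
		Unique(false).
		All(ctx)
}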

View file

@ -142,14 +142,14 @@ func (me *MaintenanceEntry) assignValues(columns []string, values []any) error {
// QueryItem queries the "item" edge of the MaintenanceEntry entity. // QueryItem queries the "item" edge of the MaintenanceEntry entity.
func (me *MaintenanceEntry) QueryItem() *ItemQuery { func (me *MaintenanceEntry) QueryItem() *ItemQuery {
return (&MaintenanceEntryClient{config: me.config}).QueryItem(me) return NewMaintenanceEntryClient(me.config).QueryItem(me)
} }
// Update returns a builder for updating this MaintenanceEntry. // Update returns a builder for updating this MaintenanceEntry.
// Note that you need to call MaintenanceEntry.Unwrap() before calling this method if this MaintenanceEntry // Note that you need to call MaintenanceEntry.Unwrap() before calling this method if this MaintenanceEntry
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (me *MaintenanceEntry) Update() *MaintenanceEntryUpdateOne { func (me *MaintenanceEntry) Update() *MaintenanceEntryUpdateOne {
return (&MaintenanceEntryClient{config: me.config}).UpdateOne(me) return NewMaintenanceEntryClient(me.config).UpdateOne(me)
} }
// Unwrap unwraps the MaintenanceEntry entity that was returned from a transaction after it was closed, // Unwrap unwraps the MaintenanceEntry entity that was returned from a transaction after it was closed,

View file

@ -69,6 +69,12 @@ type MaintenanceEntryDeleteOne struct {
med *MaintenanceEntryDelete med *MaintenanceEntryDelete
} }
// Where appends a list of predicates to the MaintenanceEntryDelete builder.
func (medo *MaintenanceEntryDeleteOne) Where(ps ...predicate.MaintenanceEntry) *MaintenanceEntryDeleteOne {
medo.med.mutation.Where(ps...)
return medo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (medo *MaintenanceEntryDeleteOne) Exec(ctx context.Context) error { func (medo *MaintenanceEntryDeleteOne) Exec(ctx context.Context) error {
n, err := medo.med.Exec(ctx) n, err := medo.med.Exec(ctx)
@ -84,5 +90,7 @@ func (medo *MaintenanceEntryDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (medo *MaintenanceEntryDeleteOne) ExecX(ctx context.Context) { func (medo *MaintenanceEntryDeleteOne) ExecX(ctx context.Context) {
medo.med.ExecX(ctx) if err := medo.Exec(ctx); err != nil {
panic(err)
}
} }

View file

@ -19,11 +19,8 @@ import (
// MaintenanceEntryQuery is the builder for querying MaintenanceEntry entities. // MaintenanceEntryQuery is the builder for querying MaintenanceEntry entities.
type MaintenanceEntryQuery struct { type MaintenanceEntryQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.MaintenanceEntry predicates []predicate.MaintenanceEntry
withItem *ItemQuery withItem *ItemQuery
@ -40,20 +37,20 @@ func (meq *MaintenanceEntryQuery) Where(ps ...predicate.MaintenanceEntry) *Maint
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (meq *MaintenanceEntryQuery) Limit(limit int) *MaintenanceEntryQuery { func (meq *MaintenanceEntryQuery) Limit(limit int) *MaintenanceEntryQuery {
meq.limit = &limit meq.ctx.Limit = &limit
return meq return meq
} }
// Offset to start from. // Offset to start from.
func (meq *MaintenanceEntryQuery) Offset(offset int) *MaintenanceEntryQuery { func (meq *MaintenanceEntryQuery) Offset(offset int) *MaintenanceEntryQuery {
meq.offset = &offset meq.ctx.Offset = &offset
return meq return meq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (meq *MaintenanceEntryQuery) Unique(unique bool) *MaintenanceEntryQuery { func (meq *MaintenanceEntryQuery) Unique(unique bool) *MaintenanceEntryQuery {
meq.unique = &unique meq.ctx.Unique = &unique
return meq return meq
} }
@ -88,7 +85,7 @@ func (meq *MaintenanceEntryQuery) QueryItem() *ItemQuery {
// First returns the first MaintenanceEntry entity from the query. // First returns the first MaintenanceEntry entity from the query.
// Returns a *NotFoundError when no MaintenanceEntry was found. // Returns a *NotFoundError when no MaintenanceEntry was found.
func (meq *MaintenanceEntryQuery) First(ctx context.Context) (*MaintenanceEntry, error) { func (meq *MaintenanceEntryQuery) First(ctx context.Context) (*MaintenanceEntry, error) {
nodes, err := meq.Limit(1).All(newQueryContext(ctx, TypeMaintenanceEntry, "First")) nodes, err := meq.Limit(1).All(setContextOp(ctx, meq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -111,7 +108,7 @@ func (meq *MaintenanceEntryQuery) FirstX(ctx context.Context) *MaintenanceEntry
// Returns a *NotFoundError when no MaintenanceEntry ID was found. // Returns a *NotFoundError when no MaintenanceEntry ID was found.
func (meq *MaintenanceEntryQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (meq *MaintenanceEntryQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = meq.Limit(1).IDs(newQueryContext(ctx, TypeMaintenanceEntry, "FirstID")); err != nil { if ids, err = meq.Limit(1).IDs(setContextOp(ctx, meq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -134,7 +131,7 @@ func (meq *MaintenanceEntryQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one MaintenanceEntry entity is found. // Returns a *NotSingularError when more than one MaintenanceEntry entity is found.
// Returns a *NotFoundError when no MaintenanceEntry entities are found. // Returns a *NotFoundError when no MaintenanceEntry entities are found.
func (meq *MaintenanceEntryQuery) Only(ctx context.Context) (*MaintenanceEntry, error) { func (meq *MaintenanceEntryQuery) Only(ctx context.Context) (*MaintenanceEntry, error) {
nodes, err := meq.Limit(2).All(newQueryContext(ctx, TypeMaintenanceEntry, "Only")) nodes, err := meq.Limit(2).All(setContextOp(ctx, meq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -162,7 +159,7 @@ func (meq *MaintenanceEntryQuery) OnlyX(ctx context.Context) *MaintenanceEntry {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (meq *MaintenanceEntryQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (meq *MaintenanceEntryQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = meq.Limit(2).IDs(newQueryContext(ctx, TypeMaintenanceEntry, "OnlyID")); err != nil { if ids, err = meq.Limit(2).IDs(setContextOp(ctx, meq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -187,7 +184,7 @@ func (meq *MaintenanceEntryQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of MaintenanceEntries. // All executes the query and returns a list of MaintenanceEntries.
func (meq *MaintenanceEntryQuery) All(ctx context.Context) ([]*MaintenanceEntry, error) { func (meq *MaintenanceEntryQuery) All(ctx context.Context) ([]*MaintenanceEntry, error) {
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "All") ctx = setContextOp(ctx, meq.ctx, "All")
if err := meq.prepareQuery(ctx); err != nil { if err := meq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -207,7 +204,7 @@ func (meq *MaintenanceEntryQuery) AllX(ctx context.Context) []*MaintenanceEntry
// IDs executes the query and returns a list of MaintenanceEntry IDs. // IDs executes the query and returns a list of MaintenanceEntry IDs.
func (meq *MaintenanceEntryQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (meq *MaintenanceEntryQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "IDs") ctx = setContextOp(ctx, meq.ctx, "IDs")
if err := meq.Select(maintenanceentry.FieldID).Scan(ctx, &ids); err != nil { if err := meq.Select(maintenanceentry.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -225,7 +222,7 @@ func (meq *MaintenanceEntryQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (meq *MaintenanceEntryQuery) Count(ctx context.Context) (int, error) { func (meq *MaintenanceEntryQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "Count") ctx = setContextOp(ctx, meq.ctx, "Count")
if err := meq.prepareQuery(ctx); err != nil { if err := meq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -243,7 +240,7 @@ func (meq *MaintenanceEntryQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (meq *MaintenanceEntryQuery) Exist(ctx context.Context) (bool, error) { func (meq *MaintenanceEntryQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "Exist") ctx = setContextOp(ctx, meq.ctx, "Exist")
switch _, err := meq.FirstID(ctx); { switch _, err := meq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -271,8 +268,7 @@ func (meq *MaintenanceEntryQuery) Clone() *MaintenanceEntryQuery {
} }
return &MaintenanceEntryQuery{ return &MaintenanceEntryQuery{
config: meq.config, config: meq.config,
limit: meq.limit, ctx: meq.ctx.Clone(),
offset: meq.offset,
order: append([]OrderFunc{}, meq.order...), order: append([]OrderFunc{}, meq.order...),
inters: append([]Interceptor{}, meq.inters...), inters: append([]Interceptor{}, meq.inters...),
predicates: append([]predicate.MaintenanceEntry{}, meq.predicates...), predicates: append([]predicate.MaintenanceEntry{}, meq.predicates...),
@ -280,7 +276,6 @@ func (meq *MaintenanceEntryQuery) Clone() *MaintenanceEntryQuery {
// clone intermediate query. // clone intermediate query.
sql: meq.sql.Clone(), sql: meq.sql.Clone(),
path: meq.path, path: meq.path,
unique: meq.unique,
} }
} }
@ -310,9 +305,9 @@ func (meq *MaintenanceEntryQuery) WithItem(opts ...func(*ItemQuery)) *Maintenanc
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (meq *MaintenanceEntryQuery) GroupBy(field string, fields ...string) *MaintenanceEntryGroupBy { func (meq *MaintenanceEntryQuery) GroupBy(field string, fields ...string) *MaintenanceEntryGroupBy {
meq.fields = append([]string{field}, fields...) meq.ctx.Fields = append([]string{field}, fields...)
grbuild := &MaintenanceEntryGroupBy{build: meq} grbuild := &MaintenanceEntryGroupBy{build: meq}
grbuild.flds = &meq.fields grbuild.flds = &meq.ctx.Fields
grbuild.label = maintenanceentry.Label grbuild.label = maintenanceentry.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -331,10 +326,10 @@ func (meq *MaintenanceEntryQuery) GroupBy(field string, fields ...string) *Maint
// Select(maintenanceentry.FieldCreatedAt). // Select(maintenanceentry.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (meq *MaintenanceEntryQuery) Select(fields ...string) *MaintenanceEntrySelect { func (meq *MaintenanceEntryQuery) Select(fields ...string) *MaintenanceEntrySelect {
meq.fields = append(meq.fields, fields...) meq.ctx.Fields = append(meq.ctx.Fields, fields...)
sbuild := &MaintenanceEntrySelect{MaintenanceEntryQuery: meq} sbuild := &MaintenanceEntrySelect{MaintenanceEntryQuery: meq}
sbuild.label = maintenanceentry.Label sbuild.label = maintenanceentry.Label
sbuild.flds, sbuild.scan = &meq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &meq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -354,7 +349,7 @@ func (meq *MaintenanceEntryQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range meq.fields { for _, f := range meq.ctx.Fields {
if !maintenanceentry.ValidColumn(f) { if !maintenanceentry.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -414,6 +409,9 @@ func (meq *MaintenanceEntryQuery) loadItem(ctx context.Context, query *ItemQuery
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(item.IDIn(ids...)) query.Where(item.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -433,9 +431,9 @@ func (meq *MaintenanceEntryQuery) loadItem(ctx context.Context, query *ItemQuery
func (meq *MaintenanceEntryQuery) sqlCount(ctx context.Context) (int, error) { func (meq *MaintenanceEntryQuery) sqlCount(ctx context.Context) (int, error) {
_spec := meq.querySpec() _spec := meq.querySpec()
_spec.Node.Columns = meq.fields _spec.Node.Columns = meq.ctx.Fields
if len(meq.fields) > 0 { if len(meq.ctx.Fields) > 0 {
_spec.Unique = meq.unique != nil && *meq.unique _spec.Unique = meq.ctx.Unique != nil && *meq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, meq.driver, _spec) return sqlgraph.CountNodes(ctx, meq.driver, _spec)
} }
@ -453,10 +451,10 @@ func (meq *MaintenanceEntryQuery) querySpec() *sqlgraph.QuerySpec {
From: meq.sql, From: meq.sql,
Unique: true, Unique: true,
} }
if unique := meq.unique; unique != nil { if unique := meq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := meq.fields; len(fields) > 0 { if fields := meq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, maintenanceentry.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, maintenanceentry.FieldID)
for i := range fields { for i := range fields {
@ -472,10 +470,10 @@ func (meq *MaintenanceEntryQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := meq.limit; limit != nil { if limit := meq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := meq.offset; offset != nil { if offset := meq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := meq.order; len(ps) > 0 { if ps := meq.order; len(ps) > 0 {
@ -491,7 +489,7 @@ func (meq *MaintenanceEntryQuery) querySpec() *sqlgraph.QuerySpec {
func (meq *MaintenanceEntryQuery) sqlQuery(ctx context.Context) *sql.Selector { func (meq *MaintenanceEntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(meq.driver.Dialect()) builder := sql.Dialect(meq.driver.Dialect())
t1 := builder.Table(maintenanceentry.Table) t1 := builder.Table(maintenanceentry.Table)
columns := meq.fields columns := meq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = maintenanceentry.Columns columns = maintenanceentry.Columns
} }
@ -500,7 +498,7 @@ func (meq *MaintenanceEntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = meq.sql selector = meq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if meq.unique != nil && *meq.unique { if meq.ctx.Unique != nil && *meq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range meq.predicates { for _, p := range meq.predicates {
@ -509,12 +507,12 @@ func (meq *MaintenanceEntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range meq.order { for _, p := range meq.order {
p(selector) p(selector)
} }
if offset := meq.offset; offset != nil { if offset := meq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := meq.limit; limit != nil { if limit := meq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -534,7 +532,7 @@ func (megb *MaintenanceEntryGroupBy) Aggregate(fns ...AggregateFunc) *Maintenanc
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (megb *MaintenanceEntryGroupBy) Scan(ctx context.Context, v any) error { func (megb *MaintenanceEntryGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "GroupBy") ctx = setContextOp(ctx, megb.build.ctx, "GroupBy")
if err := megb.build.prepareQuery(ctx); err != nil { if err := megb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -582,7 +580,7 @@ func (mes *MaintenanceEntrySelect) Aggregate(fns ...AggregateFunc) *MaintenanceE
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (mes *MaintenanceEntrySelect) Scan(ctx context.Context, v any) error { func (mes *MaintenanceEntrySelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeMaintenanceEntry, "Select") ctx = setContextOp(ctx, mes.ctx, "Select")
if err := mes.prepareQuery(ctx); err != nil { if err := mes.prepareQuery(ctx); err != nil {
return err return err
} }
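loadItem above now returns early when no item foreign keys were collected, so an eager load over rows without an item no longer issues a pointless IN () query. Eager loading is what drives that helper; a minimal sketch with ctx and client as placeholders:

func entriesWithItems(ctx context.Context, client *ent.Client) ([]*ent.MaintenanceEntry, error) {
	// WithItem triggers loadItem; with zero collected IDs it now returns early
	// instead of querying items with an empty ID list.
	return client.MaintenanceEntry.Query().
		WithItem().
		All(ctx)
}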

View file

@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/hay-kot/homebox/backend/internal/data/ent/runtime.go // The schema-stitching logic is generated in github.com/hay-kot/homebox/backend/internal/data/ent/runtime.go
const ( const (
Version = "v0.11.5" // Version of ent codegen. Version = "v0.11.7" // Version of ent codegen.
Sum = "h1:V2qhG91C4PMQTa82Q4StoESMQ4dzkMNeStCzszxi0jQ=" // Sum of ent codegen. Sum = "h1:V+wKFh0jhAbY/FoU+PPbdMOf2Ma5vh07R/IdF+N/nFg=" // Sum of ent codegen.
) )

View file

@ -179,19 +179,19 @@ func (u *User) assignValues(columns []string, values []any) error {
// QueryGroup queries the "group" edge of the User entity. // QueryGroup queries the "group" edge of the User entity.
func (u *User) QueryGroup() *GroupQuery { func (u *User) QueryGroup() *GroupQuery {
return (&UserClient{config: u.config}).QueryGroup(u) return NewUserClient(u.config).QueryGroup(u)
} }
// QueryAuthTokens queries the "auth_tokens" edge of the User entity. // QueryAuthTokens queries the "auth_tokens" edge of the User entity.
func (u *User) QueryAuthTokens() *AuthTokensQuery { func (u *User) QueryAuthTokens() *AuthTokensQuery {
return (&UserClient{config: u.config}).QueryAuthTokens(u) return NewUserClient(u.config).QueryAuthTokens(u)
} }
// Update returns a builder for updating this User. // Update returns a builder for updating this User.
// Note that you need to call User.Unwrap() before calling this method if this User // Note that you need to call User.Unwrap() before calling this method if this User
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (u *User) Update() *UserUpdateOne { func (u *User) Update() *UserUpdateOne {
return (&UserClient{config: u.config}).UpdateOne(u) return NewUserClient(u.config).UpdateOne(u)
} }
// Unwrap unwraps the User entity that was returned from a transaction after it was closed, // Unwrap unwraps the User entity that was returned from a transaction after it was closed,

View file

@ -69,6 +69,12 @@ type UserDeleteOne struct {
ud *UserDelete ud *UserDelete
} }
// Where appends a list of predicates to the UserDelete builder.
func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne {
udo.ud.mutation.Where(ps...)
return udo
}
// Exec executes the deletion query. // Exec executes the deletion query.
func (udo *UserDeleteOne) Exec(ctx context.Context) error { func (udo *UserDeleteOne) Exec(ctx context.Context) error {
n, err := udo.ud.Exec(ctx) n, err := udo.ud.Exec(ctx)
@ -84,5 +90,7 @@ func (udo *UserDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (udo *UserDeleteOne) ExecX(ctx context.Context) { func (udo *UserDeleteOne) ExecX(ctx context.Context) {
udo.ud.ExecX(ctx) if err := udo.Exec(ctx); err != nil {
panic(err)
}
} }

View file

@ -21,11 +21,8 @@ import (
// UserQuery is the builder for querying User entities. // UserQuery is the builder for querying User entities.
type UserQuery struct { type UserQuery struct {
config config
limit *int ctx *QueryContext
offset *int
unique *bool
order []OrderFunc order []OrderFunc
fields []string
inters []Interceptor inters []Interceptor
predicates []predicate.User predicates []predicate.User
withGroup *GroupQuery withGroup *GroupQuery
@ -44,20 +41,20 @@ func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery {
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (uq *UserQuery) Limit(limit int) *UserQuery { func (uq *UserQuery) Limit(limit int) *UserQuery {
uq.limit = &limit uq.ctx.Limit = &limit
return uq return uq
} }
// Offset to start from. // Offset to start from.
func (uq *UserQuery) Offset(offset int) *UserQuery { func (uq *UserQuery) Offset(offset int) *UserQuery {
uq.offset = &offset uq.ctx.Offset = &offset
return uq return uq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (uq *UserQuery) Unique(unique bool) *UserQuery { func (uq *UserQuery) Unique(unique bool) *UserQuery {
uq.unique = &unique uq.ctx.Unique = &unique
return uq return uq
} }
@ -114,7 +111,7 @@ func (uq *UserQuery) QueryAuthTokens() *AuthTokensQuery {
// First returns the first User entity from the query. // First returns the first User entity from the query.
// Returns a *NotFoundError when no User was found. // Returns a *NotFoundError when no User was found.
func (uq *UserQuery) First(ctx context.Context) (*User, error) { func (uq *UserQuery) First(ctx context.Context) (*User, error) {
nodes, err := uq.Limit(1).All(newQueryContext(ctx, TypeUser, "First")) nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -137,7 +134,7 @@ func (uq *UserQuery) FirstX(ctx context.Context) *User {
// Returns a *NotFoundError when no User ID was found. // Returns a *NotFoundError when no User ID was found.
func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = uq.Limit(1).IDs(newQueryContext(ctx, TypeUser, "FirstID")); err != nil { if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@ -160,7 +157,7 @@ func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID {
// Returns a *NotSingularError when more than one User entity is found. // Returns a *NotSingularError when more than one User entity is found.
// Returns a *NotFoundError when no User entities are found. // Returns a *NotFoundError when no User entities are found.
func (uq *UserQuery) Only(ctx context.Context) (*User, error) { func (uq *UserQuery) Only(ctx context.Context) (*User, error) {
nodes, err := uq.Limit(2).All(newQueryContext(ctx, TypeUser, "Only")) nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -188,7 +185,7 @@ func (uq *UserQuery) OnlyX(ctx context.Context) *User {
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID var ids []uuid.UUID
if ids, err = uq.Limit(2).IDs(newQueryContext(ctx, TypeUser, "OnlyID")); err != nil { if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@ -213,7 +210,7 @@ func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID {
// All executes the query and returns a list of Users. // All executes the query and returns a list of Users.
func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { func (uq *UserQuery) All(ctx context.Context) ([]*User, error) {
ctx = newQueryContext(ctx, TypeUser, "All") ctx = setContextOp(ctx, uq.ctx, "All")
if err := uq.prepareQuery(ctx); err != nil { if err := uq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
@ -233,7 +230,7 @@ func (uq *UserQuery) AllX(ctx context.Context) []*User {
// IDs executes the query and returns a list of User IDs. // IDs executes the query and returns a list of User IDs.
func (uq *UserQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { func (uq *UserQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID var ids []uuid.UUID
ctx = newQueryContext(ctx, TypeUser, "IDs") ctx = setContextOp(ctx, uq.ctx, "IDs")
if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
@ -251,7 +248,7 @@ func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID {
// Count returns the count of the given query. // Count returns the count of the given query.
func (uq *UserQuery) Count(ctx context.Context) (int, error) { func (uq *UserQuery) Count(ctx context.Context) (int, error) {
ctx = newQueryContext(ctx, TypeUser, "Count") ctx = setContextOp(ctx, uq.ctx, "Count")
if err := uq.prepareQuery(ctx); err != nil { if err := uq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
@ -269,7 +266,7 @@ func (uq *UserQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { func (uq *UserQuery) Exist(ctx context.Context) (bool, error) {
ctx = newQueryContext(ctx, TypeUser, "Exist") ctx = setContextOp(ctx, uq.ctx, "Exist")
switch _, err := uq.FirstID(ctx); { switch _, err := uq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
@ -297,8 +294,7 @@ func (uq *UserQuery) Clone() *UserQuery {
} }
return &UserQuery{ return &UserQuery{
config: uq.config, config: uq.config,
limit: uq.limit, ctx: uq.ctx.Clone(),
offset: uq.offset,
order: append([]OrderFunc{}, uq.order...), order: append([]OrderFunc{}, uq.order...),
inters: append([]Interceptor{}, uq.inters...), inters: append([]Interceptor{}, uq.inters...),
predicates: append([]predicate.User{}, uq.predicates...), predicates: append([]predicate.User{}, uq.predicates...),
@ -307,7 +303,6 @@ func (uq *UserQuery) Clone() *UserQuery {
// clone intermediate query. // clone intermediate query.
sql: uq.sql.Clone(), sql: uq.sql.Clone(),
path: uq.path, path: uq.path,
unique: uq.unique,
} }
} }
@ -348,9 +343,9 @@ func (uq *UserQuery) WithAuthTokens(opts ...func(*AuthTokensQuery)) *UserQuery {
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
uq.fields = append([]string{field}, fields...) uq.ctx.Fields = append([]string{field}, fields...)
grbuild := &UserGroupBy{build: uq} grbuild := &UserGroupBy{build: uq}
grbuild.flds = &uq.fields grbuild.flds = &uq.ctx.Fields
grbuild.label = user.Label grbuild.label = user.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@ -369,10 +364,10 @@ func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
// Select(user.FieldCreatedAt). // Select(user.FieldCreatedAt).
// Scan(ctx, &v) // Scan(ctx, &v)
func (uq *UserQuery) Select(fields ...string) *UserSelect { func (uq *UserQuery) Select(fields ...string) *UserSelect {
uq.fields = append(uq.fields, fields...) uq.ctx.Fields = append(uq.ctx.Fields, fields...)
sbuild := &UserSelect{UserQuery: uq} sbuild := &UserSelect{UserQuery: uq}
sbuild.label = user.Label sbuild.label = user.Label
sbuild.flds, sbuild.scan = &uq.fields, sbuild.Scan sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
@ -392,7 +387,7 @@ func (uq *UserQuery) prepareQuery(ctx context.Context) error {
} }
} }
} }
for _, f := range uq.fields { for _, f := range uq.ctx.Fields {
if !user.ValidColumn(f) { if !user.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
@ -470,6 +465,9 @@ func (uq *UserQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*
} }
nodeids[fk] = append(nodeids[fk], nodes[i]) nodeids[fk] = append(nodeids[fk], nodes[i])
} }
if len(ids) == 0 {
return nil
}
query.Where(group.IDIn(ids...)) query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx) neighbors, err := query.All(ctx)
if err != nil { if err != nil {
@ -520,9 +518,9 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery,
func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) {
_spec := uq.querySpec() _spec := uq.querySpec()
_spec.Node.Columns = uq.fields _spec.Node.Columns = uq.ctx.Fields
if len(uq.fields) > 0 { if len(uq.ctx.Fields) > 0 {
_spec.Unique = uq.unique != nil && *uq.unique _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, uq.driver, _spec) return sqlgraph.CountNodes(ctx, uq.driver, _spec)
} }
@ -540,10 +538,10 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
From: uq.sql, From: uq.sql,
Unique: true, Unique: true,
} }
if unique := uq.unique; unique != nil { if unique := uq.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} }
if fields := uq.fields; len(fields) > 0 { if fields := uq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID)
for i := range fields { for i := range fields {
@ -559,10 +557,10 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if limit := uq.limit; limit != nil { if limit := uq.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := uq.offset; offset != nil { if offset := uq.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := uq.order; len(ps) > 0 { if ps := uq.order; len(ps) > 0 {
@ -578,7 +576,7 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(uq.driver.Dialect()) builder := sql.Dialect(uq.driver.Dialect())
t1 := builder.Table(user.Table) t1 := builder.Table(user.Table)
columns := uq.fields columns := uq.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = user.Columns columns = user.Columns
} }
@ -587,7 +585,7 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = uq.sql selector = uq.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if uq.unique != nil && *uq.unique { if uq.ctx.Unique != nil && *uq.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, p := range uq.predicates { for _, p := range uq.predicates {
@ -596,12 +594,12 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range uq.order { for _, p := range uq.order {
p(selector) p(selector)
} }
if offset := uq.offset; offset != nil { if offset := uq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := uq.limit; limit != nil { if limit := uq.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
@ -621,7 +619,7 @@ func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeUser, "GroupBy") ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy")
if err := ugb.build.prepareQuery(ctx); err != nil { if err := ugb.build.prepareQuery(ctx); err != nil {
return err return err
} }
@ -669,7 +667,7 @@ func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect {
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (us *UserSelect) Scan(ctx context.Context, v any) error { func (us *UserSelect) Scan(ctx context.Context, v any) error {
ctx = newQueryContext(ctx, TypeUser, "Select") ctx = setContextOp(ctx, us.ctx, "Select")
if err := us.prepareQuery(ctx); err != nil { if err := us.prepareQuery(ctx); err != nil {
return err return err
} }

View file

@ -5,7 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ardanlabs/conf/v2" "github.com/ardanlabs/conf/v3"
"os" "os"
) )
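The application config now imports github.com/ardanlabs/conf/v3 instead of v2. Only the import path changes in this hunk; the sketch below shows the commonly documented v3 parse-and-help flow, with the HBOX prefix and the Config struct as illustrative placeholders rather than code from this commit:

package config

import (
	"errors"
	"fmt"

	"github.com/ardanlabs/conf/v3"
)

type Config struct {
	Web struct {
		Port string `conf:"default:7745"`
	}
}

func New() (*Config, error) {
	var cfg Config
	// conf.Parse fills cfg from env vars and flags; ErrHelpWanted means --help was requested.
	help, err := conf.Parse("HBOX", &cfg)
	if err != nil {
		if errors.Is(err, conf.ErrHelpWanted) {
			fmt.Println(help)
			return nil, err
		}
		return nil, fmt.Errorf("parsing config: %w", err)
	}
	return &cfg, nil
}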