forked from mirrors/homebox
chore: upgrade deps + code-gen (#249)
parent 3d295b5132
commit 6ed1f3695a
42 changed files with 664 additions and 563 deletions
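The file shown below is the regenerated ent query builder for the Group entity. The upgraded code generator consolidates the builder's separate limit, offset, unique and fields state into a single QueryContext stored on the builder (gq.ctx), and each terminal method now attaches that context with setContextOp instead of the older newQueryContext call. A minimal sketch of the pattern, using hypothetical names (queryCtx, ctxKey, setOp) rather than ent's generated QueryContext and setContextOp:

package sketch

import "context"

// queryCtx loosely mirrors the state the generated builder now keeps in one
// place: paging, uniqueness, selected fields, and the operation being run.
type queryCtx struct {
	Op     string
	Limit  *int
	Offset *int
	Unique *bool
	Fields []string
}

// ctxKey is an unexported key type so the stored value cannot collide with
// other context values.
type ctxKey struct{}

// setOp stamps the operation name onto the shared query state and attaches it
// to the context, which is what lets interceptors inspect the query about to
// run. ent's generated setContextOp plays this role in the diff below.
func setOp(ctx context.Context, qc *queryCtx, op string) context.Context {
	qc.Op = op
	return context.WithValue(ctx, ctxKey{}, qc)
}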
@@ -25,11 +25,8 @@ import (
 // GroupQuery is the builder for querying Group entities.
 type GroupQuery struct {
 	config
-	limit *int
-	offset *int
-	unique *bool
+	ctx *QueryContext
 	order []OrderFunc
-	fields []string
 	inters []Interceptor
 	predicates []predicate.Group
 	withUsers *UserQuery
@@ -51,20 +48,20 @@ func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
 
 // Limit the number of records to be returned by this query.
 func (gq *GroupQuery) Limit(limit int) *GroupQuery {
-	gq.limit = &limit
+	gq.ctx.Limit = &limit
 	return gq
 }
 
 // Offset to start from.
 func (gq *GroupQuery) Offset(offset int) *GroupQuery {
-	gq.offset = &offset
+	gq.ctx.Offset = &offset
 	return gq
 }
 
 // Unique configures the query builder to filter duplicate records on query.
 // By default, unique is set to true, and can be disabled using this method.
 func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
-	gq.unique = &unique
+	gq.ctx.Unique = &unique
 	return gq
 }
 
@@ -209,7 +206,7 @@ func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery {
 // First returns the first Group entity from the query.
 // Returns a *NotFoundError when no Group was found.
 func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
-	nodes, err := gq.Limit(1).All(newQueryContext(ctx, TypeGroup, "First"))
+	nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, "First"))
 	if err != nil {
 		return nil, err
 	}
@@ -232,7 +229,7 @@ func (gq *GroupQuery) FirstX(ctx context.Context) *Group {
 // Returns a *NotFoundError when no Group ID was found.
 func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
 	var ids []uuid.UUID
-	if ids, err = gq.Limit(1).IDs(newQueryContext(ctx, TypeGroup, "FirstID")); err != nil {
+	if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, "FirstID")); err != nil {
 		return
 	}
 	if len(ids) == 0 {
@@ -255,7 +252,7 @@ func (gq *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID {
 // Returns a *NotSingularError when more than one Group entity is found.
 // Returns a *NotFoundError when no Group entities are found.
 func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) {
-	nodes, err := gq.Limit(2).All(newQueryContext(ctx, TypeGroup, "Only"))
+	nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, "Only"))
 	if err != nil {
 		return nil, err
 	}
@@ -283,7 +280,7 @@ func (gq *GroupQuery) OnlyX(ctx context.Context) *Group {
 // Returns a *NotFoundError when no entities are found.
 func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
 	var ids []uuid.UUID
-	if ids, err = gq.Limit(2).IDs(newQueryContext(ctx, TypeGroup, "OnlyID")); err != nil {
+	if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, "OnlyID")); err != nil {
 		return
 	}
 	switch len(ids) {
@@ -308,7 +305,7 @@ func (gq *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID {
 
 // All executes the query and returns a list of Groups.
 func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) {
-	ctx = newQueryContext(ctx, TypeGroup, "All")
+	ctx = setContextOp(ctx, gq.ctx, "All")
 	if err := gq.prepareQuery(ctx); err != nil {
 		return nil, err
 	}
@@ -328,7 +325,7 @@ func (gq *GroupQuery) AllX(ctx context.Context) []*Group {
 // IDs executes the query and returns a list of Group IDs.
 func (gq *GroupQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
 	var ids []uuid.UUID
-	ctx = newQueryContext(ctx, TypeGroup, "IDs")
+	ctx = setContextOp(ctx, gq.ctx, "IDs")
 	if err := gq.Select(group.FieldID).Scan(ctx, &ids); err != nil {
 		return nil, err
 	}
@@ -346,7 +343,7 @@ func (gq *GroupQuery) IDsX(ctx context.Context) []uuid.UUID {
 
 // Count returns the count of the given query.
 func (gq *GroupQuery) Count(ctx context.Context) (int, error) {
-	ctx = newQueryContext(ctx, TypeGroup, "Count")
+	ctx = setContextOp(ctx, gq.ctx, "Count")
 	if err := gq.prepareQuery(ctx); err != nil {
 		return 0, err
 	}
@@ -364,7 +361,7 @@ func (gq *GroupQuery) CountX(ctx context.Context) int {
 
 // Exist returns true if the query has elements in the graph.
 func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) {
-	ctx = newQueryContext(ctx, TypeGroup, "Exist")
+	ctx = setContextOp(ctx, gq.ctx, "Exist")
 	switch _, err := gq.FirstID(ctx); {
 	case IsNotFound(err):
 		return false, nil
@@ -392,8 +389,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
 	}
 	return &GroupQuery{
 		config: gq.config,
-		limit: gq.limit,
-		offset: gq.offset,
+		ctx: gq.ctx.Clone(),
 		order: append([]OrderFunc{}, gq.order...),
 		inters: append([]Interceptor{}, gq.inters...),
 		predicates: append([]predicate.Group{}, gq.predicates...),
@@ -404,9 +400,8 @@ func (gq *GroupQuery) Clone() *GroupQuery {
 		withDocuments: gq.withDocuments.Clone(),
 		withInvitationTokens: gq.withInvitationTokens.Clone(),
 		// clone intermediate query.
-		sql:    gq.sql.Clone(),
-		path:   gq.path,
-		unique: gq.unique,
+		sql:  gq.sql.Clone(),
+		path: gq.path,
 	}
 }
 
@@ -491,9 +486,9 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQuery)) *GroupQuery {
 // Aggregate(ent.Count()).
 // Scan(ctx, &v)
 func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
-	gq.fields = append([]string{field}, fields...)
+	gq.ctx.Fields = append([]string{field}, fields...)
 	grbuild := &GroupGroupBy{build: gq}
-	grbuild.flds = &gq.fields
+	grbuild.flds = &gq.ctx.Fields
 	grbuild.label = group.Label
 	grbuild.scan = grbuild.Scan
 	return grbuild
@@ -512,10 +507,10 @@ func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
 // Select(group.FieldCreatedAt).
 // Scan(ctx, &v)
 func (gq *GroupQuery) Select(fields ...string) *GroupSelect {
-	gq.fields = append(gq.fields, fields...)
+	gq.ctx.Fields = append(gq.ctx.Fields, fields...)
 	sbuild := &GroupSelect{GroupQuery: gq}
 	sbuild.label = group.Label
-	sbuild.flds, sbuild.scan = &gq.fields, sbuild.Scan
+	sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan
 	return sbuild
 }
 
@@ -535,7 +530,7 @@ func (gq *GroupQuery) prepareQuery(ctx context.Context) error {
 			}
 		}
 	}
-	for _, f := range gq.fields {
+	for _, f := range gq.ctx.Fields {
 		if !group.ValidColumn(f) {
 			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
 		}
@@ -817,9 +812,9 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
 
 func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := gq.querySpec()
-	_spec.Node.Columns = gq.fields
-	if len(gq.fields) > 0 {
-		_spec.Unique = gq.unique != nil && *gq.unique
+	_spec.Node.Columns = gq.ctx.Fields
+	if len(gq.ctx.Fields) > 0 {
+		_spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique
 	}
 	return sqlgraph.CountNodes(ctx, gq.driver, _spec)
 }
@@ -837,10 +832,10 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
 		From: gq.sql,
 		Unique: true,
 	}
-	if unique := gq.unique; unique != nil {
+	if unique := gq.ctx.Unique; unique != nil {
 		_spec.Unique = *unique
 	}
-	if fields := gq.fields; len(fields) > 0 {
+	if fields := gq.ctx.Fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
 		for i := range fields {
@@ -856,10 +851,10 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
 			}
 		}
 	}
-	if limit := gq.limit; limit != nil {
+	if limit := gq.ctx.Limit; limit != nil {
 		_spec.Limit = *limit
 	}
-	if offset := gq.offset; offset != nil {
+	if offset := gq.ctx.Offset; offset != nil {
 		_spec.Offset = *offset
 	}
 	if ps := gq.order; len(ps) > 0 {
@@ -875,7 +870,7 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec {
 func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	builder := sql.Dialect(gq.driver.Dialect())
 	t1 := builder.Table(group.Table)
-	columns := gq.fields
+	columns := gq.ctx.Fields
 	if len(columns) == 0 {
 		columns = group.Columns
 	}
@@ -884,7 +879,7 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		selector = gq.sql
 		selector.Select(selector.Columns(columns...)...)
 	}
-	if gq.unique != nil && *gq.unique {
+	if gq.ctx.Unique != nil && *gq.ctx.Unique {
 		selector.Distinct()
 	}
 	for _, p := range gq.predicates {
@@ -893,12 +888,12 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	for _, p := range gq.order {
 		p(selector)
 	}
-	if offset := gq.offset; offset != nil {
+	if offset := gq.ctx.Offset; offset != nil {
 		// limit is mandatory for offset clause. We start
 		// with default value, and override it below if needed.
 		selector.Offset(*offset).Limit(math.MaxInt32)
 	}
-	if limit := gq.limit; limit != nil {
+	if limit := gq.ctx.Limit; limit != nil {
 		selector.Limit(*limit)
 	}
 	return selector
@@ -918,7 +913,7 @@ func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy {
 
 // Scan applies the selector query and scans the result into the given value.
 func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error {
-	ctx = newQueryContext(ctx, TypeGroup, "GroupBy")
+	ctx = setContextOp(ctx, ggb.build.ctx, "GroupBy")
 	if err := ggb.build.prepareQuery(ctx); err != nil {
 		return err
 	}
@@ -966,7 +961,7 @@ func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect {
 
 // Scan applies the selector query and scans the result into the given value.
 func (gs *GroupSelect) Scan(ctx context.Context, v any) error {
-	ctx = newQueryContext(ctx, TypeGroup, "Select")
+	ctx = setContextOp(ctx, gs.ctx, "Select")
 	if err := gs.prepareQuery(ctx); err != nil {
 		return err
 	}
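For code that uses the generated API, nothing changes: Limit, Offset, Unique and the terminal methods keep their signatures, and only their internal bookkeeping moved onto gq.ctx, as the diff above shows. A usage sketch under stated assumptions: the import paths are placeholders (the module path is not visible in this diff), and the Group schema is assumed to have a name field with the usual generated predicates.

package sketch

import (
	"context"
	"fmt"

	// Placeholder paths: substitute the ent packages generated in this repository.
	"example.com/app/ent"
	"example.com/app/ent/group"
)

// listGroups chains the same builder calls as before the upgrade; Limit and
// Offset now record their values on the builder's QueryContext internally.
func listGroups(ctx context.Context, client *ent.Client) error {
	groups, err := client.Group.Query().
		Where(group.NameContains("home")).
		Limit(10).
		Offset(0).
		All(ctx)
	if err != nil {
		return err
	}
	for _, g := range groups {
		fmt.Println(g.Name)
	}
	return nil
}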