repository for document and document tokens

Hayden 2022-09-09 21:50:19 -08:00
parent 3add74091e
commit 1983a686c6
48 changed files with 8032 additions and 104 deletions
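For orientation, the sketch below shows how the generated Document client added by this commit is typically driven, following the standard ent builder pattern that appears throughout the diff. It is illustrative only and not part of the commit: the helper name createDocument, the assumption that an *ent.Client is already open, that a Group row already exists, and the title/path values are all placeholders.

package example

import (
	"context"

	"github.com/hay-kot/content/backend/ent"
)

// createDocument is an illustrative sketch, not generated code from this commit.
func createDocument(ctx context.Context, client *ent.Client) (*ent.Document, error) {
	// Documents belong to a group; grab any existing one for the example.
	group, err := client.Group.Query().First(ctx)
	if err != nil {
		return nil, err
	}
	// The "group" edge is required; the generated check() rejects a missing edge.
	doc, err := client.Document.Create().
		SetTitle("Receipt").
		SetPath("/documents/receipt.pdf").
		SetGroup(group).
		Save(ctx)
	if err != nil {
		return nil, err
	}
	// Attachment tokens are reached through the O2M "document_tokens" edge.
	if _, err := doc.QueryDocumentTokens().All(ctx); err != nil {
		return nil, err
	}
	return doc, nil
}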


@@ -12,6 +12,8 @@ import (
"github.com/hay-kot/content/backend/ent/migrate"
"github.com/hay-kot/content/backend/ent/authtokens"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/itemfield"
@@ -31,6 +33,10 @@ type Client struct {
Schema *migrate.Schema
// AuthTokens is the client for interacting with the AuthTokens builders.
AuthTokens *AuthTokensClient
// Document is the client for interacting with the Document builders.
Document *DocumentClient
// DocumentToken is the client for interacting with the DocumentToken builders.
DocumentToken *DocumentTokenClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Item is the client for interacting with the Item builders.
@@ -57,6 +63,8 @@ func NewClient(opts ...Option) *Client {
func (c *Client) init() {
c.Schema = migrate.NewSchema(c.driver)
c.AuthTokens = NewAuthTokensClient(c.config)
c.Document = NewDocumentClient(c.config)
c.DocumentToken = NewDocumentTokenClient(c.config)
c.Group = NewGroupClient(c.config)
c.Item = NewItemClient(c.config)
c.ItemField = NewItemFieldClient(c.config)
@@ -94,15 +102,17 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
cfg := c.config
cfg.driver = tx
return &Tx{
ctx: ctx,
config: cfg,
AuthTokens: NewAuthTokensClient(cfg),
Document: NewDocumentClient(cfg),
DocumentToken: NewDocumentTokenClient(cfg),
Group: NewGroupClient(cfg),
Item: NewItemClient(cfg),
ItemField: NewItemFieldClient(cfg),
Label: NewLabelClient(cfg),
Location: NewLocationClient(cfg),
User: NewUserClient(cfg),
}, nil
}
@@ -120,15 +130,17 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
cfg := c.config
cfg.driver = &txDriver{tx: tx, drv: c.driver}
return &Tx{
ctx: ctx,
config: cfg,
AuthTokens: NewAuthTokensClient(cfg),
Document: NewDocumentClient(cfg),
DocumentToken: NewDocumentTokenClient(cfg),
Group: NewGroupClient(cfg),
Item: NewItemClient(cfg),
ItemField: NewItemFieldClient(cfg),
Label: NewLabelClient(cfg),
Location: NewLocationClient(cfg),
User: NewUserClient(cfg),
}, nil
}
@@ -158,6 +170,8 @@ func (c *Client) Close() error {
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
c.AuthTokens.Use(hooks...)
c.Document.Use(hooks...)
c.DocumentToken.Use(hooks...)
c.Group.Use(hooks...)
c.Item.Use(hooks...)
c.ItemField.Use(hooks...)
@@ -272,6 +286,234 @@ func (c *AuthTokensClient) Hooks() []Hook {
return c.hooks.AuthTokens
}
// DocumentClient is a client for the Document schema.
type DocumentClient struct {
config
}
// NewDocumentClient returns a client for the Document from the given config.
func NewDocumentClient(c config) *DocumentClient {
return &DocumentClient{config: c}
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `document.Hooks(f(g(h())))`.
func (c *DocumentClient) Use(hooks ...Hook) {
c.hooks.Document = append(c.hooks.Document, hooks...)
}
// Create returns a builder for creating a Document entity.
func (c *DocumentClient) Create() *DocumentCreate {
mutation := newDocumentMutation(c.config, OpCreate)
return &DocumentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// CreateBulk returns a builder for creating a bulk of Document entities.
func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreateBulk {
return &DocumentCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Document.
func (c *DocumentClient) Update() *DocumentUpdate {
mutation := newDocumentMutation(c.config, OpUpdate)
return &DocumentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOne returns an update builder for the given entity.
func (c *DocumentClient) UpdateOne(d *Document) *DocumentUpdateOne {
mutation := newDocumentMutation(c.config, OpUpdateOne, withDocument(d))
return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOneID returns an update builder for the given id.
func (c *DocumentClient) UpdateOneID(id uuid.UUID) *DocumentUpdateOne {
mutation := newDocumentMutation(c.config, OpUpdateOne, withDocumentID(id))
return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// Delete returns a delete builder for Document.
func (c *DocumentClient) Delete() *DocumentDelete {
mutation := newDocumentMutation(c.config, OpDelete)
return &DocumentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *DocumentClient) DeleteOne(d *Document) *DocumentDeleteOne {
return c.DeleteOneID(d.ID)
}
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne {
builder := c.Delete().Where(document.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &DocumentDeleteOne{builder}
}
// Query returns a query builder for Document.
func (c *DocumentClient) Query() *DocumentQuery {
return &DocumentQuery{
config: c.config,
}
}
// Get returns a Document entity by its id.
func (c *DocumentClient) Get(ctx context.Context, id uuid.UUID) (*Document, error) {
return c.Query().Where(document.ID(id)).Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *DocumentClient) GetX(ctx context.Context, id uuid.UUID) *Document {
obj, err := c.Get(ctx, id)
if err != nil {
panic(err)
}
return obj
}
// QueryGroup queries the group edge of a Document.
func (c *DocumentClient) QueryGroup(d *Document) *GroupQuery {
query := &GroupQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := d.ID
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, id),
sqlgraph.To(group.Table, group.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn),
)
fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryDocumentTokens queries the document_tokens edge of a Document.
func (c *DocumentClient) QueryDocumentTokens(d *Document) *DocumentTokenQuery {
query := &DocumentTokenQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := d.ID
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, id),
sqlgraph.To(documenttoken.Table, documenttoken.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn),
)
fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *DocumentClient) Hooks() []Hook {
return c.hooks.Document
}
// DocumentTokenClient is a client for the DocumentToken schema.
type DocumentTokenClient struct {
config
}
// NewDocumentTokenClient returns a client for the DocumentToken from the given config.
func NewDocumentTokenClient(c config) *DocumentTokenClient {
return &DocumentTokenClient{config: c}
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `documenttoken.Hooks(f(g(h())))`.
func (c *DocumentTokenClient) Use(hooks ...Hook) {
c.hooks.DocumentToken = append(c.hooks.DocumentToken, hooks...)
}
// Create returns a builder for creating a DocumentToken entity.
func (c *DocumentTokenClient) Create() *DocumentTokenCreate {
mutation := newDocumentTokenMutation(c.config, OpCreate)
return &DocumentTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// CreateBulk returns a builder for creating a bulk of DocumentToken entities.
func (c *DocumentTokenClient) CreateBulk(builders ...*DocumentTokenCreate) *DocumentTokenCreateBulk {
return &DocumentTokenCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for DocumentToken.
func (c *DocumentTokenClient) Update() *DocumentTokenUpdate {
mutation := newDocumentTokenMutation(c.config, OpUpdate)
return &DocumentTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOne returns an update builder for the given entity.
func (c *DocumentTokenClient) UpdateOne(dt *DocumentToken) *DocumentTokenUpdateOne {
mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentToken(dt))
return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// UpdateOneID returns an update builder for the given id.
func (c *DocumentTokenClient) UpdateOneID(id uuid.UUID) *DocumentTokenUpdateOne {
mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentTokenID(id))
return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// Delete returns a delete builder for DocumentToken.
func (c *DocumentTokenClient) Delete() *DocumentTokenDelete {
mutation := newDocumentTokenMutation(c.config, OpDelete)
return &DocumentTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}
// DeleteOne returns a builder for deleting the given entity.
func (c *DocumentTokenClient) DeleteOne(dt *DocumentToken) *DocumentTokenDeleteOne {
return c.DeleteOneID(dt.ID)
}
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *DocumentTokenClient) DeleteOneID(id uuid.UUID) *DocumentTokenDeleteOne {
builder := c.Delete().Where(documenttoken.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &DocumentTokenDeleteOne{builder}
}
// Query returns a query builder for DocumentToken.
func (c *DocumentTokenClient) Query() *DocumentTokenQuery {
return &DocumentTokenQuery{
config: c.config,
}
}
// Get returns a DocumentToken entity by its id.
func (c *DocumentTokenClient) Get(ctx context.Context, id uuid.UUID) (*DocumentToken, error) {
return c.Query().Where(documenttoken.ID(id)).Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *DocumentTokenClient) GetX(ctx context.Context, id uuid.UUID) *DocumentToken {
obj, err := c.Get(ctx, id)
if err != nil {
panic(err)
}
return obj
}
// QueryDocument queries the document edge of a DocumentToken.
func (c *DocumentTokenClient) QueryDocument(dt *DocumentToken) *DocumentQuery {
query := &DocumentQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := dt.ID
step := sqlgraph.NewStep(
sqlgraph.From(documenttoken.Table, documenttoken.FieldID, id),
sqlgraph.To(document.Table, document.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn),
)
fromV = sqlgraph.Neighbors(dt.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *DocumentTokenClient) Hooks() []Hook {
return c.hooks.DocumentToken
}
// GroupClient is a client for the Group schema.
type GroupClient struct {
config
@@ -421,6 +663,22 @@ func (c *GroupClient) QueryLabels(gr *Group) *LabelQuery {
return query
}
// QueryDocuments queries the documents edge of a Group.
func (c *GroupClient) QueryDocuments(gr *Group) *DocumentQuery {
query := &DocumentQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := gr.ID
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, id),
sqlgraph.To(document.Table, document.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn),
)
fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *GroupClient) Hooks() []Hook {
return c.hooks.Group
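Since the Use methods wired up above are how mutation hooks reach the new clients, here is a brief sketch of registering one. It is illustrative only and not part of the commit: the logging behaviour and the helper name registerDocumentHook are assumptions.

package example

import (
	"context"
	"log"

	"github.com/hay-kot/content/backend/ent"
)

// registerDocumentHook is an illustrative sketch, not code from this commit.
func registerDocumentHook(client *ent.Client) {
	// The hook wraps every Document mutation (create, update, delete).
	client.Document.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			log.Printf("document mutation: op=%v", m.Op())
			return next.Mutate(ctx, m)
		})
	})
}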


@@ -24,13 +24,15 @@ type config struct {
// hooks per client, for fast access.
type hooks struct {
AuthTokens []ent.Hook
Document []ent.Hook
DocumentToken []ent.Hook
Group []ent.Hook
Item []ent.Hook
ItemField []ent.Hook
Label []ent.Hook
Location []ent.Hook
User []ent.Hook
}
// Options applies the options on the config object.

backend/ent/document.go Normal file

@@ -0,0 +1,193 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/group"
)
// Document is the model entity for the Document schema.
type Document struct {
config `json:"-"`
// ID of the ent.
ID uuid.UUID `json:"id,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// Title holds the value of the "title" field.
Title string `json:"title,omitempty"`
// Path holds the value of the "path" field.
Path string `json:"path,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DocumentQuery when eager-loading is set.
Edges DocumentEdges `json:"edges"`
group_documents *uuid.UUID
}
// DocumentEdges holds the relations/edges for other nodes in the graph.
type DocumentEdges struct {
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// DocumentTokens holds the value of the document_tokens edge.
DocumentTokens []*DocumentToken `json:"document_tokens,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [2]bool
}
// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DocumentEdges) GroupOrErr() (*Group, error) {
if e.loadedTypes[0] {
if e.Group == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: group.Label}
}
return e.Group, nil
}
return nil, &NotLoadedError{edge: "group"}
}
// DocumentTokensOrErr returns the DocumentTokens value or an error if the edge
// was not loaded in eager-loading.
func (e DocumentEdges) DocumentTokensOrErr() ([]*DocumentToken, error) {
if e.loadedTypes[1] {
return e.DocumentTokens, nil
}
return nil, &NotLoadedError{edge: "document_tokens"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*Document) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
for i := range columns {
switch columns[i] {
case document.FieldTitle, document.FieldPath:
values[i] = new(sql.NullString)
case document.FieldCreatedAt, document.FieldUpdatedAt:
values[i] = new(sql.NullTime)
case document.FieldID:
values[i] = new(uuid.UUID)
case document.ForeignKeys[0]: // group_documents
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type Document", columns[i])
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Document fields.
func (d *Document) assignValues(columns []string, values []interface{}) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case document.FieldID:
if value, ok := values[i].(*uuid.UUID); !ok {
return fmt.Errorf("unexpected type %T for field id", values[i])
} else if value != nil {
d.ID = *value
}
case document.FieldCreatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field created_at", values[i])
} else if value.Valid {
d.CreatedAt = value.Time
}
case document.FieldUpdatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
} else if value.Valid {
d.UpdatedAt = value.Time
}
case document.FieldTitle:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field title", values[i])
} else if value.Valid {
d.Title = value.String
}
case document.FieldPath:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field path", values[i])
} else if value.Valid {
d.Path = value.String
}
case document.ForeignKeys[0]:
if value, ok := values[i].(*sql.NullScanner); !ok {
return fmt.Errorf("unexpected type %T for field group_documents", values[i])
} else if value.Valid {
d.group_documents = new(uuid.UUID)
*d.group_documents = *value.S.(*uuid.UUID)
}
}
}
return nil
}
// QueryGroup queries the "group" edge of the Document entity.
func (d *Document) QueryGroup() *GroupQuery {
return (&DocumentClient{config: d.config}).QueryGroup(d)
}
// QueryDocumentTokens queries the "document_tokens" edge of the Document entity.
func (d *Document) QueryDocumentTokens() *DocumentTokenQuery {
return (&DocumentClient{config: d.config}).QueryDocumentTokens(d)
}
// Update returns a builder for updating this Document.
// Note that you need to call Document.Unwrap() before calling this method if this Document
// was returned from a transaction, and the transaction was committed or rolled back.
func (d *Document) Update() *DocumentUpdateOne {
return (&DocumentClient{config: d.config}).UpdateOne(d)
}
// Unwrap unwraps the Document entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (d *Document) Unwrap() *Document {
_tx, ok := d.config.driver.(*txDriver)
if !ok {
panic("ent: Document is not a transactional entity")
}
d.config.driver = _tx.drv
return d
}
// String implements the fmt.Stringer.
func (d *Document) String() string {
var builder strings.Builder
builder.WriteString("Document(")
builder.WriteString(fmt.Sprintf("id=%v, ", d.ID))
builder.WriteString("created_at=")
builder.WriteString(d.CreatedAt.Format(time.ANSIC))
builder.WriteString(", ")
builder.WriteString("updated_at=")
builder.WriteString(d.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", ")
builder.WriteString("title=")
builder.WriteString(d.Title)
builder.WriteString(", ")
builder.WriteString("path=")
builder.WriteString(d.Path)
builder.WriteByte(')')
return builder.String()
}
// Documents is a parsable slice of Document.
type Documents []*Document
func (d Documents) config(cfg config) {
for _i := range d {
d[_i].config = cfg
}
}
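The Unwrap method above matters when a Document is created inside a transaction: once the transaction is committed, the entity must be unwrapped before further builders can be used on it. The sketch below shows that pattern; the helper name, field values, and error handling are assumptions, not code from this commit.

package example

import (
	"context"

	"github.com/hay-kot/content/backend/ent"
)

// createDocumentTx is an illustrative sketch, not code from this commit.
func createDocumentTx(ctx context.Context, client *ent.Client, group *ent.Group) (*ent.Document, error) {
	tx, err := client.Tx(ctx)
	if err != nil {
		return nil, err
	}
	doc, err := tx.Document.Create().
		SetTitle("Warranty").
		SetPath("/documents/warranty.pdf").
		SetGroup(group).
		Save(ctx)
	if err != nil {
		_ = tx.Rollback()
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	// After commit, detach the entity from the closed transaction before reuse.
	return doc.Unwrap(), nil
}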


@@ -0,0 +1,89 @@
// Code generated by ent, DO NOT EDIT.
package document
import (
"time"
"github.com/google/uuid"
)
const (
// Label holds the string label denoting the document type in the database.
Label = "document"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldTitle holds the string denoting the title field in the database.
FieldTitle = "title"
// FieldPath holds the string denoting the path field in the database.
FieldPath = "path"
// EdgeGroup holds the string denoting the group edge name in mutations.
EdgeGroup = "group"
// EdgeDocumentTokens holds the string denoting the document_tokens edge name in mutations.
EdgeDocumentTokens = "document_tokens"
// Table holds the table name of the document in the database.
Table = "documents"
// GroupTable is the table that holds the group relation/edge.
GroupTable = "documents"
// GroupInverseTable is the table name for the Group entity.
// It exists in this package in order to avoid circular dependency with the "group" package.
GroupInverseTable = "groups"
// GroupColumn is the table column denoting the group relation/edge.
GroupColumn = "group_documents"
// DocumentTokensTable is the table that holds the document_tokens relation/edge.
DocumentTokensTable = "document_tokens"
// DocumentTokensInverseTable is the table name for the DocumentToken entity.
// It exists in this package in order to avoid circular dependency with the "documenttoken" package.
DocumentTokensInverseTable = "document_tokens"
// DocumentTokensColumn is the table column denoting the document_tokens relation/edge.
DocumentTokensColumn = "document_document_tokens"
)
// Columns holds all SQL columns for document fields.
var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldTitle,
FieldPath,
}
// ForeignKeys holds the SQL foreign-keys that are owned by the "documents"
// table and are not defined as standalone fields in the schema.
var ForeignKeys = []string{
"group_documents",
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
for i := range Columns {
if column == Columns[i] {
return true
}
}
for i := range ForeignKeys {
if column == ForeignKeys[i] {
return true
}
}
return false
}
var (
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
DefaultCreatedAt func() time.Time
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
// TitleValidator is a validator for the "title" field. It is called by the builders before save.
TitleValidator func(string) error
// PathValidator is a validator for the "path" field. It is called by the builders before save.
PathValidator func(string) error
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)


@@ -0,0 +1,525 @@
// Code generated by ent, DO NOT EDIT.
package document
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// Title applies equality check predicate on the "title" field. It's identical to TitleEQ.
func Title(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldTitle), v))
})
}
// Path applies equality check predicate on the "path" field. It's identical to PathEQ.
func Path(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldPath), v))
})
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
}
// TitleEQ applies the EQ predicate on the "title" field.
func TitleEQ(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldTitle), v))
})
}
// TitleNEQ applies the NEQ predicate on the "title" field.
func TitleNEQ(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldTitle), v))
})
}
// TitleIn applies the In predicate on the "title" field.
func TitleIn(vs ...string) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldTitle), v...))
})
}
// TitleNotIn applies the NotIn predicate on the "title" field.
func TitleNotIn(vs ...string) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldTitle), v...))
})
}
// TitleGT applies the GT predicate on the "title" field.
func TitleGT(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldTitle), v))
})
}
// TitleGTE applies the GTE predicate on the "title" field.
func TitleGTE(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldTitle), v))
})
}
// TitleLT applies the LT predicate on the "title" field.
func TitleLT(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldTitle), v))
})
}
// TitleLTE applies the LTE predicate on the "title" field.
func TitleLTE(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldTitle), v))
})
}
// TitleContains applies the Contains predicate on the "title" field.
func TitleContains(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldTitle), v))
})
}
// TitleHasPrefix applies the HasPrefix predicate on the "title" field.
func TitleHasPrefix(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldTitle), v))
})
}
// TitleHasSuffix applies the HasSuffix predicate on the "title" field.
func TitleHasSuffix(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldTitle), v))
})
}
// TitleEqualFold applies the EqualFold predicate on the "title" field.
func TitleEqualFold(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldTitle), v))
})
}
// TitleContainsFold applies the ContainsFold predicate on the "title" field.
func TitleContainsFold(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldTitle), v))
})
}
// PathEQ applies the EQ predicate on the "path" field.
func PathEQ(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldPath), v))
})
}
// PathNEQ applies the NEQ predicate on the "path" field.
func PathNEQ(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldPath), v))
})
}
// PathIn applies the In predicate on the "path" field.
func PathIn(vs ...string) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldPath), v...))
})
}
// PathNotIn applies the NotIn predicate on the "path" field.
func PathNotIn(vs ...string) predicate.Document {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldPath), v...))
})
}
// PathGT applies the GT predicate on the "path" field.
func PathGT(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldPath), v))
})
}
// PathGTE applies the GTE predicate on the "path" field.
func PathGTE(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldPath), v))
})
}
// PathLT applies the LT predicate on the "path" field.
func PathLT(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldPath), v))
})
}
// PathLTE applies the LTE predicate on the "path" field.
func PathLTE(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldPath), v))
})
}
// PathContains applies the Contains predicate on the "path" field.
func PathContains(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldPath), v))
})
}
// PathHasPrefix applies the HasPrefix predicate on the "path" field.
func PathHasPrefix(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldPath), v))
})
}
// PathHasSuffix applies the HasSuffix predicate on the "path" field.
func PathHasSuffix(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldPath), v))
})
}
// PathEqualFold applies the EqualFold predicate on the "path" field.
func PathEqualFold(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldPath), v))
})
}
// PathContainsFold applies the ContainsFold predicate on the "path" field.
func PathContainsFold(v string) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldPath), v))
})
}
// HasGroup applies the HasEdge predicate on the "group" edge.
func HasGroup() predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// HasDocumentTokens applies the HasEdge predicate on the "document_tokens" edge.
func HasDocumentTokens() predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentTokensTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasDocumentTokensWith applies the HasEdge predicate on the "document_tokens" edge with a given conditions (other predicates).
func HasDocumentTokensWith(preds ...predicate.DocumentToken) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentTokensInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
p(s.Not())
})
}
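The predicate helpers above compose directly into queries. A minimal sketch follows, assuming an open *ent.Client; the search terms and the helper name findDocuments are placeholders, not part of the commit.

package example

import (
	"context"

	"github.com/hay-kot/content/backend/ent"
	"github.com/hay-kot/content/backend/ent/document"
)

// findDocuments is an illustrative sketch, not code from this commit.
func findDocuments(ctx context.Context, client *ent.Client) ([]*ent.Document, error) {
	return client.Document.Query().
		Where(
			// Top-level predicates are ANDed; Or groups alternatives.
			document.Or(
				document.TitleContainsFold("manual"),
				document.PathHasSuffix(".pdf"),
			),
			document.HasGroup(),
		).
		All(ctx)
}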


@@ -0,0 +1,412 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group"
)
// DocumentCreate is the builder for creating a Document entity.
type DocumentCreate struct {
config
mutation *DocumentMutation
hooks []Hook
}
// SetCreatedAt sets the "created_at" field.
func (dc *DocumentCreate) SetCreatedAt(t time.Time) *DocumentCreate {
dc.mutation.SetCreatedAt(t)
return dc
}
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dc *DocumentCreate) SetNillableCreatedAt(t *time.Time) *DocumentCreate {
if t != nil {
dc.SetCreatedAt(*t)
}
return dc
}
// SetUpdatedAt sets the "updated_at" field.
func (dc *DocumentCreate) SetUpdatedAt(t time.Time) *DocumentCreate {
dc.mutation.SetUpdatedAt(t)
return dc
}
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dc *DocumentCreate) SetNillableUpdatedAt(t *time.Time) *DocumentCreate {
if t != nil {
dc.SetUpdatedAt(*t)
}
return dc
}
// SetTitle sets the "title" field.
func (dc *DocumentCreate) SetTitle(s string) *DocumentCreate {
dc.mutation.SetTitle(s)
return dc
}
// SetPath sets the "path" field.
func (dc *DocumentCreate) SetPath(s string) *DocumentCreate {
dc.mutation.SetPath(s)
return dc
}
// SetID sets the "id" field.
func (dc *DocumentCreate) SetID(u uuid.UUID) *DocumentCreate {
dc.mutation.SetID(u)
return dc
}
// SetNillableID sets the "id" field if the given value is not nil.
func (dc *DocumentCreate) SetNillableID(u *uuid.UUID) *DocumentCreate {
if u != nil {
dc.SetID(*u)
}
return dc
}
// SetGroupID sets the "group" edge to the Group entity by ID.
func (dc *DocumentCreate) SetGroupID(id uuid.UUID) *DocumentCreate {
dc.mutation.SetGroupID(id)
return dc
}
// SetGroup sets the "group" edge to the Group entity.
func (dc *DocumentCreate) SetGroup(g *Group) *DocumentCreate {
return dc.SetGroupID(g.ID)
}
// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
func (dc *DocumentCreate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentCreate {
dc.mutation.AddDocumentTokenIDs(ids...)
return dc
}
// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
func (dc *DocumentCreate) AddDocumentTokens(d ...*DocumentToken) *DocumentCreate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return dc.AddDocumentTokenIDs(ids...)
}
// Mutation returns the DocumentMutation object of the builder.
func (dc *DocumentCreate) Mutation() *DocumentMutation {
return dc.mutation
}
// Save creates the Document in the database.
func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) {
var (
err error
node *Document
)
dc.defaults()
if len(dc.hooks) == 0 {
if err = dc.check(); err != nil {
return nil, err
}
node, err = dc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = dc.check(); err != nil {
return nil, err
}
dc.mutation = mutation
if node, err = dc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(dc.hooks) - 1; i >= 0; i-- {
if dc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, dc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Document)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v)
}
node = nv
}
return node, err
}
// SaveX calls Save and panics if Save returns an error.
func (dc *DocumentCreate) SaveX(ctx context.Context) *Document {
v, err := dc.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (dc *DocumentCreate) Exec(ctx context.Context) error {
_, err := dc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dc *DocumentCreate) ExecX(ctx context.Context) {
if err := dc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dc *DocumentCreate) defaults() {
if _, ok := dc.mutation.CreatedAt(); !ok {
v := document.DefaultCreatedAt()
dc.mutation.SetCreatedAt(v)
}
if _, ok := dc.mutation.UpdatedAt(); !ok {
v := document.DefaultUpdatedAt()
dc.mutation.SetUpdatedAt(v)
}
if _, ok := dc.mutation.ID(); !ok {
v := document.DefaultID()
dc.mutation.SetID(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (dc *DocumentCreate) check() error {
if _, ok := dc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Document.created_at"`)}
}
if _, ok := dc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Document.updated_at"`)}
}
if _, ok := dc.mutation.Title(); !ok {
return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "Document.title"`)}
}
if v, ok := dc.mutation.Title(); ok {
if err := document.TitleValidator(v); err != nil {
return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)}
}
}
if _, ok := dc.mutation.Path(); !ok {
return &ValidationError{Name: "path", err: errors.New(`ent: missing required field "Document.path"`)}
}
if v, ok := dc.mutation.Path(); ok {
if err := document.PathValidator(v); err != nil {
return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)}
}
}
if _, ok := dc.mutation.GroupID(); !ok {
return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Document.group"`)}
}
return nil
}
func (dc *DocumentCreate) sqlSave(ctx context.Context) (*Document, error) {
_node, _spec := dc.createSpec()
if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
if _spec.ID.Value != nil {
if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
_node.ID = *id
} else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
return nil, err
}
}
return _node, nil
}
func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
var (
_node = &Document{config: dc.config}
_spec = &sqlgraph.CreateSpec{
Table: document.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
}
)
if id, ok := dc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := dc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: document.FieldCreatedAt,
})
_node.CreatedAt = value
}
if value, ok := dc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: document.FieldUpdatedAt,
})
_node.UpdatedAt = value
}
if value, ok := dc.mutation.Title(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldTitle,
})
_node.Title = value
}
if value, ok := dc.mutation.Path(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldPath,
})
_node.Path = value
}
if nodes := dc.mutation.GroupIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: document.GroupTable,
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_node.group_documents = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := dc.mutation.DocumentTokensIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
return _node, _spec
}
// DocumentCreateBulk is the builder for creating many Document entities in bulk.
type DocumentCreateBulk struct {
config
builders []*DocumentCreate
}
// Save creates the Document entities in the database.
func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
nodes := make([]*Document, len(dcb.builders))
mutators := make([]Mutator, len(dcb.builders))
for i := range dcb.builders {
func(i int, root context.Context) {
builder := dcb.builders[i]
builder.defaults()
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err := builder.check(); err != nil {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
}
}
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
mut = builder.hooks[i](mut)
}
mutators[i] = mut
}(i, ctx)
}
if len(mutators) > 0 {
if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil {
return nil, err
}
}
return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dcb *DocumentCreateBulk) SaveX(ctx context.Context) []*Document {
v, err := dcb.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (dcb *DocumentCreateBulk) Exec(ctx context.Context) error {
_, err := dcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dcb *DocumentCreateBulk) ExecX(ctx context.Context) {
if err := dcb.Exec(ctx); err != nil {
panic(err)
}
}
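For completeness, here is a sketch of driving the bulk builder generated above. The helper name importDocuments, the group parameter, and the idea of deriving titles from paths are assumptions, not part of the commit.

package example

import (
	"context"

	"github.com/hay-kot/content/backend/ent"
)

// importDocuments is an illustrative sketch, not code from this commit.
func importDocuments(ctx context.Context, client *ent.Client, group *ent.Group, paths []string) ([]*ent.Document, error) {
	builders := make([]*ent.DocumentCreate, 0, len(paths))
	for _, p := range paths {
		builders = append(builders, client.Document.Create().
			SetTitle(p).
			SetPath(p).
			SetGroup(group))
	}
	// CreateBulk validates each builder and inserts them in one batch.
	return client.Document.CreateBulk(builders...).Save(ctx)
}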


@@ -0,0 +1,115 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentDelete is the builder for deleting a Document entity.
type DocumentDelete struct {
config
hooks []Hook
mutation *DocumentMutation
}
// Where appends a list predicates to the DocumentDelete builder.
func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete {
dd.mutation.Where(ps...)
return dd
}
// Exec executes the deletion query and returns how many vertices were deleted.
func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(dd.hooks) == 0 {
affected, err = dd.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
dd.mutation = mutation
affected, err = dd.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(dd.hooks) - 1; i >= 0; i-- {
if dd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dd.mutation); err != nil {
return 0, err
}
}
return affected, err
}
// ExecX is like Exec, but panics if an error occurs.
func (dd *DocumentDelete) ExecX(ctx context.Context) int {
n, err := dd.Exec(ctx)
if err != nil {
panic(err)
}
return n
}
func (dd *DocumentDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: document.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
if ps := dd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec)
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return affected, err
}
// DocumentDeleteOne is the builder for deleting a single Document entity.
type DocumentDeleteOne struct {
dd *DocumentDelete
}
// Exec executes the deletion query.
func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error {
n, err := ddo.dd.Exec(ctx)
switch {
case err != nil:
return err
case n == 0:
return &NotFoundError{document.Label}
default:
return nil
}
}
// ExecX is like Exec, but panics if an error occurs.
func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) {
ddo.dd.ExecX(ctx)
}
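The delete builder above returns the number of affected rows from Exec. A short sketch follows; the one-year cutoff and the helper name pruneOldDocuments are assumptions, not part of the commit.

package example

import (
	"context"
	"time"

	"github.com/hay-kot/content/backend/ent"
	"github.com/hay-kot/content/backend/ent/document"
)

// pruneOldDocuments is an illustrative sketch, not code from this commit.
func pruneOldDocuments(ctx context.Context, client *ent.Client) (int, error) {
	cutoff := time.Now().AddDate(-1, 0, 0) // anything created more than a year ago
	return client.Document.Delete().
		Where(document.CreatedAtLT(cutoff)).
		Exec(ctx)
}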


@@ -0,0 +1,687 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"database/sql/driver"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentQuery is the builder for querying Document entities.
type DocumentQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Document
withGroup *GroupQuery
withDocumentTokens *DocumentTokenQuery
withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the DocumentQuery builder.
func (dq *DocumentQuery) Where(ps ...predicate.Document) *DocumentQuery {
dq.predicates = append(dq.predicates, ps...)
return dq
}
// Limit adds a limit step to the query.
func (dq *DocumentQuery) Limit(limit int) *DocumentQuery {
dq.limit = &limit
return dq
}
// Offset adds an offset step to the query.
func (dq *DocumentQuery) Offset(offset int) *DocumentQuery {
dq.offset = &offset
return dq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery {
dq.unique = &unique
return dq
}
// Order adds an order step to the query.
func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery {
dq.order = append(dq.order, o...)
return dq
}
// QueryGroup chains the current query on the "group" edge.
func (dq *DocumentQuery) QueryGroup() *GroupQuery {
query := &GroupQuery{config: dq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := dq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, selector),
sqlgraph.To(group.Table, group.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn),
)
fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// QueryDocumentTokens chains the current query on the "document_tokens" edge.
func (dq *DocumentQuery) QueryDocumentTokens() *DocumentTokenQuery {
query := &DocumentTokenQuery{config: dq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := dq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(document.Table, document.FieldID, selector),
sqlgraph.To(documenttoken.Table, documenttoken.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn),
)
fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first Document entity from the query.
// Returns a *NotFoundError when no Document was found.
func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) {
nodes, err := dq.Limit(1).All(ctx)
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{document.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (dq *DocumentQuery) FirstX(ctx context.Context) *Document {
node, err := dq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Document ID from the query.
// Returns a *NotFoundError when no Document ID was found.
func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = dq.Limit(1).IDs(ctx); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{document.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (dq *DocumentQuery) FirstIDX(ctx context.Context) uuid.UUID {
id, err := dq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Document entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Document entity is found.
// Returns a *NotFoundError when no Document entities are found.
func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) {
nodes, err := dq.Limit(2).All(ctx)
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{document.Label}
default:
return nil, &NotSingularError{document.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (dq *DocumentQuery) OnlyX(ctx context.Context) *Document {
node, err := dq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Document ID in the query.
// Returns a *NotSingularError when more than one Document ID is found.
// Returns a *NotFoundError when no entities are found.
func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = dq.Limit(2).IDs(ctx); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{document.Label}
default:
err = &NotSingularError{document.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (dq *DocumentQuery) OnlyIDX(ctx context.Context) uuid.UUID {
id, err := dq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
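
// Illustrative sketch, not part of the generated code: First returns an
// arbitrary matching Document (or *NotFoundError), while Only/OnlyID also
// fail with *NotSingularError when more than one row matches.
func exampleFirstVersusOnly(ctx context.Context, client *Client) error {
	doc, err := client.Document.Query().First(ctx)
	if err != nil && !IsNotFound(err) {
		return err
	}
	_ = doc // nil when no documents exist
	id, err := client.Document.Query().OnlyID(ctx)
	if err != nil {
		return err // *NotFoundError or *NotSingularError
	}
	_ = id
	return nil
}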
// All executes the query and returns a list of Documents.
func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
return dq.sqlAll(ctx)
}
// AllX is like All, but panics if an error occurs.
func (dq *DocumentQuery) AllX(ctx context.Context) []*Document {
nodes, err := dq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Document IDs.
func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID
if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (dq *DocumentQuery) IDsX(ctx context.Context) []uuid.UUID {
ids, err := dq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (dq *DocumentQuery) Count(ctx context.Context) (int, error) {
if err := dq.prepareQuery(ctx); err != nil {
return 0, err
}
return dq.sqlCount(ctx)
}
// CountX is like Count, but panics if an error occurs.
func (dq *DocumentQuery) CountX(ctx context.Context) int {
count, err := dq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) {
if err := dq.prepareQuery(ctx); err != nil {
return false, err
}
return dq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
func (dq *DocumentQuery) ExistX(ctx context.Context) bool {
exist, err := dq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
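
// Illustrative sketch, not part of the generated code: Count and Exist answer
// aggregate questions without loading Document rows into memory.
func exampleCountAndExist(ctx context.Context, client *Client) error {
	total, err := client.Document.Query().Count(ctx)
	if err != nil {
		return err
	}
	found, err := client.Document.Query().Exist(ctx)
	if err != nil {
		return err
	}
	_, _ = total, found
	return nil
}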
// Clone returns a duplicate of the DocumentQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (dq *DocumentQuery) Clone() *DocumentQuery {
if dq == nil {
return nil
}
return &DocumentQuery{
config: dq.config,
limit: dq.limit,
offset: dq.offset,
order: append([]OrderFunc{}, dq.order...),
predicates: append([]predicate.Document{}, dq.predicates...),
withGroup: dq.withGroup.Clone(),
withDocumentTokens: dq.withDocumentTokens.Clone(),
// clone intermediate query.
sql: dq.sql.Clone(),
path: dq.path,
unique: dq.unique,
}
}
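
// Illustrative sketch, not part of the generated code: Clone duplicates the
// builder so a shared base query can feed several terminal calls without the
// calls mutating each other's state.
func exampleReuseBaseQuery(ctx context.Context, client *Client) error {
	base := client.Document.Query()
	total, err := base.Clone().Count(ctx)
	if err != nil {
		return err
	}
	firstTen, err := base.Clone().Limit(10).All(ctx)
	if err != nil {
		return err
	}
	_, _ = total, firstTen
	return nil
}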
// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (dq *DocumentQuery) WithGroup(opts ...func(*GroupQuery)) *DocumentQuery {
query := &GroupQuery{config: dq.config}
for _, opt := range opts {
opt(query)
}
dq.withGroup = query
return dq
}
// WithDocumentTokens tells the query-builder to eager-load the nodes that are connected to
// the "document_tokens" edge. The optional arguments are used to configure the query builder of the edge.
func (dq *DocumentQuery) WithDocumentTokens(opts ...func(*DocumentTokenQuery)) *DocumentQuery {
query := &DocumentTokenQuery{config: dq.config}
for _, opt := range opts {
opt(query)
}
dq.withDocumentTokens = query
return dq
}
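
// Illustrative sketch, not part of the generated code: eager-loading both
// edges in a single All call; the results land on Edges.Group and
// Edges.DocumentTokens via loadGroup/loadDocumentTokens below. The Limit
// helper on DocumentTokenQuery is the analogous generated method and is
// assumed here.
func exampleEagerLoadEdges(ctx context.Context, client *Client) error {
	docs, err := client.Document.Query().
		WithGroup().
		WithDocumentTokens(func(q *DocumentTokenQuery) {
			q.Limit(100) // cap the eager-loaded token query as a whole
		}).
		All(ctx)
	if err != nil {
		return err
	}
	for _, d := range docs {
		_ = d.Edges.Group          // *Group, nil only if the edge row is missing
		_ = d.Edges.DocumentTokens // []*DocumentToken
	}
	return nil
}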
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.Document.Query().
// GroupBy(document.FieldCreatedAt).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy {
grbuild := &DocumentGroupBy{config: dq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
return dq.sqlQuery(ctx), nil
}
grbuild.label = document.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
return grbuild
}
// Select allows selecting one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// }
//
// client.Document.Query().
// Select(document.FieldCreatedAt).
// Scan(ctx, &v)
func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect {
dq.fields = append(dq.fields, fields...)
selbuild := &DocumentSelect{DocumentQuery: dq}
selbuild.label = document.Label
selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan
return selbuild
}
func (dq *DocumentQuery) prepareQuery(ctx context.Context) error {
for _, f := range dq.fields {
if !document.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if dq.path != nil {
prev, err := dq.path(ctx)
if err != nil {
return err
}
dq.sql = prev
}
return nil
}
func (dq *DocumentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Document, error) {
var (
nodes = []*Document{}
withFKs = dq.withFKs
_spec = dq.querySpec()
loadedTypes = [2]bool{
dq.withGroup != nil,
dq.withDocumentTokens != nil,
}
)
if dq.withGroup != nil {
withFKs = true
}
if withFKs {
_spec.Node.Columns = append(_spec.Node.Columns, document.ForeignKeys...)
}
_spec.ScanValues = func(columns []string) ([]interface{}, error) {
return (*Document).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []interface{}) error {
node := &Document{config: dq.config}
nodes = append(nodes, node)
node.Edges.loadedTypes = loadedTypes
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
if query := dq.withGroup; query != nil {
if err := dq.loadGroup(ctx, query, nodes, nil,
func(n *Document, e *Group) { n.Edges.Group = e }); err != nil {
return nil, err
}
}
if query := dq.withDocumentTokens; query != nil {
if err := dq.loadDocumentTokens(ctx, query, nodes,
func(n *Document) { n.Edges.DocumentTokens = []*DocumentToken{} },
func(n *Document, e *DocumentToken) { n.Edges.DocumentTokens = append(n.Edges.DocumentTokens, e) }); err != nil {
return nil, err
}
}
return nodes, nil
}
func (dq *DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Document, init func(*Document), assign func(*Document, *Group)) error {
ids := make([]uuid.UUID, 0, len(nodes))
nodeids := make(map[uuid.UUID][]*Document)
for i := range nodes {
if nodes[i].group_documents == nil {
continue
}
fk := *nodes[i].group_documents
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(group.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
nodes, ok := nodeids[n.ID]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v`, n.ID)
}
for i := range nodes {
assign(nodes[i], n)
}
}
return nil
}
func (dq *DocumentQuery) loadDocumentTokens(ctx context.Context, query *DocumentTokenQuery, nodes []*Document, init func(*Document), assign func(*Document, *DocumentToken)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[uuid.UUID]*Document)
for i := range nodes {
fks = append(fks, nodes[i].ID)
nodeids[nodes[i].ID] = nodes[i]
if init != nil {
init(nodes[i])
}
}
query.withFKs = true
query.Where(predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.InValues(document.DocumentTokensColumn, fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
fk := n.document_document_tokens
if fk == nil {
return fmt.Errorf(`foreign-key "document_document_tokens" is nil for node %v`, n.ID)
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
return nil
}
func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dq.querySpec()
_spec.Node.Columns = dq.fields
if len(dq.fields) > 0 {
_spec.Unique = dq.unique != nil && *dq.unique
}
return sqlgraph.CountNodes(ctx, dq.driver, _spec)
}
func (dq *DocumentQuery) sqlExist(ctx context.Context) (bool, error) {
n, err := dq.sqlCount(ctx)
if err != nil {
return false, fmt.Errorf("ent: check existence: %w", err)
}
return n > 0, nil
}
func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: document.Table,
Columns: document.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
From: dq.sql,
Unique: true,
}
if unique := dq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := dq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, document.FieldID)
for i := range fields {
if fields[i] != document.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := dq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := dq.limit; limit != nil {
_spec.Limit = *limit
}
if offset := dq.offset; offset != nil {
_spec.Offset = *offset
}
if ps := dq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(document.Table)
columns := dq.fields
if len(columns) == 0 {
columns = document.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if dq.sql != nil {
selector = dq.sql
selector.Select(selector.Columns(columns...)...)
}
if dq.unique != nil && *dq.unique {
selector.Distinct()
}
for _, p := range dq.predicates {
p(selector)
}
for _, p := range dq.order {
p(selector)
}
if offset := dq.offset; offset != nil {
// limit is mandatory for the offset clause. We start with
// a default value and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := dq.limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// DocumentGroupBy is the group-by builder for Document entities.
type DocumentGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Aggregate adds the given aggregation functions to the group-by query.
func (dgb *DocumentGroupBy) Aggregate(fns ...AggregateFunc) *DocumentGroupBy {
dgb.fns = append(dgb.fns, fns...)
return dgb
}
// Scan applies the group-by query and scans the result into the given value.
func (dgb *DocumentGroupBy) Scan(ctx context.Context, v interface{}) error {
query, err := dgb.path(ctx)
if err != nil {
return err
}
dgb.sql = query
return dgb.sqlScan(ctx, v)
}
func (dgb *DocumentGroupBy) sqlScan(ctx context.Context, v interface{}) error {
for _, f := range dgb.fields {
if !document.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := dgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := dgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (dgb *DocumentGroupBy) sqlQuery() *sql.Selector {
selector := dgb.sql.Select()
aggregation := make([]string, 0, len(dgb.fns))
for _, fn := range dgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
for _, f := range dgb.fields {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(dgb.fields...)...)
}
// DocumentSelect is the builder for selecting fields of Document entities.
type DocumentSelect struct {
*DocumentQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Scan applies the selector query and scans the result into the given value.
func (ds *DocumentSelect) Scan(ctx context.Context, v interface{}) error {
if err := ds.prepareQuery(ctx); err != nil {
return err
}
ds.sql = ds.DocumentQuery.sqlQuery(ctx)
return ds.sqlScan(ctx, v)
}
func (ds *DocumentSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ds.sql.Query()
if err := ds.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}

View file

@@ -0,0 +1,677 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentUpdate is the builder for updating Document entities.
type DocumentUpdate struct {
config
hooks []Hook
mutation *DocumentMutation
}
// Where appends a list of predicates to the DocumentUpdate builder.
func (du *DocumentUpdate) Where(ps ...predicate.Document) *DocumentUpdate {
du.mutation.Where(ps...)
return du
}
// SetUpdatedAt sets the "updated_at" field.
func (du *DocumentUpdate) SetUpdatedAt(t time.Time) *DocumentUpdate {
du.mutation.SetUpdatedAt(t)
return du
}
// SetTitle sets the "title" field.
func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate {
du.mutation.SetTitle(s)
return du
}
// SetPath sets the "path" field.
func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate {
du.mutation.SetPath(s)
return du
}
// SetGroupID sets the "group" edge to the Group entity by ID.
func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate {
du.mutation.SetGroupID(id)
return du
}
// SetGroup sets the "group" edge to the Group entity.
func (du *DocumentUpdate) SetGroup(g *Group) *DocumentUpdate {
return du.SetGroupID(g.ID)
}
// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
func (du *DocumentUpdate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate {
du.mutation.AddDocumentTokenIDs(ids...)
return du
}
// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
func (du *DocumentUpdate) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return du.AddDocumentTokenIDs(ids...)
}
// Mutation returns the DocumentMutation object of the builder.
func (du *DocumentUpdate) Mutation() *DocumentMutation {
return du.mutation
}
// ClearGroup clears the "group" edge to the Group entity.
func (du *DocumentUpdate) ClearGroup() *DocumentUpdate {
du.mutation.ClearGroup()
return du
}
// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity.
func (du *DocumentUpdate) ClearDocumentTokens() *DocumentUpdate {
du.mutation.ClearDocumentTokens()
return du
}
// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs.
func (du *DocumentUpdate) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate {
du.mutation.RemoveDocumentTokenIDs(ids...)
return du
}
// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities.
func (du *DocumentUpdate) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return du.RemoveDocumentTokenIDs(ids...)
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (du *DocumentUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
du.defaults()
if len(du.hooks) == 0 {
if err = du.check(); err != nil {
return 0, err
}
affected, err = du.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = du.check(); err != nil {
return 0, err
}
du.mutation = mutation
affected, err = du.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(du.hooks) - 1; i >= 0; i-- {
if du.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = du.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, du.mutation); err != nil {
return 0, err
}
}
return affected, err
}
// SaveX is like Save, but panics if an error occurs.
func (du *DocumentUpdate) SaveX(ctx context.Context) int {
affected, err := du.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (du *DocumentUpdate) Exec(ctx context.Context) error {
_, err := du.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (du *DocumentUpdate) ExecX(ctx context.Context) {
if err := du.Exec(ctx); err != nil {
panic(err)
}
}
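
// Illustrative sketch, not part of the generated code: a bulk update built
// with the setters above. Save reports how many rows were changed, and
// defaults() below fills updated_at automatically. The client.Document.Update()
// entry point is the standard generated one and is assumed here.
func exampleBulkRetitle(ctx context.Context, client *Client) (int, error) {
	return client.Document.Update().
		SetTitle("archived"). // hypothetical new title
		Save(ctx)
}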
// defaults sets the default values of the builder before save.
func (du *DocumentUpdate) defaults() {
if _, ok := du.mutation.UpdatedAt(); !ok {
v := document.UpdateDefaultUpdatedAt()
du.mutation.SetUpdatedAt(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (du *DocumentUpdate) check() error {
if v, ok := du.mutation.Title(); ok {
if err := document.TitleValidator(v); err != nil {
return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)}
}
}
if v, ok := du.mutation.Path(); ok {
if err := document.PathValidator(v); err != nil {
return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)}
}
}
if _, ok := du.mutation.GroupID(); du.mutation.GroupCleared() && !ok {
return errors.New(`ent: clearing a required unique edge "Document.group"`)
}
return nil
}
func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: document.Table,
Columns: document.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
if ps := du.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := du.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: document.FieldUpdatedAt,
})
}
if value, ok := du.mutation.Title(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldTitle,
})
}
if value, ok := du.mutation.Path(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldPath,
})
}
if du.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: document.GroupTable,
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := du.mutation.GroupIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: document.GroupTable,
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if du.mutation.DocumentTokensCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := du.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !du.mutation.DocumentTokensCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := du.mutation.DocumentTokensIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{document.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return 0, err
}
return n, nil
}
// DocumentUpdateOne is the builder for updating a single Document entity.
type DocumentUpdateOne struct {
config
fields []string
hooks []Hook
mutation *DocumentMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (duo *DocumentUpdateOne) SetUpdatedAt(t time.Time) *DocumentUpdateOne {
duo.mutation.SetUpdatedAt(t)
return duo
}
// SetTitle sets the "title" field.
func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne {
duo.mutation.SetTitle(s)
return duo
}
// SetPath sets the "path" field.
func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne {
duo.mutation.SetPath(s)
return duo
}
// SetGroupID sets the "group" edge to the Group entity by ID.
func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne {
duo.mutation.SetGroupID(id)
return duo
}
// SetGroup sets the "group" edge to the Group entity.
func (duo *DocumentUpdateOne) SetGroup(g *Group) *DocumentUpdateOne {
return duo.SetGroupID(g.ID)
}
// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs.
func (duo *DocumentUpdateOne) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne {
duo.mutation.AddDocumentTokenIDs(ids...)
return duo
}
// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity.
func (duo *DocumentUpdateOne) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return duo.AddDocumentTokenIDs(ids...)
}
// Mutation returns the DocumentMutation object of the builder.
func (duo *DocumentUpdateOne) Mutation() *DocumentMutation {
return duo.mutation
}
// ClearGroup clears the "group" edge to the Group entity.
func (duo *DocumentUpdateOne) ClearGroup() *DocumentUpdateOne {
duo.mutation.ClearGroup()
return duo
}
// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity.
func (duo *DocumentUpdateOne) ClearDocumentTokens() *DocumentUpdateOne {
duo.mutation.ClearDocumentTokens()
return duo
}
// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs.
func (duo *DocumentUpdateOne) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne {
duo.mutation.RemoveDocumentTokenIDs(ids...)
return duo
}
// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities.
func (duo *DocumentUpdateOne) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return duo.RemoveDocumentTokenIDs(ids...)
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUpdateOne {
duo.fields = append([]string{field}, fields...)
return duo
}
// Save executes the query and returns the updated Document entity.
func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) {
var (
err error
node *Document
)
duo.defaults()
if len(duo.hooks) == 0 {
if err = duo.check(); err != nil {
return nil, err
}
node, err = duo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = duo.check(); err != nil {
return nil, err
}
duo.mutation = mutation
node, err = duo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(duo.hooks) - 1; i >= 0; i-- {
if duo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = duo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, duo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Document)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v)
}
node = nv
}
return node, err
}
// SaveX is like Save, but panics if an error occurs.
func (duo *DocumentUpdateOne) SaveX(ctx context.Context) *Document {
node, err := duo.Save(ctx)
if err != nil {
panic(err)
}
return node
}
// Exec executes the query on the entity.
func (duo *DocumentUpdateOne) Exec(ctx context.Context) error {
_, err := duo.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (duo *DocumentUpdateOne) ExecX(ctx context.Context) {
if err := duo.Exec(ctx); err != nil {
panic(err)
}
}
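
// Illustrative sketch, not part of the generated code: updating a single row
// and getting the refreshed entity back. The UpdateOneID entry point on the
// Document client is the standard generated helper and is assumed here; the
// path value is a placeholder.
func exampleMoveDocument(ctx context.Context, client *Client, id uuid.UUID, g *Group) (*Document, error) {
	return client.Document.UpdateOneID(id).
		SetPath("/srv/docs/manual.pdf"). // hypothetical path
		SetGroup(g).
		Save(ctx)
}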
// defaults sets the default values of the builder before save.
func (duo *DocumentUpdateOne) defaults() {
if _, ok := duo.mutation.UpdatedAt(); !ok {
v := document.UpdateDefaultUpdatedAt()
duo.mutation.SetUpdatedAt(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (duo *DocumentUpdateOne) check() error {
if v, ok := duo.mutation.Title(); ok {
if err := document.TitleValidator(v); err != nil {
return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)}
}
}
if v, ok := duo.mutation.Path(); ok {
if err := document.PathValidator(v); err != nil {
return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)}
}
}
if _, ok := duo.mutation.GroupID(); duo.mutation.GroupCleared() && !ok {
return errors.New(`ent: clearing a required unique edge "Document.group"`)
}
return nil
}
func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: document.Table,
Columns: document.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
id, ok := duo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Document.id" for update`)}
}
_spec.Node.ID.Value = id
if fields := duo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, document.FieldID)
for _, f := range fields {
if !document.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != document.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := duo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := duo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: document.FieldUpdatedAt,
})
}
if value, ok := duo.mutation.Title(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldTitle,
})
}
if value, ok := duo.mutation.Path(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: document.FieldPath,
})
}
if duo.mutation.GroupCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: document.GroupTable,
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := duo.mutation.GroupIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: document.GroupTable,
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if duo.mutation.DocumentTokensCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := duo.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !duo.mutation.DocumentTokensCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := duo.mutation.DocumentTokensIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: document.DocumentTokensTable,
Columns: []string{document.DocumentTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
_node = &Document{config: duo.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues
if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{document.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
return _node, nil
}

View file

@@ -0,0 +1,190 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
)
// DocumentToken is the model entity for the DocumentToken schema.
type DocumentToken struct {
config `json:"-"`
// ID of the ent.
ID uuid.UUID `json:"id,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// Token holds the value of the "token" field.
Token []byte `json:"token,omitempty"`
// Uses holds the value of the "uses" field.
Uses int `json:"uses,omitempty"`
// ExpiresAt holds the value of the "expires_at" field.
ExpiresAt time.Time `json:"expires_at,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DocumentTokenQuery when eager-loading is set.
Edges DocumentTokenEdges `json:"edges"`
document_document_tokens *uuid.UUID
}
// DocumentTokenEdges holds the relations/edges for other nodes in the graph.
type DocumentTokenEdges struct {
// Document holds the value of the document edge.
Document *Document `json:"document,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool
}
// DocumentOrErr returns the Document value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DocumentTokenEdges) DocumentOrErr() (*Document, error) {
if e.loadedTypes[0] {
if e.Document == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: document.Label}
}
return e.Document, nil
}
return nil, &NotLoadedError{edge: "document"}
}
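
// Illustrative sketch, not part of the generated code: DocumentOrErr lets
// callers distinguish "edge not eager-loaded" from "edge loaded but empty".
func exampleOwningDocument(dt *DocumentToken) (*Document, error) {
	doc, err := dt.Edges.DocumentOrErr()
	if err != nil {
		// *NotLoadedError if the edge was never requested,
		// *NotFoundError if it was requested but no row exists.
		return nil, err
	}
	return doc, nil
}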
// scanValues returns the types for scanning values from sql.Rows.
func (*DocumentToken) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
for i := range columns {
switch columns[i] {
case documenttoken.FieldToken:
values[i] = new([]byte)
case documenttoken.FieldUses:
values[i] = new(sql.NullInt64)
case documenttoken.FieldCreatedAt, documenttoken.FieldUpdatedAt, documenttoken.FieldExpiresAt:
values[i] = new(sql.NullTime)
case documenttoken.FieldID:
values[i] = new(uuid.UUID)
case documenttoken.ForeignKeys[0]: // document_document_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type DocumentToken", columns[i])
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DocumentToken fields.
func (dt *DocumentToken) assignValues(columns []string, values []interface{}) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case documenttoken.FieldID:
if value, ok := values[i].(*uuid.UUID); !ok {
return fmt.Errorf("unexpected type %T for field id", values[i])
} else if value != nil {
dt.ID = *value
}
case documenttoken.FieldCreatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field created_at", values[i])
} else if value.Valid {
dt.CreatedAt = value.Time
}
case documenttoken.FieldUpdatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
} else if value.Valid {
dt.UpdatedAt = value.Time
}
case documenttoken.FieldToken:
if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field token", values[i])
} else if value != nil {
dt.Token = *value
}
case documenttoken.FieldUses:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field uses", values[i])
} else if value.Valid {
dt.Uses = int(value.Int64)
}
case documenttoken.FieldExpiresAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
} else if value.Valid {
dt.ExpiresAt = value.Time
}
case documenttoken.ForeignKeys[0]:
if value, ok := values[i].(*sql.NullScanner); !ok {
return fmt.Errorf("unexpected type %T for field document_document_tokens", values[i])
} else if value.Valid {
dt.document_document_tokens = new(uuid.UUID)
*dt.document_document_tokens = *value.S.(*uuid.UUID)
}
}
}
return nil
}
// QueryDocument queries the "document" edge of the DocumentToken entity.
func (dt *DocumentToken) QueryDocument() *DocumentQuery {
return (&DocumentTokenClient{config: dt.config}).QueryDocument(dt)
}
// Update returns a builder for updating this DocumentToken.
// Note that you need to call DocumentToken.Unwrap() before calling this method if this DocumentToken
// was returned from a transaction, and the transaction was committed or rolled back.
func (dt *DocumentToken) Update() *DocumentTokenUpdateOne {
return (&DocumentTokenClient{config: dt.config}).UpdateOne(dt)
}
// Unwrap unwraps the DocumentToken entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (dt *DocumentToken) Unwrap() *DocumentToken {
_tx, ok := dt.config.driver.(*txDriver)
if !ok {
panic("ent: DocumentToken is not a transactional entity")
}
dt.config.driver = _tx.drv
return dt
}
// String implements the fmt.Stringer.
func (dt *DocumentToken) String() string {
var builder strings.Builder
builder.WriteString("DocumentToken(")
builder.WriteString(fmt.Sprintf("id=%v, ", dt.ID))
builder.WriteString("created_at=")
builder.WriteString(dt.CreatedAt.Format(time.ANSIC))
builder.WriteString(", ")
builder.WriteString("updated_at=")
builder.WriteString(dt.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", ")
builder.WriteString("token=")
builder.WriteString(fmt.Sprintf("%v", dt.Token))
builder.WriteString(", ")
builder.WriteString("uses=")
builder.WriteString(fmt.Sprintf("%v", dt.Uses))
builder.WriteString(", ")
builder.WriteString("expires_at=")
builder.WriteString(dt.ExpiresAt.Format(time.ANSIC))
builder.WriteByte(')')
return builder.String()
}
// DocumentTokens is a parsable slice of DocumentToken.
type DocumentTokens []*DocumentToken
func (dt DocumentTokens) config(cfg config) {
for _i := range dt {
dt[_i].config = cfg
}
}

View file

@@ -0,0 +1,85 @@
// Code generated by ent, DO NOT EDIT.
package documenttoken
import (
"time"
"github.com/google/uuid"
)
const (
// Label holds the string label denoting the documenttoken type in the database.
Label = "document_token"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldToken holds the string denoting the token field in the database.
FieldToken = "token"
// FieldUses holds the string denoting the uses field in the database.
FieldUses = "uses"
// FieldExpiresAt holds the string denoting the expires_at field in the database.
FieldExpiresAt = "expires_at"
// EdgeDocument holds the string denoting the document edge name in mutations.
EdgeDocument = "document"
// Table holds the table name of the documenttoken in the database.
Table = "document_tokens"
// DocumentTable is the table that holds the document relation/edge.
DocumentTable = "document_tokens"
// DocumentInverseTable is the table name for the Document entity.
// It exists in this package in order to avoid circular dependency with the "document" package.
DocumentInverseTable = "documents"
// DocumentColumn is the table column denoting the document relation/edge.
DocumentColumn = "document_document_tokens"
)
// Columns holds all SQL columns for documenttoken fields.
var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldToken,
FieldUses,
FieldExpiresAt,
}
// ForeignKeys holds the SQL foreign-keys that are owned by the "document_tokens"
// table and are not defined as standalone fields in the schema.
var ForeignKeys = []string{
"document_document_tokens",
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
for i := range Columns {
if column == Columns[i] {
return true
}
}
for i := range ForeignKeys {
if column == ForeignKeys[i] {
return true
}
}
return false
}
var (
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
DefaultCreatedAt func() time.Time
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
// TokenValidator is a validator for the "token" field. It is called by the builders before save.
TokenValidator func([]byte) error
// DefaultUses holds the default value on creation for the "uses" field.
DefaultUses int
// DefaultExpiresAt holds the default value on creation for the "expires_at" field.
DefaultExpiresAt func() time.Time
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
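
// Illustrative sketch, not part of the generated code: ValidColumn is the
// guard used by the query and mutation builders; it accepts every schema
// column plus the owned "document_document_tokens" foreign key and rejects
// anything else.
func exampleIsQueryableColumn(name string) bool {
	return ValidColumn(name) // e.g. true for FieldExpiresAt, false for "missing"
}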

View file

@@ -0,0 +1,498 @@
// Code generated by ent, DO NOT EDIT.
package documenttoken
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
}
// CreatedAt applies the equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// UpdatedAt applies the equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// Token applies the equality check predicate on the "token" field. It's identical to TokenEQ.
func Token(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldToken), v))
})
}
// Uses applies the equality check predicate on the "uses" field. It's identical to UsesEQ.
func Uses(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUses), v))
})
}
// ExpiresAt applies the equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
func ExpiresAt(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldExpiresAt), v))
})
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
}
// TokenEQ applies the EQ predicate on the "token" field.
func TokenEQ(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldToken), v))
})
}
// TokenNEQ applies the NEQ predicate on the "token" field.
func TokenNEQ(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldToken), v))
})
}
// TokenIn applies the In predicate on the "token" field.
func TokenIn(vs ...[]byte) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldToken), v...))
})
}
// TokenNotIn applies the NotIn predicate on the "token" field.
func TokenNotIn(vs ...[]byte) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldToken), v...))
})
}
// TokenGT applies the GT predicate on the "token" field.
func TokenGT(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldToken), v))
})
}
// TokenGTE applies the GTE predicate on the "token" field.
func TokenGTE(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldToken), v))
})
}
// TokenLT applies the LT predicate on the "token" field.
func TokenLT(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldToken), v))
})
}
// TokenLTE applies the LTE predicate on the "token" field.
func TokenLTE(v []byte) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldToken), v))
})
}
// UsesEQ applies the EQ predicate on the "uses" field.
func UsesEQ(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUses), v))
})
}
// UsesNEQ applies the NEQ predicate on the "uses" field.
func UsesNEQ(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUses), v))
})
}
// UsesIn applies the In predicate on the "uses" field.
func UsesIn(vs ...int) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUses), v...))
})
}
// UsesNotIn applies the NotIn predicate on the "uses" field.
func UsesNotIn(vs ...int) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUses), v...))
})
}
// UsesGT applies the GT predicate on the "uses" field.
func UsesGT(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUses), v))
})
}
// UsesGTE applies the GTE predicate on the "uses" field.
func UsesGTE(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUses), v))
})
}
// UsesLT applies the LT predicate on the "uses" field.
func UsesLT(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUses), v))
})
}
// UsesLTE applies the LTE predicate on the "uses" field.
func UsesLTE(v int) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUses), v))
})
}
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
func ExpiresAtEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldExpiresAt), v))
})
}
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
func ExpiresAtNEQ(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldExpiresAt), v))
})
}
// ExpiresAtIn applies the In predicate on the "expires_at" field.
func ExpiresAtIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldExpiresAt), v...))
})
}
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
func ExpiresAtNotIn(vs ...time.Time) predicate.DocumentToken {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldExpiresAt), v...))
})
}
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
func ExpiresAtGT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldExpiresAt), v))
})
}
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
func ExpiresAtGTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldExpiresAt), v))
})
}
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
func ExpiresAtLT(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldExpiresAt), v))
})
}
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
func ExpiresAtLTE(v time.Time) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldExpiresAt), v))
})
}
// HasDocument applies the HasEdge predicate on the "document" edge.
func HasDocument() predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasDocumentWith applies the HasEdge predicate on the "document" edge with given conditions (other predicates).
func HasDocumentWith(preds ...predicate.Document) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.DocumentToken) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.DocumentToken) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
}
// Not applies the not operator on the given predicate.
func Not(p predicate.DocumentToken) predicate.DocumentToken {
return predicate.DocumentToken(func(s *sql.Selector) {
p(s.Not())
})
}
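
// Illustrative sketch, not part of the generated code: the field and edge
// predicates above compose with And/Or/Not into a single
// predicate.DocumentToken that query, update, and delete builders can accept.
func exampleUsableTokenPredicate(now time.Time) predicate.DocumentToken {
	return And(
		HasDocument(),    // attached to a document
		UsesGT(0),        // has remaining uses
		ExpiresAtGT(now), // not yet expired
	)
}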

View file

@@ -0,0 +1,418 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
)
// DocumentTokenCreate is the builder for creating a DocumentToken entity.
type DocumentTokenCreate struct {
config
mutation *DocumentTokenMutation
hooks []Hook
}
// SetCreatedAt sets the "created_at" field.
func (dtc *DocumentTokenCreate) SetCreatedAt(t time.Time) *DocumentTokenCreate {
dtc.mutation.SetCreatedAt(t)
return dtc
}
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableCreatedAt(t *time.Time) *DocumentTokenCreate {
if t != nil {
dtc.SetCreatedAt(*t)
}
return dtc
}
// SetUpdatedAt sets the "updated_at" field.
func (dtc *DocumentTokenCreate) SetUpdatedAt(t time.Time) *DocumentTokenCreate {
dtc.mutation.SetUpdatedAt(t)
return dtc
}
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableUpdatedAt(t *time.Time) *DocumentTokenCreate {
if t != nil {
dtc.SetUpdatedAt(*t)
}
return dtc
}
// SetToken sets the "token" field.
func (dtc *DocumentTokenCreate) SetToken(b []byte) *DocumentTokenCreate {
dtc.mutation.SetToken(b)
return dtc
}
// SetUses sets the "uses" field.
func (dtc *DocumentTokenCreate) SetUses(i int) *DocumentTokenCreate {
dtc.mutation.SetUses(i)
return dtc
}
// SetNillableUses sets the "uses" field if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableUses(i *int) *DocumentTokenCreate {
if i != nil {
dtc.SetUses(*i)
}
return dtc
}
// SetExpiresAt sets the "expires_at" field.
func (dtc *DocumentTokenCreate) SetExpiresAt(t time.Time) *DocumentTokenCreate {
dtc.mutation.SetExpiresAt(t)
return dtc
}
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableExpiresAt(t *time.Time) *DocumentTokenCreate {
if t != nil {
dtc.SetExpiresAt(*t)
}
return dtc
}
// SetID sets the "id" field.
func (dtc *DocumentTokenCreate) SetID(u uuid.UUID) *DocumentTokenCreate {
dtc.mutation.SetID(u)
return dtc
}
// SetNillableID sets the "id" field if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableID(u *uuid.UUID) *DocumentTokenCreate {
if u != nil {
dtc.SetID(*u)
}
return dtc
}
// SetDocumentID sets the "document" edge to the Document entity by ID.
func (dtc *DocumentTokenCreate) SetDocumentID(id uuid.UUID) *DocumentTokenCreate {
dtc.mutation.SetDocumentID(id)
return dtc
}
// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
func (dtc *DocumentTokenCreate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenCreate {
if id != nil {
dtc = dtc.SetDocumentID(*id)
}
return dtc
}
// SetDocument sets the "document" edge to the Document entity.
func (dtc *DocumentTokenCreate) SetDocument(d *Document) *DocumentTokenCreate {
return dtc.SetDocumentID(d.ID)
}
// Mutation returns the DocumentTokenMutation object of the builder.
func (dtc *DocumentTokenCreate) Mutation() *DocumentTokenMutation {
return dtc.mutation
}
// Save creates the DocumentToken in the database.
func (dtc *DocumentTokenCreate) Save(ctx context.Context) (*DocumentToken, error) {
var (
err error
node *DocumentToken
)
dtc.defaults()
if len(dtc.hooks) == 0 {
if err = dtc.check(); err != nil {
return nil, err
}
node, err = dtc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = dtc.check(); err != nil {
return nil, err
}
dtc.mutation = mutation
if node, err = dtc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(dtc.hooks) - 1; i >= 0; i-- {
if dtc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dtc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, dtc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*DocumentToken)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v)
}
node = nv
}
return node, err
}
// SaveX calls Save and panics if Save returns an error.
func (dtc *DocumentTokenCreate) SaveX(ctx context.Context) *DocumentToken {
v, err := dtc.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (dtc *DocumentTokenCreate) Exec(ctx context.Context) error {
_, err := dtc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dtc *DocumentTokenCreate) ExecX(ctx context.Context) {
if err := dtc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dtc *DocumentTokenCreate) defaults() {
if _, ok := dtc.mutation.CreatedAt(); !ok {
v := documenttoken.DefaultCreatedAt()
dtc.mutation.SetCreatedAt(v)
}
if _, ok := dtc.mutation.UpdatedAt(); !ok {
v := documenttoken.DefaultUpdatedAt()
dtc.mutation.SetUpdatedAt(v)
}
if _, ok := dtc.mutation.Uses(); !ok {
v := documenttoken.DefaultUses
dtc.mutation.SetUses(v)
}
if _, ok := dtc.mutation.ExpiresAt(); !ok {
v := documenttoken.DefaultExpiresAt()
dtc.mutation.SetExpiresAt(v)
}
if _, ok := dtc.mutation.ID(); !ok {
v := documenttoken.DefaultID()
dtc.mutation.SetID(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (dtc *DocumentTokenCreate) check() error {
if _, ok := dtc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DocumentToken.created_at"`)}
}
if _, ok := dtc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DocumentToken.updated_at"`)}
}
if _, ok := dtc.mutation.Token(); !ok {
return &ValidationError{Name: "token", err: errors.New(`ent: missing required field "DocumentToken.token"`)}
}
if v, ok := dtc.mutation.Token(); ok {
if err := documenttoken.TokenValidator(v); err != nil {
return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
}
}
if _, ok := dtc.mutation.Uses(); !ok {
return &ValidationError{Name: "uses", err: errors.New(`ent: missing required field "DocumentToken.uses"`)}
}
if _, ok := dtc.mutation.ExpiresAt(); !ok {
return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "DocumentToken.expires_at"`)}
}
return nil
}
func (dtc *DocumentTokenCreate) sqlSave(ctx context.Context) (*DocumentToken, error) {
_node, _spec := dtc.createSpec()
if err := sqlgraph.CreateNode(ctx, dtc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
if _spec.ID.Value != nil {
if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
_node.ID = *id
} else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
return nil, err
}
}
return _node, nil
}
func (dtc *DocumentTokenCreate) createSpec() (*DocumentToken, *sqlgraph.CreateSpec) {
var (
_node = &DocumentToken{config: dtc.config}
_spec = &sqlgraph.CreateSpec{
Table: documenttoken.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
}
)
if id, ok := dtc.mutation.ID(); ok {
_node.ID = id
_spec.ID.Value = &id
}
if value, ok := dtc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldCreatedAt,
})
_node.CreatedAt = value
}
if value, ok := dtc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldUpdatedAt,
})
_node.UpdatedAt = value
}
if value, ok := dtc.mutation.Token(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBytes,
Value: value,
Column: documenttoken.FieldToken,
})
_node.Token = value
}
if value, ok := dtc.mutation.Uses(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: value,
Column: documenttoken.FieldUses,
})
_node.Uses = value
}
if value, ok := dtc.mutation.ExpiresAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldExpiresAt,
})
_node.ExpiresAt = value
}
if nodes := dtc.mutation.DocumentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: documenttoken.DocumentTable,
Columns: []string{documenttoken.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_node.document_document_tokens = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
return _node, _spec
}
// DocumentTokenCreateBulk is the builder for creating many DocumentToken entities in bulk.
type DocumentTokenCreateBulk struct {
config
builders []*DocumentTokenCreate
}
// Save creates the DocumentToken entities in the database.
func (dtcb *DocumentTokenCreateBulk) Save(ctx context.Context) ([]*DocumentToken, error) {
specs := make([]*sqlgraph.CreateSpec, len(dtcb.builders))
nodes := make([]*DocumentToken, len(dtcb.builders))
mutators := make([]Mutator, len(dtcb.builders))
for i := range dtcb.builders {
func(i int, root context.Context) {
builder := dtcb.builders[i]
builder.defaults()
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err := builder.check(); err != nil {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dtcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, dtcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
}
}
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
mut = builder.hooks[i](mut)
}
mutators[i] = mut
}(i, ctx)
}
if len(mutators) > 0 {
if _, err := mutators[0].Mutate(ctx, dtcb.builders[0].mutation); err != nil {
return nil, err
}
}
return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dtcb *DocumentTokenCreateBulk) SaveX(ctx context.Context) []*DocumentToken {
v, err := dtcb.Save(ctx)
if err != nil {
panic(err)
}
return v
}
// Exec executes the query.
func (dtcb *DocumentTokenCreateBulk) Exec(ctx context.Context) error {
_, err := dtcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dtcb *DocumentTokenCreateBulk) ExecX(ctx context.Context) {
if err := dtcb.Exec(ctx); err != nil {
panic(err)
}
}
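A short sketch of how these builders might be used, assuming the generated client wrapper (client.DocumentToken.Create) and an already fetched *ent.Document; the bulk variant follows the same shape via client.DocumentToken.CreateBulk.

// Sketch only: issue a new token through the create builder.
package main

import (
	"context"
	"time"

	"github.com/hay-kot/content/backend/ent"
)

func newToken(ctx context.Context, client *ent.Client, doc *ent.Document, raw []byte) (*ent.DocumentToken, error) {
	return client.DocumentToken.Create().
		SetToken(raw).                                  // required; checked by TokenValidator in check()
		SetDocument(doc).                               // optional M2O edge to the owning document
		SetExpiresAt(time.Now().Add(30 * time.Minute)). // overrides DefaultExpiresAt
		Save(ctx)                                       // created_at, updated_at, uses and id fall back to defaults()
}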


@ -0,0 +1,115 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentTokenDelete is the builder for deleting a DocumentToken entity.
type DocumentTokenDelete struct {
config
hooks []Hook
mutation *DocumentTokenMutation
}
// Where appends a list of predicates to the DocumentTokenDelete builder.
func (dtd *DocumentTokenDelete) Where(ps ...predicate.DocumentToken) *DocumentTokenDelete {
dtd.mutation.Where(ps...)
return dtd
}
// Exec executes the deletion query and returns how many vertices were deleted.
func (dtd *DocumentTokenDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(dtd.hooks) == 0 {
affected, err = dtd.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
dtd.mutation = mutation
affected, err = dtd.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(dtd.hooks) - 1; i >= 0; i-- {
if dtd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dtd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dtd.mutation); err != nil {
return 0, err
}
}
return affected, err
}
// ExecX is like Exec, but panics if an error occurs.
func (dtd *DocumentTokenDelete) ExecX(ctx context.Context) int {
n, err := dtd.Exec(ctx)
if err != nil {
panic(err)
}
return n
}
func (dtd *DocumentTokenDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: documenttoken.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
if ps := dtd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
affected, err := sqlgraph.DeleteNodes(ctx, dtd.driver, _spec)
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return affected, err
}
// DocumentTokenDeleteOne is the builder for deleting a single DocumentToken entity.
type DocumentTokenDeleteOne struct {
dtd *DocumentTokenDelete
}
// Exec executes the deletion query.
func (dtdo *DocumentTokenDeleteOne) Exec(ctx context.Context) error {
n, err := dtdo.dtd.Exec(ctx)
switch {
case err != nil:
return err
case n == 0:
return &NotFoundError{documenttoken.Label}
default:
return nil
}
}
// ExecX is like Exec, but panics if an error occurs.
func (dtdo *DocumentTokenDeleteOne) ExecX(ctx context.Context) {
dtdo.dtd.ExecX(ctx)
}
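A possible cleanup job built on this delete builder, assuming the generated client wrapper (client.DocumentToken.Delete) and the sibling predicate ExpiresAtLT:

// Sketch only: delete every token whose expires_at lies in the past and
// report how many rows were removed.
package main

import (
	"context"
	"time"

	"github.com/hay-kot/content/backend/ent"
	"github.com/hay-kot/content/backend/ent/documenttoken"
)

func pruneExpired(ctx context.Context, client *ent.Client) (int, error) {
	return client.DocumentToken.Delete().
		Where(documenttoken.ExpiresAtLT(time.Now())).
		Exec(ctx)
}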


@ -0,0 +1,611 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentTokenQuery is the builder for querying DocumentToken entities.
type DocumentTokenQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.DocumentToken
withDocument *DocumentQuery
withFKs bool
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the DocumentTokenQuery builder.
func (dtq *DocumentTokenQuery) Where(ps ...predicate.DocumentToken) *DocumentTokenQuery {
dtq.predicates = append(dtq.predicates, ps...)
return dtq
}
// Limit adds a limit step to the query.
func (dtq *DocumentTokenQuery) Limit(limit int) *DocumentTokenQuery {
dtq.limit = &limit
return dtq
}
// Offset adds an offset step to the query.
func (dtq *DocumentTokenQuery) Offset(offset int) *DocumentTokenQuery {
dtq.offset = &offset
return dtq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dtq *DocumentTokenQuery) Unique(unique bool) *DocumentTokenQuery {
dtq.unique = &unique
return dtq
}
// Order adds an order step to the query.
func (dtq *DocumentTokenQuery) Order(o ...OrderFunc) *DocumentTokenQuery {
dtq.order = append(dtq.order, o...)
return dtq
}
// QueryDocument chains the current query on the "document" edge.
func (dtq *DocumentTokenQuery) QueryDocument() *DocumentQuery {
query := &DocumentQuery{config: dtq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dtq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := dtq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(documenttoken.Table, documenttoken.FieldID, selector),
sqlgraph.To(document.Table, document.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn),
)
fromU = sqlgraph.SetNeighbors(dtq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first DocumentToken entity from the query.
// Returns a *NotFoundError when no DocumentToken was found.
func (dtq *DocumentTokenQuery) First(ctx context.Context) (*DocumentToken, error) {
nodes, err := dtq.Limit(1).All(ctx)
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{documenttoken.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (dtq *DocumentTokenQuery) FirstX(ctx context.Context) *DocumentToken {
node, err := dtq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first DocumentToken ID from the query.
// Returns a *NotFoundError when no DocumentToken ID was found.
func (dtq *DocumentTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = dtq.Limit(1).IDs(ctx); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{documenttoken.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (dtq *DocumentTokenQuery) FirstIDX(ctx context.Context) uuid.UUID {
id, err := dtq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single DocumentToken entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DocumentToken entity is found.
// Returns a *NotFoundError when no DocumentToken entities are found.
func (dtq *DocumentTokenQuery) Only(ctx context.Context) (*DocumentToken, error) {
nodes, err := dtq.Limit(2).All(ctx)
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{documenttoken.Label}
default:
return nil, &NotSingularError{documenttoken.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (dtq *DocumentTokenQuery) OnlyX(ctx context.Context) *DocumentToken {
node, err := dtq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only DocumentToken ID in the query.
// Returns a *NotSingularError when more than one DocumentToken ID is found.
// Returns a *NotFoundError when no entities are found.
func (dtq *DocumentTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = dtq.Limit(2).IDs(ctx); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{documenttoken.Label}
default:
err = &NotSingularError{documenttoken.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (dtq *DocumentTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID {
id, err := dtq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of DocumentTokens.
func (dtq *DocumentTokenQuery) All(ctx context.Context) ([]*DocumentToken, error) {
if err := dtq.prepareQuery(ctx); err != nil {
return nil, err
}
return dtq.sqlAll(ctx)
}
// AllX is like All, but panics if an error occurs.
func (dtq *DocumentTokenQuery) AllX(ctx context.Context) []*DocumentToken {
nodes, err := dtq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of DocumentToken IDs.
func (dtq *DocumentTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) {
var ids []uuid.UUID
if err := dtq.Select(documenttoken.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (dtq *DocumentTokenQuery) IDsX(ctx context.Context) []uuid.UUID {
ids, err := dtq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (dtq *DocumentTokenQuery) Count(ctx context.Context) (int, error) {
if err := dtq.prepareQuery(ctx); err != nil {
return 0, err
}
return dtq.sqlCount(ctx)
}
// CountX is like Count, but panics if an error occurs.
func (dtq *DocumentTokenQuery) CountX(ctx context.Context) int {
count, err := dtq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (dtq *DocumentTokenQuery) Exist(ctx context.Context) (bool, error) {
if err := dtq.prepareQuery(ctx); err != nil {
return false, err
}
return dtq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
func (dtq *DocumentTokenQuery) ExistX(ctx context.Context) bool {
exist, err := dtq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the DocumentTokenQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (dtq *DocumentTokenQuery) Clone() *DocumentTokenQuery {
if dtq == nil {
return nil
}
return &DocumentTokenQuery{
config: dtq.config,
limit: dtq.limit,
offset: dtq.offset,
order: append([]OrderFunc{}, dtq.order...),
predicates: append([]predicate.DocumentToken{}, dtq.predicates...),
withDocument: dtq.withDocument.Clone(),
// clone intermediate query.
sql: dtq.sql.Clone(),
path: dtq.path,
unique: dtq.unique,
}
}
// WithDocument tells the query-builder to eager-load the nodes that are connected to
// the "document" edge. The optional arguments are used to configure the query builder of the edge.
func (dtq *DocumentTokenQuery) WithDocument(opts ...func(*DocumentQuery)) *DocumentTokenQuery {
query := &DocumentQuery{config: dtq.config}
for _, opt := range opts {
opt(query)
}
dtq.withDocument = query
return dtq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.DocumentToken.Query().
// GroupBy(documenttoken.FieldCreatedAt).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (dtq *DocumentTokenQuery) GroupBy(field string, fields ...string) *DocumentTokenGroupBy {
grbuild := &DocumentTokenGroupBy{config: dtq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := dtq.prepareQuery(ctx); err != nil {
return nil, err
}
return dtq.sqlQuery(ctx), nil
}
grbuild.label = documenttoken.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
return grbuild
}
// Select allows selecting one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// CreatedAt time.Time `json:"created_at,omitempty"`
// }
//
// client.DocumentToken.Query().
// Select(documenttoken.FieldCreatedAt).
// Scan(ctx, &v)
func (dtq *DocumentTokenQuery) Select(fields ...string) *DocumentTokenSelect {
dtq.fields = append(dtq.fields, fields...)
selbuild := &DocumentTokenSelect{DocumentTokenQuery: dtq}
selbuild.label = documenttoken.Label
selbuild.flds, selbuild.scan = &dtq.fields, selbuild.Scan
return selbuild
}
func (dtq *DocumentTokenQuery) prepareQuery(ctx context.Context) error {
for _, f := range dtq.fields {
if !documenttoken.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if dtq.path != nil {
prev, err := dtq.path(ctx)
if err != nil {
return err
}
dtq.sql = prev
}
return nil
}
func (dtq *DocumentTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DocumentToken, error) {
var (
nodes = []*DocumentToken{}
withFKs = dtq.withFKs
_spec = dtq.querySpec()
loadedTypes = [1]bool{
dtq.withDocument != nil,
}
)
if dtq.withDocument != nil {
withFKs = true
}
if withFKs {
_spec.Node.Columns = append(_spec.Node.Columns, documenttoken.ForeignKeys...)
}
_spec.ScanValues = func(columns []string) ([]interface{}, error) {
return (*DocumentToken).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []interface{}) error {
node := &DocumentToken{config: dtq.config}
nodes = append(nodes, node)
node.Edges.loadedTypes = loadedTypes
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, dtq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
if query := dtq.withDocument; query != nil {
if err := dtq.loadDocument(ctx, query, nodes, nil,
func(n *DocumentToken, e *Document) { n.Edges.Document = e }); err != nil {
return nil, err
}
}
return nodes, nil
}
func (dtq *DocumentTokenQuery) loadDocument(ctx context.Context, query *DocumentQuery, nodes []*DocumentToken, init func(*DocumentToken), assign func(*DocumentToken, *Document)) error {
ids := make([]uuid.UUID, 0, len(nodes))
nodeids := make(map[uuid.UUID][]*DocumentToken)
for i := range nodes {
if nodes[i].document_document_tokens == nil {
continue
}
fk := *nodes[i].document_document_tokens
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(document.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
nodes, ok := nodeids[n.ID]
if !ok {
return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v`, n.ID)
}
for i := range nodes {
assign(nodes[i], n)
}
}
return nil
}
func (dtq *DocumentTokenQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dtq.querySpec()
_spec.Node.Columns = dtq.fields
if len(dtq.fields) > 0 {
_spec.Unique = dtq.unique != nil && *dtq.unique
}
return sqlgraph.CountNodes(ctx, dtq.driver, _spec)
}
func (dtq *DocumentTokenQuery) sqlExist(ctx context.Context) (bool, error) {
n, err := dtq.sqlCount(ctx)
if err != nil {
return false, fmt.Errorf("ent: check existence: %w", err)
}
return n > 0, nil
}
func (dtq *DocumentTokenQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: documenttoken.Table,
Columns: documenttoken.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
From: dtq.sql,
Unique: true,
}
if unique := dtq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := dtq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID)
for i := range fields {
if fields[i] != documenttoken.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := dtq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := dtq.limit; limit != nil {
_spec.Limit = *limit
}
if offset := dtq.offset; offset != nil {
_spec.Offset = *offset
}
if ps := dtq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (dtq *DocumentTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dtq.driver.Dialect())
t1 := builder.Table(documenttoken.Table)
columns := dtq.fields
if len(columns) == 0 {
columns = documenttoken.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if dtq.sql != nil {
selector = dtq.sql
selector.Select(selector.Columns(columns...)...)
}
if dtq.unique != nil && *dtq.unique {
selector.Distinct()
}
for _, p := range dtq.predicates {
p(selector)
}
for _, p := range dtq.order {
p(selector)
}
if offset := dtq.offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := dtq.limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// DocumentTokenGroupBy is the group-by builder for DocumentToken entities.
type DocumentTokenGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Aggregate adds the given aggregation functions to the group-by query.
func (dtgb *DocumentTokenGroupBy) Aggregate(fns ...AggregateFunc) *DocumentTokenGroupBy {
dtgb.fns = append(dtgb.fns, fns...)
return dtgb
}
// Scan applies the group-by query and scans the result into the given value.
func (dtgb *DocumentTokenGroupBy) Scan(ctx context.Context, v interface{}) error {
query, err := dtgb.path(ctx)
if err != nil {
return err
}
dtgb.sql = query
return dtgb.sqlScan(ctx, v)
}
func (dtgb *DocumentTokenGroupBy) sqlScan(ctx context.Context, v interface{}) error {
for _, f := range dtgb.fields {
if !documenttoken.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := dtgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := dtgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (dtgb *DocumentTokenGroupBy) sqlQuery() *sql.Selector {
selector := dtgb.sql.Select()
aggregation := make([]string, 0, len(dtgb.fns))
for _, fn := range dtgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(dtgb.fields)+len(dtgb.fns))
for _, f := range dtgb.fields {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(dtgb.fields...)...)
}
// DocumentTokenSelect is the builder for selecting fields of DocumentToken entities.
type DocumentTokenSelect struct {
*DocumentTokenQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Scan applies the selector query and scans the result into the given value.
func (dts *DocumentTokenSelect) Scan(ctx context.Context, v interface{}) error {
if err := dts.prepareQuery(ctx); err != nil {
return err
}
dts.sql = dts.DocumentTokenQuery.sqlQuery(ctx)
return dts.sqlScan(ctx, v)
}
func (dts *DocumentTokenSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := dts.sql.Query()
if err := dts.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
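A sketch of resolving a raw token to its document in one query, assuming the generated client wrapper (client.DocumentToken.Query) and the TokenEQ predicate for the bytes field:

// Sketch only: look up a token and eager-load its document edge.
package main

import (
	"context"
	"fmt"

	"github.com/hay-kot/content/backend/ent"
	"github.com/hay-kot/content/backend/ent/documenttoken"
)

func resolveToken(ctx context.Context, client *ent.Client, raw []byte) (*ent.Document, error) {
	tok, err := client.DocumentToken.Query().
		Where(documenttoken.TokenEQ(raw)).
		WithDocument(). // populates tok.Edges.Document via loadDocument above
		Only(ctx)       // *NotFoundError / *NotSingularError for 0 or >1 matches
	if err != nil {
		return nil, fmt.Errorf("resolve token: %w", err)
	}
	// Edges.Document may be nil when the token has no owning document.
	return tok.Edges.Document, nil
}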


@ -0,0 +1,582 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/predicate"
)
// DocumentTokenUpdate is the builder for updating DocumentToken entities.
type DocumentTokenUpdate struct {
config
hooks []Hook
mutation *DocumentTokenMutation
}
// Where appends a list of predicates to the DocumentTokenUpdate builder.
func (dtu *DocumentTokenUpdate) Where(ps ...predicate.DocumentToken) *DocumentTokenUpdate {
dtu.mutation.Where(ps...)
return dtu
}
// SetUpdatedAt sets the "updated_at" field.
func (dtu *DocumentTokenUpdate) SetUpdatedAt(t time.Time) *DocumentTokenUpdate {
dtu.mutation.SetUpdatedAt(t)
return dtu
}
// SetToken sets the "token" field.
func (dtu *DocumentTokenUpdate) SetToken(b []byte) *DocumentTokenUpdate {
dtu.mutation.SetToken(b)
return dtu
}
// SetUses sets the "uses" field.
func (dtu *DocumentTokenUpdate) SetUses(i int) *DocumentTokenUpdate {
dtu.mutation.ResetUses()
dtu.mutation.SetUses(i)
return dtu
}
// SetNillableUses sets the "uses" field if the given value is not nil.
func (dtu *DocumentTokenUpdate) SetNillableUses(i *int) *DocumentTokenUpdate {
if i != nil {
dtu.SetUses(*i)
}
return dtu
}
// AddUses adds i to the "uses" field.
func (dtu *DocumentTokenUpdate) AddUses(i int) *DocumentTokenUpdate {
dtu.mutation.AddUses(i)
return dtu
}
// SetExpiresAt sets the "expires_at" field.
func (dtu *DocumentTokenUpdate) SetExpiresAt(t time.Time) *DocumentTokenUpdate {
dtu.mutation.SetExpiresAt(t)
return dtu
}
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
func (dtu *DocumentTokenUpdate) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdate {
if t != nil {
dtu.SetExpiresAt(*t)
}
return dtu
}
// SetDocumentID sets the "document" edge to the Document entity by ID.
func (dtu *DocumentTokenUpdate) SetDocumentID(id uuid.UUID) *DocumentTokenUpdate {
dtu.mutation.SetDocumentID(id)
return dtu
}
// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
func (dtu *DocumentTokenUpdate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdate {
if id != nil {
dtu = dtu.SetDocumentID(*id)
}
return dtu
}
// SetDocument sets the "document" edge to the Document entity.
func (dtu *DocumentTokenUpdate) SetDocument(d *Document) *DocumentTokenUpdate {
return dtu.SetDocumentID(d.ID)
}
// Mutation returns the DocumentTokenMutation object of the builder.
func (dtu *DocumentTokenUpdate) Mutation() *DocumentTokenMutation {
return dtu.mutation
}
// ClearDocument clears the "document" edge to the Document entity.
func (dtu *DocumentTokenUpdate) ClearDocument() *DocumentTokenUpdate {
dtu.mutation.ClearDocument()
return dtu
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (dtu *DocumentTokenUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
dtu.defaults()
if len(dtu.hooks) == 0 {
if err = dtu.check(); err != nil {
return 0, err
}
affected, err = dtu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = dtu.check(); err != nil {
return 0, err
}
dtu.mutation = mutation
affected, err = dtu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(dtu.hooks) - 1; i >= 0; i-- {
if dtu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dtu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dtu.mutation); err != nil {
return 0, err
}
}
return affected, err
}
// SaveX is like Save, but panics if an error occurs.
func (dtu *DocumentTokenUpdate) SaveX(ctx context.Context) int {
affected, err := dtu.Save(ctx)
if err != nil {
panic(err)
}
return affected
}
// Exec executes the query.
func (dtu *DocumentTokenUpdate) Exec(ctx context.Context) error {
_, err := dtu.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dtu *DocumentTokenUpdate) ExecX(ctx context.Context) {
if err := dtu.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dtu *DocumentTokenUpdate) defaults() {
if _, ok := dtu.mutation.UpdatedAt(); !ok {
v := documenttoken.UpdateDefaultUpdatedAt()
dtu.mutation.SetUpdatedAt(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (dtu *DocumentTokenUpdate) check() error {
if v, ok := dtu.mutation.Token(); ok {
if err := documenttoken.TokenValidator(v); err != nil {
return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
}
}
return nil
}
func (dtu *DocumentTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: documenttoken.Table,
Columns: documenttoken.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
if ps := dtu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := dtu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldUpdatedAt,
})
}
if value, ok := dtu.mutation.Token(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBytes,
Value: value,
Column: documenttoken.FieldToken,
})
}
if value, ok := dtu.mutation.Uses(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: value,
Column: documenttoken.FieldUses,
})
}
if value, ok := dtu.mutation.AddedUses(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: value,
Column: documenttoken.FieldUses,
})
}
if value, ok := dtu.mutation.ExpiresAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldExpiresAt,
})
}
if dtu.mutation.DocumentCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: documenttoken.DocumentTable,
Columns: []string{documenttoken.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := dtu.mutation.DocumentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: documenttoken.DocumentTable,
Columns: []string{documenttoken.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if n, err = sqlgraph.UpdateNodes(ctx, dtu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{documenttoken.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return 0, err
}
return n, nil
}
// DocumentTokenUpdateOne is the builder for updating a single DocumentToken entity.
type DocumentTokenUpdateOne struct {
config
fields []string
hooks []Hook
mutation *DocumentTokenMutation
}
// SetUpdatedAt sets the "updated_at" field.
func (dtuo *DocumentTokenUpdateOne) SetUpdatedAt(t time.Time) *DocumentTokenUpdateOne {
dtuo.mutation.SetUpdatedAt(t)
return dtuo
}
// SetToken sets the "token" field.
func (dtuo *DocumentTokenUpdateOne) SetToken(b []byte) *DocumentTokenUpdateOne {
dtuo.mutation.SetToken(b)
return dtuo
}
// SetUses sets the "uses" field.
func (dtuo *DocumentTokenUpdateOne) SetUses(i int) *DocumentTokenUpdateOne {
dtuo.mutation.ResetUses()
dtuo.mutation.SetUses(i)
return dtuo
}
// SetNillableUses sets the "uses" field if the given value is not nil.
func (dtuo *DocumentTokenUpdateOne) SetNillableUses(i *int) *DocumentTokenUpdateOne {
if i != nil {
dtuo.SetUses(*i)
}
return dtuo
}
// AddUses adds i to the "uses" field.
func (dtuo *DocumentTokenUpdateOne) AddUses(i int) *DocumentTokenUpdateOne {
dtuo.mutation.AddUses(i)
return dtuo
}
// SetExpiresAt sets the "expires_at" field.
func (dtuo *DocumentTokenUpdateOne) SetExpiresAt(t time.Time) *DocumentTokenUpdateOne {
dtuo.mutation.SetExpiresAt(t)
return dtuo
}
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
func (dtuo *DocumentTokenUpdateOne) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdateOne {
if t != nil {
dtuo.SetExpiresAt(*t)
}
return dtuo
}
// SetDocumentID sets the "document" edge to the Document entity by ID.
func (dtuo *DocumentTokenUpdateOne) SetDocumentID(id uuid.UUID) *DocumentTokenUpdateOne {
dtuo.mutation.SetDocumentID(id)
return dtuo
}
// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil.
func (dtuo *DocumentTokenUpdateOne) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdateOne {
if id != nil {
dtuo = dtuo.SetDocumentID(*id)
}
return dtuo
}
// SetDocument sets the "document" edge to the Document entity.
func (dtuo *DocumentTokenUpdateOne) SetDocument(d *Document) *DocumentTokenUpdateOne {
return dtuo.SetDocumentID(d.ID)
}
// Mutation returns the DocumentTokenMutation object of the builder.
func (dtuo *DocumentTokenUpdateOne) Mutation() *DocumentTokenMutation {
return dtuo.mutation
}
// ClearDocument clears the "document" edge to the Document entity.
func (dtuo *DocumentTokenUpdateOne) ClearDocument() *DocumentTokenUpdateOne {
dtuo.mutation.ClearDocument()
return dtuo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (dtuo *DocumentTokenUpdateOne) Select(field string, fields ...string) *DocumentTokenUpdateOne {
dtuo.fields = append([]string{field}, fields...)
return dtuo
}
// Save executes the query and returns the updated DocumentToken entity.
func (dtuo *DocumentTokenUpdateOne) Save(ctx context.Context) (*DocumentToken, error) {
var (
err error
node *DocumentToken
)
dtuo.defaults()
if len(dtuo.hooks) == 0 {
if err = dtuo.check(); err != nil {
return nil, err
}
node, err = dtuo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = dtuo.check(); err != nil {
return nil, err
}
dtuo.mutation = mutation
node, err = dtuo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(dtuo.hooks) - 1; i >= 0; i-- {
if dtuo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dtuo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, dtuo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*DocumentToken)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v)
}
node = nv
}
return node, err
}
// SaveX is like Save, but panics if an error occurs.
func (dtuo *DocumentTokenUpdateOne) SaveX(ctx context.Context) *DocumentToken {
node, err := dtuo.Save(ctx)
if err != nil {
panic(err)
}
return node
}
// Exec executes the query on the entity.
func (dtuo *DocumentTokenUpdateOne) Exec(ctx context.Context) error {
_, err := dtuo.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dtuo *DocumentTokenUpdateOne) ExecX(ctx context.Context) {
if err := dtuo.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dtuo *DocumentTokenUpdateOne) defaults() {
if _, ok := dtuo.mutation.UpdatedAt(); !ok {
v := documenttoken.UpdateDefaultUpdatedAt()
dtuo.mutation.SetUpdatedAt(v)
}
}
// check runs all checks and user-defined validators on the builder.
func (dtuo *DocumentTokenUpdateOne) check() error {
if v, ok := dtuo.mutation.Token(); ok {
if err := documenttoken.TokenValidator(v); err != nil {
return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)}
}
}
return nil
}
func (dtuo *DocumentTokenUpdateOne) sqlSave(ctx context.Context) (_node *DocumentToken, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: documenttoken.Table,
Columns: documenttoken.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: documenttoken.FieldID,
},
},
}
id, ok := dtuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DocumentToken.id" for update`)}
}
_spec.Node.ID.Value = id
if fields := dtuo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID)
for _, f := range fields {
if !documenttoken.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != documenttoken.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := dtuo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if value, ok := dtuo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldUpdatedAt,
})
}
if value, ok := dtuo.mutation.Token(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBytes,
Value: value,
Column: documenttoken.FieldToken,
})
}
if value, ok := dtuo.mutation.Uses(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: value,
Column: documenttoken.FieldUses,
})
}
if value, ok := dtuo.mutation.AddedUses(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: value,
Column: documenttoken.FieldUses,
})
}
if value, ok := dtuo.mutation.ExpiresAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: documenttoken.FieldExpiresAt,
})
}
if dtuo.mutation.DocumentCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: documenttoken.DocumentTable,
Columns: []string{documenttoken.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := dtuo.mutation.DocumentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: documenttoken.DocumentTable,
Columns: []string{documenttoken.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
_node = &DocumentToken{config: dtuo.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues
if err = sqlgraph.UpdateNode(ctx, dtuo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{documenttoken.Label}
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
return nil, err
}
return _node, nil
}
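A sketch of a typical single-row update, assuming the generated client wrapper (client.DocumentToken.UpdateOneID); updated_at is refreshed automatically through UpdateDefaultUpdatedAt in defaults().

// Sketch only: count a use and push the expiry out for one token.
package main

import (
	"context"
	"time"

	"github.com/google/uuid"
	"github.com/hay-kot/content/backend/ent"
)

func touchToken(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.DocumentToken, error) {
	return client.DocumentToken.UpdateOneID(id).
		AddUses(1).
		SetExpiresAt(time.Now().Add(30 * time.Minute)).
		Save(ctx)
}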


@ -11,6 +11,8 @@ import (
"entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"github.com/hay-kot/content/backend/ent/authtokens" "github.com/hay-kot/content/backend/ent/authtokens"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/itemfield" "github.com/hay-kot/content/backend/ent/itemfield"
@ -37,13 +39,15 @@ type OrderFunc func(*sql.Selector)
// columnChecker returns a function that indicates if the column exists in the given table.
func columnChecker(table string) func(string) error {
checks := map[string]func(string) bool{
authtokens.Table: authtokens.ValidColumn,
document.Table: document.ValidColumn,
documenttoken.Table: documenttoken.ValidColumn,
group.Table: group.ValidColumn,
item.Table: item.ValidColumn,
itemfield.Table: itemfield.ValidColumn,
label.Table: label.ValidColumn,
location.Table: location.ValidColumn,
user.Table: user.ValidColumn,
}
check, ok := checks[table]
if !ok {


@ -40,9 +40,11 @@ type GroupEdges struct {
Items []*Item `json:"items,omitempty"`
// Labels holds the value of the labels edge.
Labels []*Label `json:"labels,omitempty"`
// Documents holds the value of the documents edge.
Documents []*Document `json:"documents,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [5]bool
}
// UsersOrErr returns the Users value or an error if the edge
@ -81,6 +83,15 @@ func (e GroupEdges) LabelsOrErr() ([]*Label, error) {
return nil, &NotLoadedError{edge: "labels"}
}
// DocumentsOrErr returns the Documents value or an error if the edge
// was not loaded in eager-loading.
func (e GroupEdges) DocumentsOrErr() ([]*Document, error) {
if e.loadedTypes[4] {
return e.Documents, nil
}
return nil, &NotLoadedError{edge: "documents"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*Group) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
@ -162,6 +173,11 @@ func (gr *Group) QueryLabels() *LabelQuery {
return (&GroupClient{config: gr.config}).QueryLabels(gr)
}
// QueryDocuments queries the "documents" edge of the Group entity.
func (gr *Group) QueryDocuments() *DocumentQuery {
return (&GroupClient{config: gr.config}).QueryDocuments(gr)
}
// Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back.


@ -30,6 +30,8 @@ const (
EdgeItems = "items"
// EdgeLabels holds the string denoting the labels edge name in mutations.
EdgeLabels = "labels"
// EdgeDocuments holds the string denoting the documents edge name in mutations.
EdgeDocuments = "documents"
// Table holds the table name of the group in the database.
Table = "groups"
// UsersTable is the table that holds the users relation/edge.
@ -60,6 +62,13 @@ const (
LabelsInverseTable = "labels"
// LabelsColumn is the table column denoting the labels relation/edge.
LabelsColumn = "group_labels"
// DocumentsTable is the table that holds the documents relation/edge.
DocumentsTable = "documents"
// DocumentsInverseTable is the table name for the Document entity.
// It exists in this package in order to avoid circular dependency with the "document" package.
DocumentsInverseTable = "documents"
// DocumentsColumn is the table column denoting the documents relation/edge.
DocumentsColumn = "group_documents"
)
// Columns holds all SQL columns for group fields.


@ -478,6 +478,34 @@ func HasLabelsWith(preds ...predicate.Label) predicate.Group {
})
}
// HasDocuments applies the HasEdge predicate on the "documents" edge.
func HasDocuments() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentsTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with given conditions (other predicates).
func HasDocumentsWith(preds ...predicate.Document) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
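A small sketch of the new group predicates in use, assuming the generated client wrapper (client.Group.Query); HasDocumentsWith(...) would narrow the result further with document predicates.

// Sketch only: return the groups that own at least one document.
package main

import (
	"context"

	"github.com/hay-kot/content/backend/ent"
	"github.com/hay-kot/content/backend/ent/group"
)

func groupsWithDocuments(ctx context.Context, client *ent.Client) ([]*ent.Group, error) {
	return client.Group.Query().
		Where(group.HasDocuments()).
		All(ctx)
}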
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {


@ -11,6 +11,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/label" "github.com/hay-kot/content/backend/ent/label"
@ -147,6 +148,21 @@ func (gc *GroupCreate) AddLabels(l ...*Label) *GroupCreate {
return gc.AddLabelIDs(ids...)
}
// AddDocumentIDs adds the "documents" edge to the Document entity by IDs.
func (gc *GroupCreate) AddDocumentIDs(ids ...uuid.UUID) *GroupCreate {
gc.mutation.AddDocumentIDs(ids...)
return gc
}
// AddDocuments adds the "documents" edges to the Document entity.
func (gc *GroupCreate) AddDocuments(d ...*Document) *GroupCreate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return gc.AddDocumentIDs(ids...)
}
// Mutation returns the GroupMutation object of the builder.
func (gc *GroupCreate) Mutation() *GroupMutation {
return gc.mutation
@ -410,6 +426,25 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := gc.mutation.DocumentsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
return _node, _spec
}


@ -12,6 +12,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/label" "github.com/hay-kot/content/backend/ent/label"
@ -33,6 +34,7 @@ type GroupQuery struct {
withLocations *LocationQuery
withItems *ItemQuery
withLabels *LabelQuery
withDocuments *DocumentQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
@ -157,6 +159,28 @@ func (gq *GroupQuery) QueryLabels() *LabelQuery {
return query
}
// QueryDocuments chains the current query on the "documents" edge.
func (gq *GroupQuery) QueryDocuments() *DocumentQuery {
query := &DocumentQuery{config: gq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := gq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := gq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(group.Table, group.FieldID, selector),
sqlgraph.To(document.Table, document.FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn),
)
fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first Group entity from the query.
// Returns a *NotFoundError when no Group was found.
func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
@ -342,6 +366,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
withLocations: gq.withLocations.Clone(),
withItems: gq.withItems.Clone(),
withLabels: gq.withLabels.Clone(),
withDocuments: gq.withDocuments.Clone(),
// clone intermediate query.
sql: gq.sql.Clone(),
path: gq.path,
@ -393,6 +418,17 @@ func (gq *GroupQuery) WithLabels(opts ...func(*LabelQuery)) *GroupQuery {
return gq
}
// WithDocuments tells the query-builder to eager-load the nodes that are connected to
// the "documents" edge. The optional arguments are used to configure the query builder of the edge.
func (gq *GroupQuery) WithDocuments(opts ...func(*DocumentQuery)) *GroupQuery {
query := &DocumentQuery{config: gq.config}
for _, opt := range opts {
opt(query)
}
gq.withDocuments = query
return gq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
@ -461,11 +497,12 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
var (
nodes = []*Group{}
_spec = gq.querySpec()
loadedTypes = [4]bool{ loadedTypes = [5]bool{
gq.withUsers != nil,
gq.withLocations != nil,
gq.withItems != nil,
gq.withLabels != nil,
gq.withDocuments != nil,
}
)
_spec.ScanValues = func(columns []string) ([]interface{}, error) {
@ -514,6 +551,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
return nil, err
}
}
if query := gq.withDocuments; query != nil {
if err := gq.loadDocuments(ctx, query, nodes,
func(n *Group) { n.Edges.Documents = []*Document{} },
func(n *Group, e *Document) { n.Edges.Documents = append(n.Edges.Documents, e) }); err != nil {
return nil, err
}
}
return nodes, nil
}
@ -641,6 +685,37 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
return nil
}
func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, nodes []*Group, init func(*Group), assign func(*Group, *Document)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[uuid.UUID]*Group)
for i := range nodes {
fks = append(fks, nodes[i].ID)
nodeids[nodes[i].ID] = nodes[i]
if init != nil {
init(nodes[i])
}
}
query.withFKs = true
query.Where(predicate.Document(func(s *sql.Selector) {
s.Where(sql.InValues(group.DocumentsColumn, fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
return err
}
for _, n := range neighbors {
fk := n.group_documents
if fk == nil {
return fmt.Errorf(`foreign-key "group_documents" is nil for node %v`, n.ID)
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
return nil
}
func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
_spec := gq.querySpec()
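
For orientation, a minimal usage sketch of the eager-loading API generated above, written from a caller outside the ent package; the function name, client variable, and Limit value are illustrative assumptions, not part of this commit.

// Illustrative sketch, not part of the diff: eager-load the new "documents" edge.
func listGroupDocuments(ctx context.Context, client *ent.Client) error {
	groups, err := client.Group.Query().
		WithDocuments(func(q *ent.DocumentQuery) {
			q.Limit(10) // optionally narrow the edge query
		}).
		All(ctx)
	if err != nil {
		return err
	}
	for _, g := range groups {
		for _, d := range g.Edges.Documents {
			fmt.Println(d.Title, d.Path)
		}
	}
	return nil
}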

View file

@ -12,6 +12,7 @@ import (
"entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/label" "github.com/hay-kot/content/backend/ent/label"
@ -119,6 +120,21 @@ func (gu *GroupUpdate) AddLabels(l ...*Label) *GroupUpdate {
return gu.AddLabelIDs(ids...)
}
// AddDocumentIDs adds the "documents" edge to the Document entity by IDs.
func (gu *GroupUpdate) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdate {
gu.mutation.AddDocumentIDs(ids...)
return gu
}
// AddDocuments adds the "documents" edges to the Document entity.
func (gu *GroupUpdate) AddDocuments(d ...*Document) *GroupUpdate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return gu.AddDocumentIDs(ids...)
}
// Mutation returns the GroupMutation object of the builder.
func (gu *GroupUpdate) Mutation() *GroupMutation {
return gu.mutation
@ -208,6 +224,27 @@ func (gu *GroupUpdate) RemoveLabels(l ...*Label) *GroupUpdate {
return gu.RemoveLabelIDs(ids...)
}
// ClearDocuments clears all "documents" edges to the Document entity.
func (gu *GroupUpdate) ClearDocuments() *GroupUpdate {
gu.mutation.ClearDocuments()
return gu
}
// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs.
func (gu *GroupUpdate) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdate {
gu.mutation.RemoveDocumentIDs(ids...)
return gu
}
// RemoveDocuments removes "documents" edges to Document entities.
func (gu *GroupUpdate) RemoveDocuments(d ...*Document) *GroupUpdate {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return gu.RemoveDocumentIDs(ids...)
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
var (
@ -547,6 +584,60 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if gu.mutation.DocumentsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := gu.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !gu.mutation.DocumentsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := gu.mutation.DocumentsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{group.Label}
@ -652,6 +743,21 @@ func (guo *GroupUpdateOne) AddLabels(l ...*Label) *GroupUpdateOne {
return guo.AddLabelIDs(ids...)
}
// AddDocumentIDs adds the "documents" edge to the Document entity by IDs.
func (guo *GroupUpdateOne) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne {
guo.mutation.AddDocumentIDs(ids...)
return guo
}
// AddDocuments adds the "documents" edges to the Document entity.
func (guo *GroupUpdateOne) AddDocuments(d ...*Document) *GroupUpdateOne {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return guo.AddDocumentIDs(ids...)
}
// Mutation returns the GroupMutation object of the builder.
func (guo *GroupUpdateOne) Mutation() *GroupMutation {
return guo.mutation
@ -741,6 +847,27 @@ func (guo *GroupUpdateOne) RemoveLabels(l ...*Label) *GroupUpdateOne {
return guo.RemoveLabelIDs(ids...)
}
// ClearDocuments clears all "documents" edges to the Document entity.
func (guo *GroupUpdateOne) ClearDocuments() *GroupUpdateOne {
guo.mutation.ClearDocuments()
return guo
}
// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs.
func (guo *GroupUpdateOne) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne {
guo.mutation.RemoveDocumentIDs(ids...)
return guo
}
// RemoveDocuments removes "documents" edges to Document entities.
func (guo *GroupUpdateOne) RemoveDocuments(d ...*Document) *GroupUpdateOne {
ids := make([]uuid.UUID, len(d))
for i := range d {
ids[i] = d[i].ID
}
return guo.RemoveDocumentIDs(ids...)
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne {
@ -1110,6 +1237,60 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
if guo.mutation.DocumentsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := guo.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !guo.mutation.DocumentsCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
}
if nodes := guo.mutation.DocumentsIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.O2M,
Inverse: false,
Table: group.DocumentsTable,
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
_node = &Group{config: guo.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues

View file

@ -22,6 +22,32 @@ func (f AuthTokensFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value,
return f(ctx, mv)
}
// The DocumentFunc type is an adapter to allow the use of ordinary
// function as Document mutator.
type DocumentFunc func(context.Context, *ent.DocumentMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f DocumentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.DocumentMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentMutation", m)
}
return f(ctx, mv)
}
// The DocumentTokenFunc type is an adapter to allow the use of ordinary
// function as DocumentToken mutator.
type DocumentTokenFunc func(context.Context, *ent.DocumentTokenMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f DocumentTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.DocumentTokenMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentTokenMutation", m)
}
return f(ctx, mv)
}
// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

View file

@ -38,6 +38,60 @@ var (
},
},
}
// DocumentsColumns holds the columns for the "documents" table.
DocumentsColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "title", Type: field.TypeString, Size: 255},
{Name: "path", Type: field.TypeString, Size: 500},
{Name: "group_documents", Type: field.TypeUUID},
}
// DocumentsTable holds the schema information for the "documents" table.
DocumentsTable = &schema.Table{
Name: "documents",
Columns: DocumentsColumns,
PrimaryKey: []*schema.Column{DocumentsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "documents_groups_documents",
Columns: []*schema.Column{DocumentsColumns[5]},
RefColumns: []*schema.Column{GroupsColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// DocumentTokensColumns holds the columns for the "document_tokens" table.
DocumentTokensColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "token", Type: field.TypeBytes, Unique: true},
{Name: "uses", Type: field.TypeInt, Default: 1},
{Name: "expires_at", Type: field.TypeTime},
{Name: "document_document_tokens", Type: field.TypeUUID, Nullable: true},
}
// DocumentTokensTable holds the schema information for the "document_tokens" table.
DocumentTokensTable = &schema.Table{
Name: "document_tokens",
Columns: DocumentTokensColumns,
PrimaryKey: []*schema.Column{DocumentTokensColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "document_tokens_documents_document_tokens",
Columns: []*schema.Column{DocumentTokensColumns[6]},
RefColumns: []*schema.Column{DocumentsColumns[0]},
OnDelete: schema.Cascade,
},
},
Indexes: []*schema.Index{
{
Name: "documenttoken_token",
Unique: false,
Columns: []*schema.Column{DocumentTokensColumns[3]},
},
},
}
// GroupsColumns holds the columns for the "groups" table.
GroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
@ -246,6 +300,8 @@ var (
// Tables holds all the tables in the schema.
Tables = []*schema.Table{
AuthTokensTable,
DocumentsTable,
DocumentTokensTable,
GroupsTable,
ItemsTable,
ItemFieldsTable,
@ -258,6 +314,8 @@ var (
func init() {
AuthTokensTable.ForeignKeys[0].RefTable = UsersTable
DocumentsTable.ForeignKeys[0].RefTable = GroupsTable
DocumentTokensTable.ForeignKeys[0].RefTable = DocumentsTable
ItemsTable.ForeignKeys[0].RefTable = GroupsTable
ItemsTable.ForeignKeys[1].RefTable = LocationsTable
ItemFieldsTable.ForeignKeys[0].RefTable = ItemsTable

File diff suppressed because it is too large

View file

@ -9,6 +9,12 @@ import (
// AuthTokens is the predicate function for authtokens builders.
type AuthTokens func(*sql.Selector)
// Document is the predicate function for document builders.
type Document func(*sql.Selector)
// DocumentToken is the predicate function for documenttoken builders.
type DocumentToken func(*sql.Selector)
// Group is the predicate function for group builders.
type Group func(*sql.Selector)

View file

@ -7,6 +7,8 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hay-kot/content/backend/ent/authtokens" "github.com/hay-kot/content/backend/ent/authtokens"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/item"
"github.com/hay-kot/content/backend/ent/itemfield" "github.com/hay-kot/content/backend/ent/itemfield"
@ -43,6 +45,92 @@ func init() {
authtokensDescID := authtokensMixinFields0[0].Descriptor()
// authtokens.DefaultID holds the default value on creation for the id field.
authtokens.DefaultID = authtokensDescID.Default.(func() uuid.UUID)
documentMixin := schema.Document{}.Mixin()
documentMixinFields0 := documentMixin[0].Fields()
_ = documentMixinFields0
documentFields := schema.Document{}.Fields()
_ = documentFields
// documentDescCreatedAt is the schema descriptor for created_at field.
documentDescCreatedAt := documentMixinFields0[1].Descriptor()
// document.DefaultCreatedAt holds the default value on creation for the created_at field.
document.DefaultCreatedAt = documentDescCreatedAt.Default.(func() time.Time)
// documentDescUpdatedAt is the schema descriptor for updated_at field.
documentDescUpdatedAt := documentMixinFields0[2].Descriptor()
// document.DefaultUpdatedAt holds the default value on creation for the updated_at field.
document.DefaultUpdatedAt = documentDescUpdatedAt.Default.(func() time.Time)
// document.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
document.UpdateDefaultUpdatedAt = documentDescUpdatedAt.UpdateDefault.(func() time.Time)
// documentDescTitle is the schema descriptor for title field.
documentDescTitle := documentFields[0].Descriptor()
// document.TitleValidator is a validator for the "title" field. It is called by the builders before save.
document.TitleValidator = func() func(string) error {
validators := documentDescTitle.Validators
fns := [...]func(string) error{
validators[0].(func(string) error),
validators[1].(func(string) error),
}
return func(title string) error {
for _, fn := range fns {
if err := fn(title); err != nil {
return err
}
}
return nil
}
}()
// documentDescPath is the schema descriptor for path field.
documentDescPath := documentFields[1].Descriptor()
// document.PathValidator is a validator for the "path" field. It is called by the builders before save.
document.PathValidator = func() func(string) error {
validators := documentDescPath.Validators
fns := [...]func(string) error{
validators[0].(func(string) error),
validators[1].(func(string) error),
}
return func(_path string) error {
for _, fn := range fns {
if err := fn(_path); err != nil {
return err
}
}
return nil
}
}()
// documentDescID is the schema descriptor for id field.
documentDescID := documentMixinFields0[0].Descriptor()
// document.DefaultID holds the default value on creation for the id field.
document.DefaultID = documentDescID.Default.(func() uuid.UUID)
documenttokenMixin := schema.DocumentToken{}.Mixin()
documenttokenMixinFields0 := documenttokenMixin[0].Fields()
_ = documenttokenMixinFields0
documenttokenFields := schema.DocumentToken{}.Fields()
_ = documenttokenFields
// documenttokenDescCreatedAt is the schema descriptor for created_at field.
documenttokenDescCreatedAt := documenttokenMixinFields0[1].Descriptor()
// documenttoken.DefaultCreatedAt holds the default value on creation for the created_at field.
documenttoken.DefaultCreatedAt = documenttokenDescCreatedAt.Default.(func() time.Time)
// documenttokenDescUpdatedAt is the schema descriptor for updated_at field.
documenttokenDescUpdatedAt := documenttokenMixinFields0[2].Descriptor()
// documenttoken.DefaultUpdatedAt holds the default value on creation for the updated_at field.
documenttoken.DefaultUpdatedAt = documenttokenDescUpdatedAt.Default.(func() time.Time)
// documenttoken.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
documenttoken.UpdateDefaultUpdatedAt = documenttokenDescUpdatedAt.UpdateDefault.(func() time.Time)
// documenttokenDescToken is the schema descriptor for token field.
documenttokenDescToken := documenttokenFields[0].Descriptor()
// documenttoken.TokenValidator is a validator for the "token" field. It is called by the builders before save.
documenttoken.TokenValidator = documenttokenDescToken.Validators[0].(func([]byte) error)
// documenttokenDescUses is the schema descriptor for uses field.
documenttokenDescUses := documenttokenFields[1].Descriptor()
// documenttoken.DefaultUses holds the default value on creation for the uses field.
documenttoken.DefaultUses = documenttokenDescUses.Default.(int)
// documenttokenDescExpiresAt is the schema descriptor for expires_at field.
documenttokenDescExpiresAt := documenttokenFields[2].Descriptor()
// documenttoken.DefaultExpiresAt holds the default value on creation for the expires_at field.
documenttoken.DefaultExpiresAt = documenttokenDescExpiresAt.Default.(func() time.Time)
// documenttokenDescID is the schema descriptor for id field.
documenttokenDescID := documenttokenMixinFields0[0].Descriptor()
// documenttoken.DefaultID holds the default value on creation for the id field.
documenttoken.DefaultID = documenttokenDescID.Default.(func() uuid.UUID)
groupMixin := schema.Group{}.Mixin()
groupMixinFields0 := groupMixin[0].Fields()
_ = groupMixinFields0

View file

@ -42,7 +42,6 @@ func (AuthTokens) Edges() []ent.Edge {
func (AuthTokens) Indexes() []ent.Index {
return []ent.Index{
// non-unique index.
index.Fields("token"), index.Fields("token"),
} }
} }

View file

@ -0,0 +1,46 @@
package schema
import (
"entgo.io/ent"
"entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"github.com/hay-kot/content/backend/ent/schema/mixins"
)
// Document holds the schema definition for the Document entity.
type Document struct {
ent.Schema
}
func (Document) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
}
}
// Fields of the Document.
func (Document) Fields() []ent.Field {
return []ent.Field{
field.String("title").
MaxLen(255).
NotEmpty(),
field.String("path").
MaxLen(500).
NotEmpty(),
}
}
// Edges of the Document.
func (Document) Edges() []ent.Edge {
return []ent.Edge{
edge.From("group", Group.Type).
Ref("documents").
Required().
Unique(),
edge.To("document_tokens", DocumentToken.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
}
}
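
Because the group edge is Required() and Unique(), the generated builder will not persist a document without an owning group, and the cascade annotations remove a group's documents (and their tokens) when the group is deleted. A minimal sketch of creating a document through the generated client; the function name, client, and gid are illustrative assumptions, not part of this commit.

// Illustrative sketch, not part of the commit.
func createDocument(ctx context.Context, client *ent.Client, gid uuid.UUID) (*ent.Document, error) {
	return client.Document.Create().
		SetGroupID(gid). // required owning group
		SetTitle("warranty.pdf").
		SetPath("/documents/warranty.pdf").
		Save(ctx)
}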

View file

@ -0,0 +1,50 @@
package schema
import (
"time"
"entgo.io/ent"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
"github.com/hay-kot/content/backend/ent/schema/mixins"
)
// DocumentToken holds the schema definition for the DocumentToken entity.
type DocumentToken struct {
ent.Schema
}
func (DocumentToken) Mixin() []ent.Mixin {
return []ent.Mixin{
mixins.BaseMixin{},
}
}
// Fields of the DocumentToken.
func (DocumentToken) Fields() []ent.Field {
return []ent.Field{
field.Bytes("token").
NotEmpty().
Unique(),
field.Int("uses").
Default(1),
field.Time("expires_at").
Default(func() time.Time { return time.Now().Add(time.Minute * 10) }),
}
}
// Edges of the DocumentToken.
func (DocumentToken) Edges() []ent.Edge {
return []ent.Edge{
edge.From("document", Document.Type).
Ref("document_tokens").
Unique(),
}
}
func (DocumentToken) Indexes() []ent.Index {
return []ent.Index{
index.Fields("token"),
}
}

View file

@ -34,17 +34,25 @@ func (Group) Fields() []ent.Field {
// Edges of the Home.
func (Group) Edges() []ent.Edge {
return []ent.Edge{
edge.To("users", User.Type).Annotations(entsql.Annotation{ edge.To("users", User.Type).
OnDelete: entsql.Cascade, Annotations(entsql.Annotation{
}), OnDelete: entsql.Cascade,
edge.To("locations", Location.Type).Annotations(entsql.Annotation{ }),
OnDelete: entsql.Cascade, edge.To("locations", Location.Type).
}), Annotations(entsql.Annotation{
edge.To("items", Item.Type).Annotations(entsql.Annotation{ OnDelete: entsql.Cascade,
OnDelete: entsql.Cascade, }),
}), edge.To("items", Item.Type).
edge.To("labels", Label.Type).Annotations(entsql.Annotation{ Annotations(entsql.Annotation{
OnDelete: entsql.Cascade, OnDelete: entsql.Cascade,
}), }),
edge.To("labels", Label.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
edge.To("documents", Document.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
}
}

View file

@ -14,6 +14,10 @@ type Tx struct {
config
// AuthTokens is the client for interacting with the AuthTokens builders.
AuthTokens *AuthTokensClient
// Document is the client for interacting with the Document builders.
Document *DocumentClient
// DocumentToken is the client for interacting with the DocumentToken builders.
DocumentToken *DocumentTokenClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// Item is the client for interacting with the Item builders.
@ -162,6 +166,8 @@ func (tx *Tx) Client() *Client {
func (tx *Tx) init() {
tx.AuthTokens = NewAuthTokensClient(tx.config)
tx.Document = NewDocumentClient(tx.config)
tx.DocumentToken = NewDocumentTokenClient(tx.config)
tx.Group = NewGroupClient(tx.config)
tx.Item = NewItemClient(tx.config)
tx.ItemField = NewItemFieldClient(tx.config)

View file

@ -8,9 +8,9 @@ import (
func UserFactory() types.UserCreate {
f := faker.NewFaker()
return types.UserCreate{
Name: f.RandomString(10), Name: f.Str(10),
Email: f.RandomEmail(), Email: f.Email(),
Password: f.RandomString(10), Password: f.Str(10),
IsSuperuser: f.RandomBool(), IsSuperuser: f.Bool(),
}
}

View file

@ -0,0 +1,47 @@
package repo
import (
"context"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent"
"github.com/hay-kot/content/backend/ent/document"
"github.com/hay-kot/content/backend/ent/group"
"github.com/hay-kot/content/backend/internal/types"
)
// DocumentRepository is a repository for Document entity
type DocumentRepository struct {
db *ent.Client
}
func (r *DocumentRepository) Create(ctx context.Context, gid uuid.UUID, doc types.DocumentCreate) (*ent.Document, error) {
return r.db.Document.Create().
SetGroupID(gid).
SetTitle(doc.Title).
SetPath(doc.Path).
Save(ctx)
}
func (r *DocumentRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]*ent.Document, error) {
return r.db.Document.Query().
Where(document.HasGroupWith(group.ID(gid))).
All(ctx)
}
func (r *DocumentRepository) Get(ctx context.Context, id uuid.UUID) (*ent.Document, error) {
return r.db.Document.Query().
Where(document.ID(id)).
Only(ctx)
}
func (r *DocumentRepository) Update(ctx context.Context, id uuid.UUID, doc types.DocumentUpdate) (*ent.Document, error) {
return r.db.Document.UpdateOneID(id).
SetTitle(doc.Title).
SetPath(doc.Path).
Save(ctx)
}
func (r *DocumentRepository) Delete(ctx context.Context, id uuid.UUID) error {
return r.db.Document.DeleteOneID(id).Exec(ctx)
}
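
For orientation, a short sketch of how this repository is used through the AllRepos wiring added later in this commit; the function name and arguments are illustrative assumptions, not part of the diff.

// Illustrative sketch, not part of the commit.
func exampleDocumentRepoUsage(ctx context.Context, db *ent.Client, gid uuid.UUID) error {
	repos := EntAllRepos(db)
	doc, err := repos.Docs.Create(ctx, gid, types.DocumentCreate{
		Title: "receipt.pdf",
		Path:  "/documents/receipt.pdf",
	})
	if err != nil {
		return err
	}
	docs, err := repos.Docs.GetAll(ctx, gid) // all documents owned by the group
	if err != nil {
		return err
	}
	fmt.Println(len(docs), doc.Title)
	return nil
}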

View file

@ -0,0 +1,202 @@
package repo
import (
"context"
"testing"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent"
"github.com/hay-kot/content/backend/internal/types"
"github.com/stretchr/testify/assert"
)
func TestDocumentRepository_Create(t *testing.T) {
type args struct {
ctx context.Context
gid uuid.UUID
doc types.DocumentCreate
}
tests := []struct {
name string
args args
want *ent.Document
wantErr bool
}{
{
name: "create document",
args: args{
ctx: context.Background(),
gid: tGroup.ID,
doc: types.DocumentCreate{
Title: "test document",
Path: "/test/document",
},
},
want: &ent.Document{
Title: "test document",
Path: "/test/document",
},
wantErr: false,
},
{
name: "create document with empty title",
args: args{
ctx: context.Background(),
gid: tGroup.ID,
doc: types.DocumentCreate{
Title: "",
Path: "/test/document",
},
},
want: nil,
wantErr: true,
},
{
name: "create document with empty path",
args: args{
ctx: context.Background(),
gid: tGroup.ID,
doc: types.DocumentCreate{
Title: "test document",
Path: "",
},
},
want: nil,
wantErr: true,
},
}
ids := make([]uuid.UUID, 0, len(tests))
t.Cleanup(func() {
for _, id := range ids {
err := tRepos.Docs.Delete(context.Background(), id)
assert.NoError(t, err)
}
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tRepos.Docs.Create(tt.args.ctx, tt.args.gid, tt.args.doc)
if (err != nil) != tt.wantErr {
t.Errorf("DocumentRepository.Create() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
assert.Error(t, err)
assert.Nil(t, got)
return
}
assert.Equal(t, tt.want.Title, got.Title)
assert.Equal(t, tt.want.Path, got.Path)
ids = append(ids, got.ID)
})
}
}
func useDocs(t *testing.T, num int) []*ent.Document {
t.Helper()
results := make([]*ent.Document, 0, num)
ids := make([]uuid.UUID, 0, num)
for i := 0; i < num; i++ {
doc, err := tRepos.Docs.Create(context.Background(), tGroup.ID, types.DocumentCreate{
Title: fk.Str(10),
Path: fk.Path(),
})
assert.NoError(t, err)
assert.NotNil(t, doc)
results = append(results, doc)
ids = append(ids, doc.ID)
}
t.Cleanup(func() {
for _, id := range ids {
err := tRepos.Docs.Delete(context.Background(), id)
if err != nil {
assert.True(t, ent.IsNotFound(err))
}
}
})
return results
}
func TestDocumentRepository_GetAll(t *testing.T) {
entities := useDocs(t, 10)
for _, entity := range entities {
assert.NotNil(t, entity)
}
all, err := tRepos.Docs.GetAll(context.Background(), tGroup.ID)
assert.NoError(t, err)
assert.Len(t, all, 10)
for _, entity := range all {
assert.NotNil(t, entity)
for _, e := range entities {
if e.ID == entity.ID {
assert.Equal(t, e.Title, entity.Title)
assert.Equal(t, e.Path, entity.Path)
}
}
}
}
func TestDocumentRepository_Get(t *testing.T) {
entities := useDocs(t, 10)
for _, entity := range entities {
got, err := tRepos.Docs.Get(context.Background(), entity.ID)
assert.NoError(t, err)
assert.Equal(t, entity.ID, got.ID)
assert.Equal(t, entity.Title, got.Title)
assert.Equal(t, entity.Path, got.Path)
}
}
func TestDocumentRepository_Update(t *testing.T) {
entities := useDocs(t, 10)
for _, entity := range entities {
got, err := tRepos.Docs.Get(context.Background(), entity.ID)
assert.NoError(t, err)
assert.Equal(t, entity.ID, got.ID)
assert.Equal(t, entity.Title, got.Title)
assert.Equal(t, entity.Path, got.Path)
}
for _, entity := range entities {
updateData := types.DocumentUpdate{
Title: fk.Str(10),
Path: fk.Path(),
}
updated, err := tRepos.Docs.Update(context.Background(), entity.ID, updateData)
assert.NoError(t, err)
assert.Equal(t, entity.ID, updated.ID)
assert.Equal(t, updateData.Title, updated.Title)
assert.Equal(t, updateData.Path, updated.Path)
}
}
func TestDocumentRepository_Delete(t *testing.T) {
entities := useDocs(t, 10)
for _, entity := range entities {
err := tRepos.Docs.Delete(context.Background(), entity.ID)
assert.NoError(t, err)
_, err = tRepos.Docs.Get(context.Background(), entity.ID)
assert.Error(t, err)
}
}

View file

@ -0,0 +1,41 @@
package repo
import (
"context"
"time"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/internal/types"
)
// DocumentTokensRepository is a repository for Document entity
type DocumentTokensRepository struct {
db *ent.Client
}
func (r *DocumentTokensRepository) Create(ctx context.Context, data types.DocumentTokenCreate) (*ent.DocumentToken, error) {
result, err := r.db.DocumentToken.Create().
SetDocumentID(data.DocumentID).
SetToken(data.TokenHash).
SetExpiresAt(data.ExpiresAt).
Save(ctx)
if err != nil {
return nil, err
}
return r.db.DocumentToken.Query().
Where(documenttoken.ID(result.ID)).
WithDocument().
Only(ctx)
}
func (r *DocumentTokensRepository) PurgeExpiredTokens(ctx context.Context) (int, error) {
return r.db.DocumentToken.Delete().Where(documenttoken.ExpiresAtLT(time.Now())).Exec(ctx)
}
func (r *DocumentTokensRepository) Delete(ctx context.Context, id uuid.UUID) error {
return r.db.DocumentToken.DeleteOneID(id).Exec(ctx)
}
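
A brief sketch of issuing a token for a document and purging expired ones; the function name, repos, docID, and hash values are illustrative assumptions (hashing itself happens outside this repository).

// Illustrative sketch, not part of the commit.
func issueAndPurgeDocumentTokens(ctx context.Context, repos *AllRepos, docID uuid.UUID, hash []byte) error {
	created, err := repos.DocTokens.Create(ctx, types.DocumentTokenCreate{
		DocumentID: docID,
		TokenHash:  hash,
		ExpiresAt:  time.Now().Add(10 * time.Minute),
	})
	if err != nil {
		return err
	}
	fmt.Println("token expires at", created.ExpiresAt)
	purged, err := repos.DocTokens.PurgeExpiredTokens(ctx)
	if err != nil {
		return err
	}
	fmt.Println("purged", purged, "expired tokens")
	return nil
}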

View file

@ -0,0 +1,149 @@
package repo
import (
"context"
"testing"
"time"
"github.com/google/uuid"
"github.com/hay-kot/content/backend/ent"
"github.com/hay-kot/content/backend/ent/documenttoken"
"github.com/hay-kot/content/backend/internal/types"
"github.com/stretchr/testify/assert"
)
func TestDocumentTokensRepository_Create(t *testing.T) {
entities := useDocs(t, 1)
doc := entities[0]
expires := fk.Time()
type args struct {
ctx context.Context
data types.DocumentTokenCreate
}
tests := []struct {
name string
args args
want *ent.DocumentToken
wantErr bool
}{
{
name: "create document token",
args: args{
ctx: context.Background(),
data: types.DocumentTokenCreate{
DocumentID: doc.ID,
TokenHash: []byte("token"),
ExpiresAt: expires,
},
},
want: &ent.DocumentToken{
Edges: ent.DocumentTokenEdges{
Document: doc,
},
Token: []byte("token"),
ExpiresAt: expires,
},
wantErr: false,
},
{
name: "create document token with empty token",
args: args{
ctx: context.Background(),
data: types.DocumentTokenCreate{
DocumentID: doc.ID,
TokenHash: []byte(""),
ExpiresAt: expires,
},
},
want: nil,
wantErr: true,
},
{
name: "create document token with empty document id",
args: args{
ctx: context.Background(),
data: types.DocumentTokenCreate{
DocumentID: uuid.Nil,
TokenHash: []byte("token"),
ExpiresAt: expires,
},
},
want: nil,
wantErr: true,
},
}
ids := make([]uuid.UUID, 0, len(tests))
t.Cleanup(func() {
for _, id := range ids {
_ = tRepos.DocTokens.Delete(context.Background(), id)
}
})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tRepos.DocTokens.Create(tt.args.ctx, tt.args.data)
if (err != nil) != tt.wantErr {
t.Errorf("DocumentTokensRepository.Create() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
return
}
assert.Equal(t, tt.want.Token, got.Token)
assert.WithinDuration(t, tt.want.ExpiresAt, got.ExpiresAt, time.Duration(1)*time.Second)
assert.Equal(t, tt.want.Edges.Document.ID, got.Edges.Document.ID)
})
}
}
func useDocTokens(t *testing.T, num int) []*ent.DocumentToken {
entity := useDocs(t, 1)[0]
results := make([]*ent.DocumentToken, 0, num)
ids := make([]uuid.UUID, 0, num)
t.Cleanup(func() {
for _, id := range ids {
_ = tRepos.DocTokens.Delete(context.Background(), id)
}
})
for i := 0; i < num; i++ {
e, err := tRepos.DocTokens.Create(context.Background(), types.DocumentTokenCreate{
DocumentID: entity.ID,
TokenHash: []byte(fk.Str(10)),
ExpiresAt: fk.Time(),
})
assert.NoError(t, err)
results = append(results, e)
ids = append(ids, e.ID)
}
return results
}
func TestDocumentTokensRepository_PurgeExpiredTokens(t *testing.T) {
entities := useDocTokens(t, 2)
// set expired token
tRepos.DocTokens.db.DocumentToken.Update().
Where(documenttoken.ID(entities[0].ID)).
SetExpiresAt(time.Now().Add(-time.Hour)).
ExecX(context.Background())
count, err := tRepos.DocTokens.PurgeExpiredTokens(context.Background())
assert.NoError(t, err)
assert.Equal(t, 1, count)
all, err := tRepos.DocTokens.db.DocumentToken.Query().All(context.Background())
assert.NoError(t, err)
assert.Len(t, all, 1)
assert.Equal(t, entities[1].ID, all[0].ID)
}

View file

@ -12,8 +12,8 @@ import (
func itemFactory() types.ItemCreate {
return types.ItemCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
}
}
@ -141,20 +141,20 @@ func TestItemsRepository_Update(t *testing.T) {
ID: entity.ID,
Name: entity.Name,
LocationID: entity.Edges.Location.ID,
SerialNumber: fk.RandomString(10), SerialNumber: fk.Str(10),
LabelIDs: nil,
ModelNumber: fk.RandomString(10), ModelNumber: fk.Str(10),
Manufacturer: fk.RandomString(10), Manufacturer: fk.Str(10),
PurchaseTime: time.Now(),
PurchaseFrom: fk.RandomString(10), PurchaseFrom: fk.Str(10),
PurchasePrice: 300.99,
SoldTime: time.Now(),
SoldTo: fk.RandomString(10), SoldTo: fk.Str(10),
SoldPrice: 300.99,
SoldNotes: fk.RandomString(10), SoldNotes: fk.Str(10),
Notes: fk.RandomString(10), Notes: fk.Str(10),
WarrantyExpires: time.Now(),
WarrantyDetails: fk.RandomString(10), WarrantyDetails: fk.Str(10),
LifetimeWarranty: true,
}

View file

@ -11,8 +11,8 @@ import (
func labelFactory() types.LabelCreate {
return types.LabelCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
}
}
@ -75,8 +75,8 @@ func TestLabelRepository_Update(t *testing.T) {
updateData := types.LabelUpdate{
ID: loc.ID,
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
}
update, err := tRepos.Labels.Update(context.Background(), updateData)

View file

@ -10,8 +10,8 @@ import (
func locationFactory() types.LocationCreate {
return types.LocationCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
}
}
@ -31,14 +31,14 @@ func TestLocationRepository_Get(t *testing.T) {
func TestLocationRepositoryGetAllWithCount(t *testing.T) {
ctx := context.Background()
result, err := tRepos.Locations.Create(ctx, tGroup.ID, types.LocationCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
})
assert.NoError(t, err)
_, err = tRepos.Items.Create(ctx, tGroup.ID, types.ItemCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
LocationID: result.ID,
})
@ -74,8 +74,8 @@ func TestLocationRepository_Update(t *testing.T) {
updateData := types.LocationUpdate{
ID: loc.ID,
Name: fk.RandomString(10), Name: fk.Str(10),
Description: fk.RandomString(100), Description: fk.Str(100),
}
update, err := tRepos.Locations.Update(context.Background(), updateData)

View file

@ -13,10 +13,10 @@ import (
func userFactory() types.UserCreate {
return types.UserCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Email: fk.RandomEmail(), Email: fk.Email(),
Password: fk.RandomString(10), Password: fk.Str(10),
IsSuperuser: fk.RandomBool(), IsSuperuser: fk.Bool(),
GroupID: tGroup.ID,
}
}
@ -109,8 +109,8 @@ func TestUserRepo_Update(t *testing.T) {
assert.NoError(t, err)
updateData := types.UserUpdate{
Name: fk.RandomString(10), Name: fk.Str(10),
Email: fk.RandomEmail(), Email: fk.Email(),
}
// Update

View file

@ -10,6 +10,8 @@ type AllRepos struct {
Locations *LocationRepository
Labels *LabelRepository
Items *ItemsRepository
Docs *DocumentRepository
DocTokens *DocumentTokensRepository
}
func EntAllRepos(db *ent.Client) *AllRepos {
@ -20,5 +22,7 @@ func EntAllRepos(db *ent.Client) *AllRepos {
Locations: &LocationRepository{db},
Labels: &LabelRepository{db},
Items: &ItemsRepository{db},
Docs: &DocumentRepository{db},
DocTokens: &DocumentTokensRepository{db},
}
}

View file

@ -36,10 +36,10 @@ func bootstrap() {
}
tUser, err = tRepos.Users.Create(ctx, types.UserCreate{
Name: fk.RandomString(10), Name: fk.Str(10),
Email: fk.RandomEmail(), Email: fk.Email(),
Password: fk.RandomString(10), Password: fk.Str(10),
IsSuperuser: fk.RandomBool(), IsSuperuser: fk.Bool(),
GroupID: tGroup.ID,
})
if err != nil {

View file

@ -0,0 +1,29 @@
package types
import (
"time"
"github.com/google/uuid"
)
type DocumentCreate struct {
Title string `json:"name"`
Path string `json:"path"`
}
type DocumentUpdate struct {
ID uuid.UUID `json:"id"`
Title string `json:"name"`
Path string `json:"path"`
}
type DocumentToken struct {
Raw string `json:"raw"`
ExpiresAt time.Time `json:"expiresAt"`
}
type DocumentTokenCreate struct {
TokenHash []byte `json:"tokenHash"`
DocumentID uuid.UUID `json:"documentId"`
ExpiresAt time.Time `json:"expiresAt"`
}

View file

@ -15,7 +15,11 @@ func NewFaker() *Faker {
return &Faker{}
}
func (f *Faker) RandomString(length int) string { func (f *Faker) Time() time.Time {
return time.Now().Add(time.Duration(f.Num(1, 100)) * time.Hour)
}
func (f *Faker) Str(length int) string {
b := make([]rune, length)
for i := range b {
@ -24,14 +28,18 @@ func (f *Faker) RandomString(length int) string {
return string(b)
}
func (f *Faker) RandomEmail() string { func (f *Faker) Path() string {
return f.RandomString(10) + "@email.com" return "/" + f.Str(10) + "/" + f.Str(10) + "/" + f.Str(10)
}
func (f *Faker) RandomBool() bool { func (f *Faker) Email() string {
return f.Str(10) + "@email.com"
}
func (f *Faker) Bool() bool {
return rand.Intn(2) == 1
}
func (f *Faker) RandomNumber(min, max int) int { func (f *Faker) Num(min, max int) int {
return rand.Intn(max-min) + min
}
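
Since the Faker helpers were renamed and extended, a small sketch mapping the old names to the new ones; the wrapper function name is an illustrative assumption, not part of the commit.

// Illustrative sketch, not part of the commit.
func exampleFakerUsage() {
	fk := NewFaker()
	_ = fk.Str(10)     // was RandomString
	_ = fk.Email()     // was RandomEmail
	_ = fk.Bool()      // was RandomBool
	_ = fk.Num(1, 100) // was RandomNumber
	_ = fk.Time()      // new: a time up to ~100 hours in the future
	_ = fk.Path()      // new: three random path segments
}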

View file

@ -25,7 +25,7 @@ func Test_GetRandomString(t *testing.T) {
faker := NewFaker()
for i := 0; i < Loops; i++ {
generated[i] = faker.RandomString(10) generated[i] = faker.Str(10)
}
if !ValidateUnique(generated) {
@ -41,7 +41,7 @@ func Test_GetRandomEmail(t *testing.T) {
faker := NewFaker()
for i := 0; i < Loops; i++ {
generated[i] = faker.RandomEmail() generated[i] = faker.Email()
}
if !ValidateUnique(generated) {
@ -58,7 +58,7 @@ func Test_GetRandomBool(t *testing.T) {
faker := NewFaker()
for i := 0; i < Loops; i++ {
if faker.RandomBool() { if faker.Bool() {
trues++
} else {
falses++
@ -81,7 +81,7 @@ func Test_RandomNumber(t *testing.T) {
last := MIN - 1
for i := 0; i < Loops; i++ {
n := f.RandomNumber(MIN, MAX) n := f.Num(MIN, MAX)
if n == last {
t.Errorf("RandomNumber() failed to generate unique number")

View file

@ -49,7 +49,7 @@ func Test_ErrorBuilder_AddError(t *testing.T) {
errorStrings := make([]string, 10)
for i := 0; i < 10; i++ {
err := errors.New(f.RandomString(10)) err := errors.New(f.Str(10))
randomError[i] = err
errorStrings[i] = err.Error()
}
@ -72,7 +72,7 @@ func Test_ErrorBuilder_Respond(t *testing.T) {
randomError := make([]error, 5)
for i := 0; i < 5; i++ {
err := errors.New(f.RandomString(5)) err := errors.New(f.Str(5))
randomError[i] = err
}