From 1983a686c6f18947bf2c8dc55cb48176d767e240 Mon Sep 17 00:00:00 2001 From: Hayden <64056131+hay-kot@users.noreply.github.com> Date: Fri, 9 Sep 2022 21:50:19 -0800 Subject: [PATCH] repository for document and document tokens --- backend/ent/client.go | 294 +++- backend/ent/config.go | 16 +- backend/ent/document.go | 193 +++ backend/ent/document/document.go | 89 ++ backend/ent/document/where.go | 525 +++++++ backend/ent/document_create.go | 412 +++++ backend/ent/document_delete.go | 115 ++ backend/ent/document_query.go | 687 ++++++++ backend/ent/document_update.go | 677 ++++++++ backend/ent/documenttoken.go | 190 +++ backend/ent/documenttoken/documenttoken.go | 85 + backend/ent/documenttoken/where.go | 498 ++++++ backend/ent/documenttoken_create.go | 418 +++++ backend/ent/documenttoken_delete.go | 115 ++ backend/ent/documenttoken_query.go | 611 ++++++++ backend/ent/documenttoken_update.go | 582 +++++++ backend/ent/ent.go | 18 +- backend/ent/group.go | 18 +- backend/ent/group/group.go | 9 + backend/ent/group/where.go | 28 + backend/ent/group_create.go | 35 + backend/ent/group_query.go | 77 +- backend/ent/group_update.go | 181 +++ backend/ent/hook/hook.go | 26 + backend/ent/migrate/schema.go | 58 + backend/ent/mutation.go | 1376 ++++++++++++++++- backend/ent/predicate/predicate.go | 6 + backend/ent/runtime.go | 88 ++ backend/ent/schema/auth_tokens.go | 1 - backend/ent/schema/document.go | 46 + backend/ent/schema/document_token.go | 50 + backend/ent/schema/group.go | 32 +- backend/ent/tx.go | 6 + backend/internal/mocks/factories/users.go | 8 +- backend/internal/repo/repo_documents.go | 47 + backend/internal/repo/repo_documents_test.go | 202 +++ .../internal/repo/repo_documents_tokens.go | 41 + .../repo/repo_documents_tokens_test.go | 149 ++ backend/internal/repo/repo_items_test.go | 20 +- backend/internal/repo/repo_labels_test.go | 8 +- backend/internal/repo/repo_locations_test.go | 16 +- backend/internal/repo/repo_users_test.go | 12 +- 
backend/internal/repo/repos_all.go | 4 + backend/internal/services/main_test.go | 8 +- backend/internal/types/document_types.go | 29 + backend/pkgs/faker/random.go | 18 +- backend/pkgs/faker/randoms_test.go | 8 +- .../server/response_error_builder_test.go | 4 +- 48 files changed, 8032 insertions(+), 104 deletions(-) create mode 100644 backend/ent/document.go create mode 100644 backend/ent/document/document.go create mode 100644 backend/ent/document/where.go create mode 100644 backend/ent/document_create.go create mode 100644 backend/ent/document_delete.go create mode 100644 backend/ent/document_query.go create mode 100644 backend/ent/document_update.go create mode 100644 backend/ent/documenttoken.go create mode 100644 backend/ent/documenttoken/documenttoken.go create mode 100644 backend/ent/documenttoken/where.go create mode 100644 backend/ent/documenttoken_create.go create mode 100644 backend/ent/documenttoken_delete.go create mode 100644 backend/ent/documenttoken_query.go create mode 100644 backend/ent/documenttoken_update.go create mode 100644 backend/ent/schema/document.go create mode 100644 backend/ent/schema/document_token.go create mode 100644 backend/internal/repo/repo_documents.go create mode 100644 backend/internal/repo/repo_documents_test.go create mode 100644 backend/internal/repo/repo_documents_tokens.go create mode 100644 backend/internal/repo/repo_documents_tokens_test.go create mode 100644 backend/internal/types/document_types.go diff --git a/backend/ent/client.go b/backend/ent/client.go index 5c55b71..0af74ba 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -12,6 +12,8 @@ import ( "github.com/hay-kot/content/backend/ent/migrate" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ 
-31,6 +33,10 @@ type Client struct { Schema *migrate.Schema // AuthTokens is the client for interacting with the AuthTokens builders. AuthTokens *AuthTokensClient + // Document is the client for interacting with the Document builders. + Document *DocumentClient + // DocumentToken is the client for interacting with the DocumentToken builders. + DocumentToken *DocumentTokenClient // Group is the client for interacting with the Group builders. Group *GroupClient // Item is the client for interacting with the Item builders. @@ -57,6 +63,8 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) c.AuthTokens = NewAuthTokensClient(c.config) + c.Document = NewDocumentClient(c.config) + c.DocumentToken = NewDocumentTokenClient(c.config) c.Group = NewGroupClient(c.config) c.Item = NewItemClient(c.config) c.ItemField = NewItemFieldClient(c.config) @@ -94,15 +102,17 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { cfg := c.config cfg.driver = tx return &Tx{ - ctx: ctx, - config: cfg, - AuthTokens: NewAuthTokensClient(cfg), - Group: NewGroupClient(cfg), - Item: NewItemClient(cfg), - ItemField: NewItemFieldClient(cfg), - Label: NewLabelClient(cfg), - Location: NewLocationClient(cfg), - User: NewUserClient(cfg), + ctx: ctx, + config: cfg, + AuthTokens: NewAuthTokensClient(cfg), + Document: NewDocumentClient(cfg), + DocumentToken: NewDocumentTokenClient(cfg), + Group: NewGroupClient(cfg), + Item: NewItemClient(cfg), + ItemField: NewItemFieldClient(cfg), + Label: NewLabelClient(cfg), + Location: NewLocationClient(cfg), + User: NewUserClient(cfg), }, nil } @@ -120,15 +130,17 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) cfg := c.config cfg.driver = &txDriver{tx: tx, drv: c.driver} return &Tx{ - ctx: ctx, - config: cfg, - AuthTokens: NewAuthTokensClient(cfg), - Group: NewGroupClient(cfg), - Item: NewItemClient(cfg), - ItemField: NewItemFieldClient(cfg), - Label: NewLabelClient(cfg), - 
Location: NewLocationClient(cfg), - User: NewUserClient(cfg), + ctx: ctx, + config: cfg, + AuthTokens: NewAuthTokensClient(cfg), + Document: NewDocumentClient(cfg), + DocumentToken: NewDocumentTokenClient(cfg), + Group: NewGroupClient(cfg), + Item: NewItemClient(cfg), + ItemField: NewItemFieldClient(cfg), + Label: NewLabelClient(cfg), + Location: NewLocationClient(cfg), + User: NewUserClient(cfg), }, nil } @@ -158,6 +170,8 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { c.AuthTokens.Use(hooks...) + c.Document.Use(hooks...) + c.DocumentToken.Use(hooks...) c.Group.Use(hooks...) c.Item.Use(hooks...) c.ItemField.Use(hooks...) @@ -272,6 +286,234 @@ func (c *AuthTokensClient) Hooks() []Hook { return c.hooks.AuthTokens } +// DocumentClient is a client for the Document schema. +type DocumentClient struct { + config +} + +// NewDocumentClient returns a client for the Document from the given config. +func NewDocumentClient(c config) *DocumentClient { + return &DocumentClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `document.Hooks(f(g(h())))`. +func (c *DocumentClient) Use(hooks ...Hook) { + c.hooks.Document = append(c.hooks.Document, hooks...) +} + +// Create returns a builder for creating a Document entity. +func (c *DocumentClient) Create() *DocumentCreate { + mutation := newDocumentMutation(c.config, OpCreate) + return &DocumentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Document entities. +func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreateBulk { + return &DocumentCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Document. 
+func (c *DocumentClient) Update() *DocumentUpdate { + mutation := newDocumentMutation(c.config, OpUpdate) + return &DocumentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DocumentClient) UpdateOne(d *Document) *DocumentUpdateOne { + mutation := newDocumentMutation(c.config, OpUpdateOne, withDocument(d)) + return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DocumentClient) UpdateOneID(id uuid.UUID) *DocumentUpdateOne { + mutation := newDocumentMutation(c.config, OpUpdateOne, withDocumentID(id)) + return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Document. +func (c *DocumentClient) Delete() *DocumentDelete { + mutation := newDocumentMutation(c.config, OpDelete) + return &DocumentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *DocumentClient) DeleteOne(d *Document) *DocumentDeleteOne { + return c.DeleteOneID(d.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne { + builder := c.Delete().Where(document.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DocumentDeleteOne{builder} +} + +// Query returns a query builder for Document. +func (c *DocumentClient) Query() *DocumentQuery { + return &DocumentQuery{ + config: c.config, + } +} + +// Get returns a Document entity by its id. +func (c *DocumentClient) Get(ctx context.Context, id uuid.UUID) (*Document, error) { + return c.Query().Where(document.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *DocumentClient) GetX(ctx context.Context, id uuid.UUID) *Document { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroup queries the group edge of a Document. +func (c *DocumentClient) QueryGroup(d *Document) *GroupQuery { + query := &GroupQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDocumentTokens queries the document_tokens edge of a Document. +func (c *DocumentClient) QueryDocumentTokens(d *Document) *DocumentTokenQuery { + query := &DocumentTokenQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, id), + sqlgraph.To(documenttoken.Table, documenttoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DocumentClient) Hooks() []Hook { + return c.hooks.Document +} + +// DocumentTokenClient is a client for the DocumentToken schema. +type DocumentTokenClient struct { + config +} + +// NewDocumentTokenClient returns a client for the DocumentToken from the given config. +func NewDocumentTokenClient(c config) *DocumentTokenClient { + return &DocumentTokenClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `documenttoken.Hooks(f(g(h())))`. 
+func (c *DocumentTokenClient) Use(hooks ...Hook) { + c.hooks.DocumentToken = append(c.hooks.DocumentToken, hooks...) +} + +// Create returns a builder for creating a DocumentToken entity. +func (c *DocumentTokenClient) Create() *DocumentTokenCreate { + mutation := newDocumentTokenMutation(c.config, OpCreate) + return &DocumentTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of DocumentToken entities. +func (c *DocumentTokenClient) CreateBulk(builders ...*DocumentTokenCreate) *DocumentTokenCreateBulk { + return &DocumentTokenCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for DocumentToken. +func (c *DocumentTokenClient) Update() *DocumentTokenUpdate { + mutation := newDocumentTokenMutation(c.config, OpUpdate) + return &DocumentTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DocumentTokenClient) UpdateOne(dt *DocumentToken) *DocumentTokenUpdateOne { + mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentToken(dt)) + return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DocumentTokenClient) UpdateOneID(id uuid.UUID) *DocumentTokenUpdateOne { + mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentTokenID(id)) + return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for DocumentToken. +func (c *DocumentTokenClient) Delete() *DocumentTokenDelete { + mutation := newDocumentTokenMutation(c.config, OpDelete) + return &DocumentTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *DocumentTokenClient) DeleteOne(dt *DocumentToken) *DocumentTokenDeleteOne { + return c.DeleteOneID(dt.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *DocumentTokenClient) DeleteOneID(id uuid.UUID) *DocumentTokenDeleteOne { + builder := c.Delete().Where(documenttoken.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DocumentTokenDeleteOne{builder} +} + +// Query returns a query builder for DocumentToken. +func (c *DocumentTokenClient) Query() *DocumentTokenQuery { + return &DocumentTokenQuery{ + config: c.config, + } +} + +// Get returns a DocumentToken entity by its id. +func (c *DocumentTokenClient) Get(ctx context.Context, id uuid.UUID) (*DocumentToken, error) { + return c.Query().Where(documenttoken.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DocumentTokenClient) GetX(ctx context.Context, id uuid.UUID) *DocumentToken { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryDocument queries the document edge of a DocumentToken. +func (c *DocumentTokenClient) QueryDocument(dt *DocumentToken) *DocumentQuery { + query := &DocumentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := dt.ID + step := sqlgraph.NewStep( + sqlgraph.From(documenttoken.Table, documenttoken.FieldID, id), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn), + ) + fromV = sqlgraph.Neighbors(dt.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DocumentTokenClient) Hooks() []Hook { + return c.hooks.DocumentToken +} + // GroupClient is a client for the Group schema. 
type GroupClient struct { config @@ -421,6 +663,22 @@ func (c *GroupClient) QueryLabels(gr *Group) *LabelQuery { return query } +// QueryDocuments queries the documents edge of a Group. +func (c *GroupClient) QueryDocuments(gr *Group) *DocumentQuery { + query := &DocumentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := gr.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn), + ) + fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *GroupClient) Hooks() []Hook { return c.hooks.Group diff --git a/backend/ent/config.go b/backend/ent/config.go index b0dbf9f..58bafc9 100644 --- a/backend/ent/config.go +++ b/backend/ent/config.go @@ -24,13 +24,15 @@ type config struct { // hooks per client, for fast access. type hooks struct { - AuthTokens []ent.Hook - Group []ent.Hook - Item []ent.Hook - ItemField []ent.Hook - Label []ent.Hook - Location []ent.Hook - User []ent.Hook + AuthTokens []ent.Hook + Document []ent.Hook + DocumentToken []ent.Hook + Group []ent.Hook + Item []ent.Hook + ItemField []ent.Hook + Label []ent.Hook + Location []ent.Hook + User []ent.Hook } // Options applies the options on the config object. diff --git a/backend/ent/document.go b/backend/ent/document.go new file mode 100644 index 0000000..c29a515 --- /dev/null +++ b/backend/ent/document.go @@ -0,0 +1,193 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/group" +) + +// Document is the model entity for the Document schema. +type Document struct { + config `json:"-"` + // ID of the ent. 
+ ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Title holds the value of the "title" field. + Title string `json:"title,omitempty"` + // Path holds the value of the "path" field. + Path string `json:"path,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DocumentQuery when eager-loading is set. + Edges DocumentEdges `json:"edges"` + group_documents *uuid.UUID +} + +// DocumentEdges holds the relations/edges for other nodes in the graph. +type DocumentEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // DocumentTokens holds the value of the document_tokens edge. + DocumentTokens []*DocumentToken `json:"document_tokens,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DocumentEdges) GroupOrErr() (*Group, error) { + if e.loadedTypes[0] { + if e.Group == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: group.Label} + } + return e.Group, nil + } + return nil, &NotLoadedError{edge: "group"} +} + +// DocumentTokensOrErr returns the DocumentTokens value or an error if the edge +// was not loaded in eager-loading. +func (e DocumentEdges) DocumentTokensOrErr() ([]*DocumentToken, error) { + if e.loadedTypes[1] { + return e.DocumentTokens, nil + } + return nil, &NotLoadedError{edge: "document_tokens"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Document) scanValues(columns []string) ([]interface{}, error) { + values := make([]interface{}, len(columns)) + for i := range columns { + switch columns[i] { + case document.FieldTitle, document.FieldPath: + values[i] = new(sql.NullString) + case document.FieldCreatedAt, document.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case document.FieldID: + values[i] = new(uuid.UUID) + case document.ForeignKeys[0]: // group_documents + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + return nil, fmt.Errorf("unexpected column %q for type Document", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Document fields. +func (d *Document) assignValues(columns []string, values []interface{}) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case document.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + d.ID = *value + } + case document.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + d.CreatedAt = value.Time + } + case document.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + d.UpdatedAt = value.Time + } + case document.FieldTitle: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field title", values[i]) + } else if value.Valid { + d.Title = value.String + } + case document.FieldPath: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field path", values[i]) + } else if value.Valid { + d.Path = value.String 
+ } + case document.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field group_documents", values[i]) + } else if value.Valid { + d.group_documents = new(uuid.UUID) + *d.group_documents = *value.S.(*uuid.UUID) + } + } + } + return nil +} + +// QueryGroup queries the "group" edge of the Document entity. +func (d *Document) QueryGroup() *GroupQuery { + return (&DocumentClient{config: d.config}).QueryGroup(d) +} + +// QueryDocumentTokens queries the "document_tokens" edge of the Document entity. +func (d *Document) QueryDocumentTokens() *DocumentTokenQuery { + return (&DocumentClient{config: d.config}).QueryDocumentTokens(d) +} + +// Update returns a builder for updating this Document. +// Note that you need to call Document.Unwrap() before calling this method if this Document +// was returned from a transaction, and the transaction was committed or rolled back. +func (d *Document) Update() *DocumentUpdateOne { + return (&DocumentClient{config: d.config}).UpdateOne(d) +} + +// Unwrap unwraps the Document entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (d *Document) Unwrap() *Document { + _tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("ent: Document is not a transactional entity") + } + d.config.driver = _tx.drv + return d +} + +// String implements the fmt.Stringer. 
+func (d *Document) String() string { + var builder strings.Builder + builder.WriteString("Document(") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("title=") + builder.WriteString(d.Title) + builder.WriteString(", ") + builder.WriteString("path=") + builder.WriteString(d.Path) + builder.WriteByte(')') + return builder.String() +} + +// Documents is a parsable slice of Document. +type Documents []*Document + +func (d Documents) config(cfg config) { + for _i := range d { + d[_i].config = cfg + } +} diff --git a/backend/ent/document/document.go b/backend/ent/document/document.go new file mode 100644 index 0000000..b019a6f --- /dev/null +++ b/backend/ent/document/document.go @@ -0,0 +1,89 @@ +// Code generated by ent, DO NOT EDIT. + +package document + +import ( + "time" + + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the document type in the database. + Label = "document" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldTitle holds the string denoting the title field in the database. + FieldTitle = "title" + // FieldPath holds the string denoting the path field in the database. + FieldPath = "path" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeDocumentTokens holds the string denoting the document_tokens edge name in mutations. + EdgeDocumentTokens = "document_tokens" + // Table holds the table name of the document in the database. 
+ Table = "documents" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "documents" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_documents" + // DocumentTokensTable is the table that holds the document_tokens relation/edge. + DocumentTokensTable = "document_tokens" + // DocumentTokensInverseTable is the table name for the DocumentToken entity. + // It exists in this package in order to avoid circular dependency with the "documenttoken" package. + DocumentTokensInverseTable = "document_tokens" + // DocumentTokensColumn is the table column denoting the document_tokens relation/edge. + DocumentTokensColumn = "document_document_tokens" +) + +// Columns holds all SQL columns for document fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldTitle, + FieldPath, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "documents" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "group_documents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
+ UpdateDefaultUpdatedAt func() time.Time + // TitleValidator is a validator for the "title" field. It is called by the builders before save. + TitleValidator func(string) error + // PathValidator is a validator for the "path" field. It is called by the builders before save. + PathValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) diff --git a/backend/ent/document/where.go b/backend/ent/document/where.go new file mode 100644 index 0000000..e01f4bb --- /dev/null +++ b/backend/ent/document/where.go @@ -0,0 +1,525 @@ +// Code generated by ent, DO NOT EDIT. + +package document + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Title applies equality check predicate on the "title" field. It's identical to TitleEQ. +func Title(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTitle), v)) + }) +} + +// Path applies equality check predicate on the "path" field. It's identical to PathEQ. 
+func Path(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPath), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. 
+func CreatedAtLTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
+func UpdatedAtLTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TitleEQ applies the EQ predicate on the "title" field. +func TitleEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTitle), v)) + }) +} + +// TitleNEQ applies the NEQ predicate on the "title" field. +func TitleNEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldTitle), v)) + }) +} + +// TitleIn applies the In predicate on the "title" field. +func TitleIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldTitle), v...)) + }) +} + +// TitleNotIn applies the NotIn predicate on the "title" field. +func TitleNotIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldTitle), v...)) + }) +} + +// TitleGT applies the GT predicate on the "title" field. +func TitleGT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldTitle), v)) + }) +} + +// TitleGTE applies the GTE predicate on the "title" field. +func TitleGTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldTitle), v)) + }) +} + +// TitleLT applies the LT predicate on the "title" field. +func TitleLT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldTitle), v)) + }) +} + +// TitleLTE applies the LTE predicate on the "title" field. 
+func TitleLTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldTitle), v)) + }) +} + +// TitleContains applies the Contains predicate on the "title" field. +func TitleContains(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldTitle), v)) + }) +} + +// TitleHasPrefix applies the HasPrefix predicate on the "title" field. +func TitleHasPrefix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldTitle), v)) + }) +} + +// TitleHasSuffix applies the HasSuffix predicate on the "title" field. +func TitleHasSuffix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldTitle), v)) + }) +} + +// TitleEqualFold applies the EqualFold predicate on the "title" field. +func TitleEqualFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldTitle), v)) + }) +} + +// TitleContainsFold applies the ContainsFold predicate on the "title" field. +func TitleContainsFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldTitle), v)) + }) +} + +// PathEQ applies the EQ predicate on the "path" field. +func PathEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPath), v)) + }) +} + +// PathNEQ applies the NEQ predicate on the "path" field. +func PathNEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldPath), v)) + }) +} + +// PathIn applies the In predicate on the "path" field. 
+func PathIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldPath), v...)) + }) +} + +// PathNotIn applies the NotIn predicate on the "path" field. +func PathNotIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldPath), v...)) + }) +} + +// PathGT applies the GT predicate on the "path" field. +func PathGT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldPath), v)) + }) +} + +// PathGTE applies the GTE predicate on the "path" field. +func PathGTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldPath), v)) + }) +} + +// PathLT applies the LT predicate on the "path" field. +func PathLT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldPath), v)) + }) +} + +// PathLTE applies the LTE predicate on the "path" field. +func PathLTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldPath), v)) + }) +} + +// PathContains applies the Contains predicate on the "path" field. +func PathContains(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldPath), v)) + }) +} + +// PathHasPrefix applies the HasPrefix predicate on the "path" field. +func PathHasPrefix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldPath), v)) + }) +} + +// PathHasSuffix applies the HasSuffix predicate on the "path" field. 
+func PathHasSuffix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldPath), v)) + }) +} + +// PathEqualFold applies the EqualFold predicate on the "path" field. +func PathEqualFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldPath), v)) + }) +} + +// PathContainsFold applies the ContainsFold predicate on the "path" field. +func PathContainsFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldPath), v)) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDocumentTokens applies the HasEdge predicate on the "document_tokens" edge. 
+func HasDocumentTokens() predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTokensTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentTokensWith applies the HasEdge predicate on the "document_tokens" edge with a given conditions (other predicates). +func HasDocumentTokensWith(preds ...predicate.DocumentToken) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTokensInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/backend/ent/document_create.go b/backend/ent/document_create.go new file mode 100644 index 0000000..6ffce74 --- /dev/null +++ b/backend/ent/document_create.go @@ -0,0 +1,412 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" +) + +// DocumentCreate is the builder for creating a Document entity. +type DocumentCreate struct { + config + mutation *DocumentMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dc *DocumentCreate) SetCreatedAt(t time.Time) *DocumentCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableCreatedAt(t *time.Time) *DocumentCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the "updated_at" field. +func (dc *DocumentCreate) SetUpdatedAt(t time.Time) *DocumentCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableUpdatedAt(t *time.Time) *DocumentCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetTitle sets the "title" field. +func (dc *DocumentCreate) SetTitle(s string) *DocumentCreate { + dc.mutation.SetTitle(s) + return dc +} + +// SetPath sets the "path" field. +func (dc *DocumentCreate) SetPath(s string) *DocumentCreate { + dc.mutation.SetPath(s) + return dc +} + +// SetID sets the "id" field. +func (dc *DocumentCreate) SetID(u uuid.UUID) *DocumentCreate { + dc.mutation.SetID(u) + return dc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableID(u *uuid.UUID) *DocumentCreate { + if u != nil { + dc.SetID(*u) + } + return dc +} + +// SetGroupID sets the "group" edge to the Group entity by ID. 
+func (dc *DocumentCreate) SetGroupID(id uuid.UUID) *DocumentCreate { + dc.mutation.SetGroupID(id) + return dc +} + +// SetGroup sets the "group" edge to the Group entity. +func (dc *DocumentCreate) SetGroup(g *Group) *DocumentCreate { + return dc.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (dc *DocumentCreate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentCreate { + dc.mutation.AddDocumentTokenIDs(ids...) + return dc +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (dc *DocumentCreate) AddDocumentTokens(d ...*DocumentToken) *DocumentCreate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dc.AddDocumentTokenIDs(ids...) +} + +// Mutation returns the DocumentMutation object of the builder. +func (dc *DocumentCreate) Mutation() *DocumentMutation { + return dc.mutation +} + +// Save creates the Document in the database. +func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) { + var ( + err error + node *Document + ) + dc.defaults() + if len(dc.hooks) == 0 { + if err = dc.check(); err != nil { + return nil, err + } + node, err = dc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dc.check(); err != nil { + return nil, err + } + dc.mutation = mutation + if node, err = dc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(dc.hooks) - 1; i >= 0; i-- { + if dc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Document) + if !ok { + return nil, 
fmt.Errorf("unexpected node type %T returned from DocumentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (dc *DocumentCreate) SaveX(ctx context.Context) *Document { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dc *DocumentCreate) Exec(ctx context.Context) error { + _, err := dc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dc *DocumentCreate) ExecX(ctx context.Context) { + if err := dc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dc *DocumentCreate) defaults() { + if _, ok := dc.mutation.CreatedAt(); !ok { + v := document.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + v := document.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } + if _, ok := dc.mutation.ID(); !ok { + v := document.DefaultID() + dc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (dc *DocumentCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Document.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Document.updated_at"`)} + } + if _, ok := dc.mutation.Title(); !ok { + return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "Document.title"`)} + } + if v, ok := dc.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if _, ok := dc.mutation.Path(); !ok { + return &ValidationError{Name: "path", err: errors.New(`ent: missing required field "Document.path"`)} + } + if v, ok := dc.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := dc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Document.group"`)} + } + return nil +} + +func (dc *DocumentCreate) sqlSave(ctx context.Context) (*Document, error) { + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + return _node, nil +} + +func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) { + var ( + _node = &Document{config: dc.config} + _spec = &sqlgraph.CreateSpec{ + Table: document.Table, + ID: &sqlgraph.FieldSpec{ + 
Type: field.TypeUUID, + Column: document.FieldID, + }, + } + ) + if id, ok := dc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := dc.mutation.Title(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + _node.Title = value + } + if value, ok := dc.mutation.Path(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + _node.Path = value + } + if nodes := dc.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.group_documents = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := dc.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + 
return _node, _spec +} + +// DocumentCreateBulk is the builder for creating many Document entities in bulk. +type DocumentCreateBulk struct { + config + builders []*DocumentCreate +} + +// Save creates the Document entities in the database. +func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) { + specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) + nodes := make([]*Document, len(dcb.builders)) + mutators := make([]Mutator, len(dcb.builders)) + for i := range dcb.builders { + func(i int, root context.Context) { + builder := dcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dcb *DocumentCreateBulk) SaveX(ctx context.Context) []*Document { + v, err := dcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (dcb *DocumentCreateBulk) Exec(ctx context.Context) error { + _, err := dcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcb *DocumentCreateBulk) ExecX(ctx context.Context) { + if err := dcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/document_delete.go b/backend/ent/document_delete.go new file mode 100644 index 0000000..2b5f19a --- /dev/null +++ b/backend/ent/document_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentDelete is the builder for deleting a Document entity. +type DocumentDelete struct { + config + hooks []Hook + mutation *DocumentMutation +} + +// Where appends a list predicates to the DocumentDelete builder. +func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete { + dd.mutation.Where(ps...) + return dd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dd.hooks) == 0 { + affected, err = dd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dd.mutation = mutation + affected, err = dd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dd.hooks) - 1; i >= 0; i-- { + if dd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dd *DocumentDelete) ExecX(ctx context.Context) int { + n, err := dd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dd *DocumentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + if ps := dd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// DocumentDeleteOne is the builder for deleting a single Document entity. +type DocumentDeleteOne struct { + dd *DocumentDelete +} + +// Exec executes the deletion query. 
+func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error { + n, err := ddo.dd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{document.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) { + ddo.dd.ExecX(ctx) +} diff --git a/backend/ent/document_query.go b/backend/ent/document_query.go new file mode 100644 index 0000000..e8d9418 --- /dev/null +++ b/backend/ent/document_query.go @@ -0,0 +1,687 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentQuery is the builder for querying Document entities. +type DocumentQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Document + withGroup *GroupQuery + withDocumentTokens *DocumentTokenQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DocumentQuery builder. +func (dq *DocumentQuery) Where(ps ...predicate.Document) *DocumentQuery { + dq.predicates = append(dq.predicates, ps...) + return dq +} + +// Limit adds a limit step to the query. +func (dq *DocumentQuery) Limit(limit int) *DocumentQuery { + dq.limit = &limit + return dq +} + +// Offset adds an offset step to the query. +func (dq *DocumentQuery) Offset(offset int) *DocumentQuery { + dq.offset = &offset + return dq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery { + dq.unique = &unique + return dq +} + +// Order adds an order step to the query. +func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryGroup chains the current query on the "group" edge. +func (dq *DocumentQuery) QueryGroup() *GroupQuery { + query := &GroupQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDocumentTokens chains the current query on the "document_tokens" edge. +func (dq *DocumentQuery) QueryDocumentTokens() *DocumentTokenQuery { + query := &DocumentTokenQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, selector), + sqlgraph.To(documenttoken.Table, documenttoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Document entity from the query. +// Returns a *NotFoundError when no Document was found. 
+func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) { + nodes, err := dq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{document.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DocumentQuery) FirstX(ctx context.Context) *Document { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Document ID from the query. +// Returns a *NotFoundError when no Document ID was found. +func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{document.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dq *DocumentQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Document entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Document entity is found. +// Returns a *NotFoundError when no Document entities are found. +func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) { + nodes, err := dq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{document.Label} + default: + return nil, &NotSingularError{document.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DocumentQuery) OnlyX(ctx context.Context) *Document { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Document ID in the query. 
+// Returns a *NotSingularError when more than one Document ID is found. +// Returns a *NotFoundError when no entities are found. +func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{document.Label} + default: + err = &NotSingularError{document.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DocumentQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Documents. +func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DocumentQuery) AllX(ctx context.Context) []*Document { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Document IDs. +func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { + var ids []uuid.UUID + if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DocumentQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DocumentQuery) Count(ctx context.Context) (int, error) { + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return dq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. 
+func (dq *DocumentQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) { + if err := dq.prepareQuery(ctx); err != nil { + return false, err + } + return dq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DocumentQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DocumentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DocumentQuery) Clone() *DocumentQuery { + if dq == nil { + return nil + } + return &DocumentQuery{ + config: dq.config, + limit: dq.limit, + offset: dq.offset, + order: append([]OrderFunc{}, dq.order...), + predicates: append([]predicate.Document{}, dq.predicates...), + withGroup: dq.withGroup.Clone(), + withDocumentTokens: dq.withDocumentTokens.Clone(), + // clone intermediate query. + sql: dq.sql.Clone(), + path: dq.path, + unique: dq.unique, + } +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DocumentQuery) WithGroup(opts ...func(*GroupQuery)) *DocumentQuery { + query := &GroupQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withGroup = query + return dq +} + +// WithDocumentTokens tells the query-builder to eager-load the nodes that are connected to +// the "document_tokens" edge. The optional arguments are used to configure the query builder of the edge. 
+func (dq *DocumentQuery) WithDocumentTokens(opts ...func(*DocumentTokenQuery)) *DocumentQuery { + query := &DocumentTokenQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withDocumentTokens = query + return dq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Document.Query(). +// GroupBy(document.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy { + grbuild := &DocumentGroupBy{config: dq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlQuery(ctx), nil + } + grbuild.label = document.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Document.Query(). +// Select(document.FieldCreatedAt). +// Scan(ctx, &v) +func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect { + dq.fields = append(dq.fields, fields...) 
+ selbuild := &DocumentSelect{DocumentQuery: dq} + selbuild.label = document.Label + selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan + return selbuild +} + +func (dq *DocumentQuery) prepareQuery(ctx context.Context) error { + for _, f := range dq.fields { + if !document.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DocumentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Document, error) { + var ( + nodes = []*Document{} + withFKs = dq.withFKs + _spec = dq.querySpec() + loadedTypes = [2]bool{ + dq.withGroup != nil, + dq.withDocumentTokens != nil, + } + ) + if dq.withGroup != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, document.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]interface{}, error) { + return (*Document).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []interface{}) error { + node := &Document{config: dq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dq.withGroup; query != nil { + if err := dq.loadGroup(ctx, query, nodes, nil, + func(n *Document, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := dq.withDocumentTokens; query != nil { + if err := dq.loadDocumentTokens(ctx, query, nodes, + func(n *Document) { n.Edges.DocumentTokens = []*DocumentToken{} }, + func(n *Document, e *DocumentToken) { n.Edges.DocumentTokens = append(n.Edges.DocumentTokens, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dq 
*DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Document, init func(*Document), assign func(*Document, *Group)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Document) + for i := range nodes { + if nodes[i].group_documents == nil { + continue + } + fk := *nodes[i].group_documents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (dq *DocumentQuery) loadDocumentTokens(ctx context.Context, query *DocumentTokenQuery, nodes []*Document, init func(*Document), assign func(*Document, *DocumentToken)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Document) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.InValues(document.DocumentTokensColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.document_document_tokens + if fk == nil { + return fmt.Errorf(`foreign-key "document_document_tokens" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + _spec.Node.Columns = dq.fields + if len(dq.fields) > 0 { + _spec.Unique = dq.unique != nil && *dq.unique + } + 
return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DocumentQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := dq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %w", err) + } + return n > 0, nil +} + +func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + From: dq.sql, + Unique: true, + } + if unique := dq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := dq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, document.FieldID) + for i := range fields { + if fields[i] != document.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(document.Table) + columns := dq.fields + if len(columns) == 0 { + columns = document.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if dq.unique != nil && *dq.unique { + selector.Distinct() + } + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector) + } + if offset := dq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DocumentGroupBy is the group-by builder for Document entities. +type DocumentGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DocumentGroupBy) Aggregate(fns ...AggregateFunc) *DocumentGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the group-by query and scans the result into the given value. 
+func (dgb *DocumentGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := dgb.path(ctx) + if err != nil { + return err + } + dgb.sql = query + return dgb.sqlScan(ctx, v) +} + +func (dgb *DocumentGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range dgb.fields { + if !document.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dgb *DocumentGroupBy) sqlQuery() *sql.Selector { + selector := dgb.sql.Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) + for _, f := range dgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(dgb.fields...)...) +} + +// DocumentSelect is the builder for selecting fields of Document entities. +type DocumentSelect struct { + *DocumentQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ds *DocumentSelect) Scan(ctx context.Context, v interface{}) error { + if err := ds.prepareQuery(ctx); err != nil { + return err + } + ds.sql = ds.DocumentQuery.sqlQuery(ctx) + return ds.sqlScan(ctx, v) +} + +func (ds *DocumentSelect) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := ds.sql.Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/document_update.go b/backend/ent/document_update.go new file mode 100644 index 0000000..dfd276d --- /dev/null +++ b/backend/ent/document_update.go @@ -0,0 +1,677 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentUpdate is the builder for updating Document entities. +type DocumentUpdate struct { + config + hooks []Hook + mutation *DocumentMutation +} + +// Where appends a list predicates to the DocumentUpdate builder. +func (du *DocumentUpdate) Where(ps ...predicate.Document) *DocumentUpdate { + du.mutation.Where(ps...) + return du +} + +// SetUpdatedAt sets the "updated_at" field. +func (du *DocumentUpdate) SetUpdatedAt(t time.Time) *DocumentUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// SetTitle sets the "title" field. +func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate { + du.mutation.SetTitle(s) + return du +} + +// SetPath sets the "path" field. +func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate { + du.mutation.SetPath(s) + return du +} + +// SetGroupID sets the "group" edge to the Group entity by ID. 
+func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate { + du.mutation.SetGroupID(id) + return du +} + +// SetGroup sets the "group" edge to the Group entity. +func (du *DocumentUpdate) SetGroup(g *Group) *DocumentUpdate { + return du.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (du *DocumentUpdate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.AddDocumentTokenIDs(ids...) + return du +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (du *DocumentUpdate) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.AddDocumentTokenIDs(ids...) +} + +// Mutation returns the DocumentMutation object of the builder. +func (du *DocumentUpdate) Mutation() *DocumentMutation { + return du.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (du *DocumentUpdate) ClearGroup() *DocumentUpdate { + du.mutation.ClearGroup() + return du +} + +// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity. +func (du *DocumentUpdate) ClearDocumentTokens() *DocumentUpdate { + du.mutation.ClearDocumentTokens() + return du +} + +// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs. +func (du *DocumentUpdate) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.RemoveDocumentTokenIDs(ids...) + return du +} + +// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities. +func (du *DocumentUpdate) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.RemoveDocumentTokenIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (du *DocumentUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + du.defaults() + if len(du.hooks) == 0 { + if err = du.check(); err != nil { + return 0, err + } + affected, err = du.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = du.check(); err != nil { + return 0, err + } + du.mutation = mutation + affected, err = du.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(du.hooks) - 1; i >= 0; i-- { + if du.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = du.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, du.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DocumentUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DocumentUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DocumentUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (du *DocumentUpdate) defaults() { + if _, ok := du.mutation.UpdatedAt(); !ok { + v := document.UpdateDefaultUpdatedAt() + du.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (du *DocumentUpdate) check() error { + if v, ok := du.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if v, ok := du.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := du.mutation.GroupID(); du.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Document.group"`) + } + return nil +} + +func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + if ps := du.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + } + if value, ok := du.mutation.Title(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + } + if value, ok := du.mutation.Path(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + } + if du.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + 
_spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if du.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !du.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = 
append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{document.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// DocumentUpdateOne is the builder for updating a single Document entity. +type DocumentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DocumentMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (duo *DocumentUpdateOne) SetUpdatedAt(t time.Time) *DocumentUpdateOne { + duo.mutation.SetUpdatedAt(t) + return duo +} + +// SetTitle sets the "title" field. +func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne { + duo.mutation.SetTitle(s) + return duo +} + +// SetPath sets the "path" field. +func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne { + duo.mutation.SetPath(s) + return duo +} + +// SetGroupID sets the "group" edge to the Group entity by ID. +func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne { + duo.mutation.SetGroupID(id) + return duo +} + +// SetGroup sets the "group" edge to the Group entity. +func (duo *DocumentUpdateOne) SetGroup(g *Group) *DocumentUpdateOne { + return duo.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (duo *DocumentUpdateOne) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.AddDocumentTokenIDs(ids...) + return duo +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (duo *DocumentUpdateOne) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return duo.AddDocumentTokenIDs(ids...) +} + +// Mutation returns the DocumentMutation object of the builder. 
+func (duo *DocumentUpdateOne) Mutation() *DocumentMutation { + return duo.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (duo *DocumentUpdateOne) ClearGroup() *DocumentUpdateOne { + duo.mutation.ClearGroup() + return duo +} + +// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity. +func (duo *DocumentUpdateOne) ClearDocumentTokens() *DocumentUpdateOne { + duo.mutation.ClearDocumentTokens() + return duo +} + +// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs. +func (duo *DocumentUpdateOne) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.RemoveDocumentTokenIDs(ids...) + return duo +} + +// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities. +func (duo *DocumentUpdateOne) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return duo.RemoveDocumentTokenIDs(ids...) +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUpdateOne { + duo.fields = append([]string{field}, fields...) + return duo +} + +// Save executes the query and returns the updated Document entity. 
+func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) { + var ( + err error + node *Document + ) + duo.defaults() + if len(duo.hooks) == 0 { + if err = duo.check(); err != nil { + return nil, err + } + node, err = duo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = duo.check(); err != nil { + return nil, err + } + duo.mutation = mutation + node, err = duo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(duo.hooks) - 1; i >= 0; i-- { + if duo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = duo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, duo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Document) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (duo *DocumentUpdateOne) SaveX(ctx context.Context) *Document { + node, err := duo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (duo *DocumentUpdateOne) Exec(ctx context.Context) error { + _, err := duo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (duo *DocumentUpdateOne) ExecX(ctx context.Context) { + if err := duo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (duo *DocumentUpdateOne) defaults() { + if _, ok := duo.mutation.UpdatedAt(); !ok { + v := document.UpdateDefaultUpdatedAt() + duo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (duo *DocumentUpdateOne) check() error { + if v, ok := duo.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if v, ok := duo.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := duo.mutation.GroupID(); duo.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Document.group"`) + } + return nil +} + +func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + id, ok := duo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Document.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := duo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, document.FieldID) + for _, f := range fields { + if !document.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != document.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := duo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := duo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + } + if value, ok := duo.mutation.Title(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + } + if value, ok := duo.mutation.Path(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + } + if duo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if duo.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !duo.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range 
nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Document{config: duo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{document.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/backend/ent/documenttoken.go b/backend/ent/documenttoken.go new file mode 100644 index 0000000..c3b0a9e --- /dev/null +++ b/backend/ent/documenttoken.go @@ -0,0 +1,190 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" +) + +// DocumentToken is the model entity for the DocumentToken schema. +type DocumentToken struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Token holds the value of the "token" field. 
+ Token []byte `json:"token,omitempty"` + // Uses holds the value of the "uses" field. + Uses int `json:"uses,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DocumentTokenQuery when eager-loading is set. + Edges DocumentTokenEdges `json:"edges"` + document_document_tokens *uuid.UUID +} + +// DocumentTokenEdges holds the relations/edges for other nodes in the graph. +type DocumentTokenEdges struct { + // Document holds the value of the document edge. + Document *Document `json:"document,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// DocumentOrErr returns the Document value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DocumentTokenEdges) DocumentOrErr() (*Document, error) { + if e.loadedTypes[0] { + if e.Document == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: document.Label} + } + return e.Document, nil + } + return nil, &NotLoadedError{edge: "document"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*DocumentToken) scanValues(columns []string) ([]interface{}, error) { + values := make([]interface{}, len(columns)) + for i := range columns { + switch columns[i] { + case documenttoken.FieldToken: + values[i] = new([]byte) + case documenttoken.FieldUses: + values[i] = new(sql.NullInt64) + case documenttoken.FieldCreatedAt, documenttoken.FieldUpdatedAt, documenttoken.FieldExpiresAt: + values[i] = new(sql.NullTime) + case documenttoken.FieldID: + values[i] = new(uuid.UUID) + case documenttoken.ForeignKeys[0]: // document_document_tokens + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + return nil, fmt.Errorf("unexpected column %q for type DocumentToken", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DocumentToken fields. +func (dt *DocumentToken) assignValues(columns []string, values []interface{}) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case documenttoken.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + dt.ID = *value + } + case documenttoken.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + dt.CreatedAt = value.Time + } + case documenttoken.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + dt.UpdatedAt = value.Time + } + case documenttoken.FieldToken: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value != nil { + dt.Token = *value + } + case documenttoken.FieldUses: + if value, ok := 
values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field uses", values[i]) + } else if value.Valid { + dt.Uses = int(value.Int64) + } + case documenttoken.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + dt.ExpiresAt = value.Time + } + case documenttoken.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field document_document_tokens", values[i]) + } else if value.Valid { + dt.document_document_tokens = new(uuid.UUID) + *dt.document_document_tokens = *value.S.(*uuid.UUID) + } + } + } + return nil +} + +// QueryDocument queries the "document" edge of the DocumentToken entity. +func (dt *DocumentToken) QueryDocument() *DocumentQuery { + return (&DocumentTokenClient{config: dt.config}).QueryDocument(dt) +} + +// Update returns a builder for updating this DocumentToken. +// Note that you need to call DocumentToken.Unwrap() before calling this method if this DocumentToken +// was returned from a transaction, and the transaction was committed or rolled back. +func (dt *DocumentToken) Update() *DocumentTokenUpdateOne { + return (&DocumentTokenClient{config: dt.config}).UpdateOne(dt) +} + +// Unwrap unwraps the DocumentToken entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (dt *DocumentToken) Unwrap() *DocumentToken { + _tx, ok := dt.config.driver.(*txDriver) + if !ok { + panic("ent: DocumentToken is not a transactional entity") + } + dt.config.driver = _tx.drv + return dt +} + +// String implements the fmt.Stringer. 
+func (dt *DocumentToken) String() string { + var builder strings.Builder + builder.WriteString("DocumentToken(") + builder.WriteString(fmt.Sprintf("id=%v, ", dt.ID)) + builder.WriteString("created_at=") + builder.WriteString(dt.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(dt.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteString(fmt.Sprintf("%v", dt.Token)) + builder.WriteString(", ") + builder.WriteString("uses=") + builder.WriteString(fmt.Sprintf("%v", dt.Uses)) + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(dt.ExpiresAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// DocumentTokens is a parsable slice of DocumentToken. +type DocumentTokens []*DocumentToken + +func (dt DocumentTokens) config(cfg config) { + for _i := range dt { + dt[_i].config = cfg + } +} diff --git a/backend/ent/documenttoken/documenttoken.go b/backend/ent/documenttoken/documenttoken.go new file mode 100644 index 0000000..ce05656 --- /dev/null +++ b/backend/ent/documenttoken/documenttoken.go @@ -0,0 +1,85 @@ +// Code generated by ent, DO NOT EDIT. + +package documenttoken + +import ( + "time" + + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the documenttoken type in the database. + Label = "document_token" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldUses holds the string denoting the uses field in the database. 
+ FieldUses = "uses" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // EdgeDocument holds the string denoting the document edge name in mutations. + EdgeDocument = "document" + // Table holds the table name of the documenttoken in the database. + Table = "document_tokens" + // DocumentTable is the table that holds the document relation/edge. + DocumentTable = "document_tokens" + // DocumentInverseTable is the table name for the Document entity. + // It exists in this package in order to avoid circular dependency with the "document" package. + DocumentInverseTable = "documents" + // DocumentColumn is the table column denoting the document relation/edge. + DocumentColumn = "document_document_tokens" +) + +// Columns holds all SQL columns for documenttoken fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldToken, + FieldUses, + FieldExpiresAt, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "document_tokens" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "document_document_tokens", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // TokenValidator is a validator for the "token" field. It is called by the builders before save. 
+ TokenValidator func([]byte) error + // DefaultUses holds the default value on creation for the "uses" field. + DefaultUses int + // DefaultExpiresAt holds the default value on creation for the "expires_at" field. + DefaultExpiresAt func() time.Time + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) diff --git a/backend/ent/documenttoken/where.go b/backend/ent/documenttoken/where.go new file mode 100644 index 0000000..918b975 --- /dev/null +++ b/backend/ent/documenttoken/where.go @@ -0,0 +1,498 @@ +// Code generated by ent, DO NOT EDIT. + +package documenttoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldToken), v)) + }) +} + +// Uses applies equality check predicate on the "uses" field. It's identical to UsesEQ. 
+func Uses(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUses), v)) + }) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpiresAt), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. 
+func UpdatedAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldToken), v)) + }) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldToken), v)) + }) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...[]byte) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldToken), v...)) + }) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...[]byte) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldToken), v...)) + }) +} + +// TokenGT applies the GT predicate on the "token" field. 
+func TokenGT(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldToken), v)) + }) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldToken), v)) + }) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldToken), v)) + }) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldToken), v)) + }) +} + +// UsesEQ applies the EQ predicate on the "uses" field. +func UsesEQ(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUses), v)) + }) +} + +// UsesNEQ applies the NEQ predicate on the "uses" field. +func UsesNEQ(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUses), v)) + }) +} + +// UsesIn applies the In predicate on the "uses" field. +func UsesIn(vs ...int) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUses), v...)) + }) +} + +// UsesNotIn applies the NotIn predicate on the "uses" field. +func UsesNotIn(vs ...int) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUses), v...)) + }) +} + +// UsesGT applies the GT predicate on the "uses" field. 
+func UsesGT(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUses), v)) + }) +} + +// UsesGTE applies the GTE predicate on the "uses" field. +func UsesGTE(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUses), v)) + }) +} + +// UsesLT applies the LT predicate on the "uses" field. +func UsesLT(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUses), v)) + }) +} + +// UsesLTE applies the LTE predicate on the "uses" field. +func UsesLTE(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUses), v)) + }) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldExpiresAt), v...)) + }) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldExpiresAt), v...)) + }) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. 
+func ExpiresAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldExpiresAt), v)) + }) +} + +// HasDocument applies the HasEdge predicate on the "document" edge. +func HasDocument() predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates). +func HasDocumentWith(preds ...predicate.Document) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/backend/ent/documenttoken_create.go b/backend/ent/documenttoken_create.go new file mode 100644 index 0000000..65908b6 --- /dev/null +++ b/backend/ent/documenttoken_create.go @@ -0,0 +1,418 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" +) + +// DocumentTokenCreate is the builder for creating a DocumentToken entity. +type DocumentTokenCreate struct { + config + mutation *DocumentTokenMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dtc *DocumentTokenCreate) SetCreatedAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetCreatedAt(t) + return dtc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableCreatedAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetCreatedAt(*t) + } + return dtc +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (dtc *DocumentTokenCreate) SetUpdatedAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetUpdatedAt(t) + return dtc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableUpdatedAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetUpdatedAt(*t) + } + return dtc +} + +// SetToken sets the "token" field. +func (dtc *DocumentTokenCreate) SetToken(b []byte) *DocumentTokenCreate { + dtc.mutation.SetToken(b) + return dtc +} + +// SetUses sets the "uses" field. +func (dtc *DocumentTokenCreate) SetUses(i int) *DocumentTokenCreate { + dtc.mutation.SetUses(i) + return dtc +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableUses(i *int) *DocumentTokenCreate { + if i != nil { + dtc.SetUses(*i) + } + return dtc +} + +// SetExpiresAt sets the "expires_at" field. +func (dtc *DocumentTokenCreate) SetExpiresAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetExpiresAt(t) + return dtc +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableExpiresAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetExpiresAt(*t) + } + return dtc +} + +// SetID sets the "id" field. +func (dtc *DocumentTokenCreate) SetID(u uuid.UUID) *DocumentTokenCreate { + dtc.mutation.SetID(u) + return dtc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableID(u *uuid.UUID) *DocumentTokenCreate { + if u != nil { + dtc.SetID(*u) + } + return dtc +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtc *DocumentTokenCreate) SetDocumentID(id uuid.UUID) *DocumentTokenCreate { + dtc.mutation.SetDocumentID(id) + return dtc +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. 
+func (dtc *DocumentTokenCreate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenCreate { + if id != nil { + dtc = dtc.SetDocumentID(*id) + } + return dtc +} + +// SetDocument sets the "document" edge to the Document entity. +func (dtc *DocumentTokenCreate) SetDocument(d *Document) *DocumentTokenCreate { + return dtc.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtc *DocumentTokenCreate) Mutation() *DocumentTokenMutation { + return dtc.mutation +} + +// Save creates the DocumentToken in the database. +func (dtc *DocumentTokenCreate) Save(ctx context.Context) (*DocumentToken, error) { + var ( + err error + node *DocumentToken + ) + dtc.defaults() + if len(dtc.hooks) == 0 { + if err = dtc.check(); err != nil { + return nil, err + } + node, err = dtc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtc.check(); err != nil { + return nil, err + } + dtc.mutation = mutation + if node, err = dtc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(dtc.hooks) - 1; i >= 0; i-- { + if dtc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dtc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*DocumentToken) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (dtc *DocumentTokenCreate) SaveX(ctx context.Context) *DocumentToken { + v, err := dtc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (dtc *DocumentTokenCreate) Exec(ctx context.Context) error { + _, err := dtc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtc *DocumentTokenCreate) ExecX(ctx context.Context) { + if err := dtc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtc *DocumentTokenCreate) defaults() { + if _, ok := dtc.mutation.CreatedAt(); !ok { + v := documenttoken.DefaultCreatedAt() + dtc.mutation.SetCreatedAt(v) + } + if _, ok := dtc.mutation.UpdatedAt(); !ok { + v := documenttoken.DefaultUpdatedAt() + dtc.mutation.SetUpdatedAt(v) + } + if _, ok := dtc.mutation.Uses(); !ok { + v := documenttoken.DefaultUses + dtc.mutation.SetUses(v) + } + if _, ok := dtc.mutation.ExpiresAt(); !ok { + v := documenttoken.DefaultExpiresAt() + dtc.mutation.SetExpiresAt(v) + } + if _, ok := dtc.mutation.ID(); !ok { + v := documenttoken.DefaultID() + dtc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (dtc *DocumentTokenCreate) check() error { + if _, ok := dtc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DocumentToken.created_at"`)} + } + if _, ok := dtc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DocumentToken.updated_at"`)} + } + if _, ok := dtc.mutation.Token(); !ok { + return &ValidationError{Name: "token", err: errors.New(`ent: missing required field "DocumentToken.token"`)} + } + if v, ok := dtc.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + if _, ok := dtc.mutation.Uses(); !ok { + return &ValidationError{Name: "uses", err: errors.New(`ent: missing required field "DocumentToken.uses"`)} + } + if _, ok := dtc.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "DocumentToken.expires_at"`)} + } + return nil +} + +func (dtc *DocumentTokenCreate) sqlSave(ctx context.Context) (*DocumentToken, error) { + _node, _spec := dtc.createSpec() + if err := sqlgraph.CreateNode(ctx, dtc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + return _node, nil +} + +func (dtc *DocumentTokenCreate) createSpec() (*DocumentToken, *sqlgraph.CreateSpec) { + var ( + _node = &DocumentToken{config: dtc.config} + _spec = &sqlgraph.CreateSpec{ + Table: documenttoken.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + } + ) + if id, ok := dtc.mutation.ID(); ok { + _node.ID = id + 
_spec.ID.Value = &id + } + if value, ok := dtc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := dtc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := dtc.mutation.Token(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + _node.Token = value + } + if value, ok := dtc.mutation.Uses(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + _node.Uses = value + } + if value, ok := dtc.mutation.ExpiresAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + _node.ExpiresAt = value + } + if nodes := dtc.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.document_document_tokens = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DocumentTokenCreateBulk is the builder for creating many DocumentToken entities in bulk. +type DocumentTokenCreateBulk struct { + config + builders []*DocumentTokenCreate +} + +// Save creates the DocumentToken entities in the database. 
+func (dtcb *DocumentTokenCreateBulk) Save(ctx context.Context) ([]*DocumentToken, error) { + specs := make([]*sqlgraph.CreateSpec, len(dtcb.builders)) + nodes := make([]*DocumentToken, len(dtcb.builders)) + mutators := make([]Mutator, len(dtcb.builders)) + for i := range dtcb.builders { + func(i int, root context.Context) { + builder := dtcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dtcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dtcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dtcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dtcb *DocumentTokenCreateBulk) SaveX(ctx context.Context) []*DocumentToken { + v, err := dtcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dtcb *DocumentTokenCreateBulk) Exec(ctx context.Context) error { + _, err := dtcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (dtcb *DocumentTokenCreateBulk) ExecX(ctx context.Context) { + if err := dtcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/documenttoken_delete.go b/backend/ent/documenttoken_delete.go new file mode 100644 index 0000000..bc8f488 --- /dev/null +++ b/backend/ent/documenttoken_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenDelete is the builder for deleting a DocumentToken entity. +type DocumentTokenDelete struct { + config + hooks []Hook + mutation *DocumentTokenMutation +} + +// Where appends a list predicates to the DocumentTokenDelete builder. +func (dtd *DocumentTokenDelete) Where(ps ...predicate.DocumentToken) *DocumentTokenDelete { + dtd.mutation.Where(ps...) + return dtd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dtd *DocumentTokenDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dtd.hooks) == 0 { + affected, err = dtd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dtd.mutation = mutation + affected, err = dtd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dtd.hooks) - 1; i >= 0; i-- { + if dtd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dtd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (dtd *DocumentTokenDelete) ExecX(ctx context.Context) int { + n, err := dtd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dtd *DocumentTokenDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + if ps := dtd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dtd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// DocumentTokenDeleteOne is the builder for deleting a single DocumentToken entity. +type DocumentTokenDeleteOne struct { + dtd *DocumentTokenDelete +} + +// Exec executes the deletion query. +func (dtdo *DocumentTokenDeleteOne) Exec(ctx context.Context) error { + n, err := dtdo.dtd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{documenttoken.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtdo *DocumentTokenDeleteOne) ExecX(ctx context.Context) { + dtdo.dtd.ExecX(ctx) +} diff --git a/backend/ent/documenttoken_query.go b/backend/ent/documenttoken_query.go new file mode 100644 index 0000000..bd48c10 --- /dev/null +++ b/backend/ent/documenttoken_query.go @@ -0,0 +1,611 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenQuery is the builder for querying DocumentToken entities. +type DocumentTokenQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.DocumentToken + withDocument *DocumentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DocumentTokenQuery builder. +func (dtq *DocumentTokenQuery) Where(ps ...predicate.DocumentToken) *DocumentTokenQuery { + dtq.predicates = append(dtq.predicates, ps...) + return dtq +} + +// Limit adds a limit step to the query. +func (dtq *DocumentTokenQuery) Limit(limit int) *DocumentTokenQuery { + dtq.limit = &limit + return dtq +} + +// Offset adds an offset step to the query. +func (dtq *DocumentTokenQuery) Offset(offset int) *DocumentTokenQuery { + dtq.offset = &offset + return dtq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dtq *DocumentTokenQuery) Unique(unique bool) *DocumentTokenQuery { + dtq.unique = &unique + return dtq +} + +// Order adds an order step to the query. +func (dtq *DocumentTokenQuery) Order(o ...OrderFunc) *DocumentTokenQuery { + dtq.order = append(dtq.order, o...) + return dtq +} + +// QueryDocument chains the current query on the "document" edge. 
+func (dtq *DocumentTokenQuery) QueryDocument() *DocumentQuery { + query := &DocumentQuery{config: dtq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dtq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(documenttoken.Table, documenttoken.FieldID, selector), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn), + ) + fromU = sqlgraph.SetNeighbors(dtq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first DocumentToken entity from the query. +// Returns a *NotFoundError when no DocumentToken was found. +func (dtq *DocumentTokenQuery) First(ctx context.Context) (*DocumentToken, error) { + nodes, err := dtq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{documenttoken.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dtq *DocumentTokenQuery) FirstX(ctx context.Context) *DocumentToken { + node, err := dtq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DocumentToken ID from the query. +// Returns a *NotFoundError when no DocumentToken ID was found. +func (dtq *DocumentTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dtq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{documenttoken.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (dtq *DocumentTokenQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := dtq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DocumentToken entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DocumentToken entity is found. +// Returns a *NotFoundError when no DocumentToken entities are found. +func (dtq *DocumentTokenQuery) Only(ctx context.Context) (*DocumentToken, error) { + nodes, err := dtq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{documenttoken.Label} + default: + return nil, &NotSingularError{documenttoken.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dtq *DocumentTokenQuery) OnlyX(ctx context.Context) *DocumentToken { + node, err := dtq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DocumentToken ID in the query. +// Returns a *NotSingularError when more than one DocumentToken ID is found. +// Returns a *NotFoundError when no entities are found. +func (dtq *DocumentTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dtq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{documenttoken.Label} + default: + err = &NotSingularError{documenttoken.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dtq *DocumentTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := dtq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DocumentTokens. 
+func (dtq *DocumentTokenQuery) All(ctx context.Context) ([]*DocumentToken, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + return dtq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dtq *DocumentTokenQuery) AllX(ctx context.Context) []*DocumentToken { + nodes, err := dtq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DocumentToken IDs. +func (dtq *DocumentTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { + var ids []uuid.UUID + if err := dtq.Select(documenttoken.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dtq *DocumentTokenQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := dtq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dtq *DocumentTokenQuery) Count(ctx context.Context) (int, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return 0, err + } + return dtq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (dtq *DocumentTokenQuery) CountX(ctx context.Context) int { + count, err := dtq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dtq *DocumentTokenQuery) Exist(ctx context.Context) (bool, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return false, err + } + return dtq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (dtq *DocumentTokenQuery) ExistX(ctx context.Context) bool { + exist, err := dtq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DocumentTokenQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (dtq *DocumentTokenQuery) Clone() *DocumentTokenQuery { + if dtq == nil { + return nil + } + return &DocumentTokenQuery{ + config: dtq.config, + limit: dtq.limit, + offset: dtq.offset, + order: append([]OrderFunc{}, dtq.order...), + predicates: append([]predicate.DocumentToken{}, dtq.predicates...), + withDocument: dtq.withDocument.Clone(), + // clone intermediate query. + sql: dtq.sql.Clone(), + path: dtq.path, + unique: dtq.unique, + } +} + +// WithDocument tells the query-builder to eager-load the nodes that are connected to +// the "document" edge. The optional arguments are used to configure the query builder of the edge. +func (dtq *DocumentTokenQuery) WithDocument(opts ...func(*DocumentQuery)) *DocumentTokenQuery { + query := &DocumentQuery{config: dtq.config} + for _, opt := range opts { + opt(query) + } + dtq.withDocument = query + return dtq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.DocumentToken.Query(). +// GroupBy(documenttoken.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dtq *DocumentTokenQuery) GroupBy(field string, fields ...string) *DocumentTokenGroupBy { + grbuild := &DocumentTokenGroupBy{config: dtq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + return dtq.sqlQuery(ctx), nil + } + grbuild.label = documenttoken.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.DocumentToken.Query(). +// Select(documenttoken.FieldCreatedAt). +// Scan(ctx, &v) +func (dtq *DocumentTokenQuery) Select(fields ...string) *DocumentTokenSelect { + dtq.fields = append(dtq.fields, fields...) + selbuild := &DocumentTokenSelect{DocumentTokenQuery: dtq} + selbuild.label = documenttoken.Label + selbuild.flds, selbuild.scan = &dtq.fields, selbuild.Scan + return selbuild +} + +func (dtq *DocumentTokenQuery) prepareQuery(ctx context.Context) error { + for _, f := range dtq.fields { + if !documenttoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dtq.path != nil { + prev, err := dtq.path(ctx) + if err != nil { + return err + } + dtq.sql = prev + } + return nil +} + +func (dtq *DocumentTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DocumentToken, error) { + var ( + nodes = []*DocumentToken{} + withFKs = dtq.withFKs + _spec = dtq.querySpec() + loadedTypes = [1]bool{ + dtq.withDocument != nil, + } + ) + if dtq.withDocument != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]interface{}, error) { + return (*DocumentToken).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []interface{}) error { + node := &DocumentToken{config: dtq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dtq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dtq.withDocument; query != nil { + if err := dtq.loadDocument(ctx, query, nodes, nil, + func(n *DocumentToken, e *Document) { n.Edges.Document = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dtq *DocumentTokenQuery) loadDocument(ctx context.Context, query *DocumentQuery, nodes []*DocumentToken, init func(*DocumentToken), assign func(*DocumentToken, *Document)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*DocumentToken) + for i := range nodes { + if nodes[i].document_document_tokens == nil { + continue + } + fk := *nodes[i].document_document_tokens + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(document.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dtq *DocumentTokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dtq.querySpec() + _spec.Node.Columns = dtq.fields + if len(dtq.fields) > 0 { + _spec.Unique = dtq.unique != nil && *dtq.unique + } + return sqlgraph.CountNodes(ctx, dtq.driver, _spec) +} + +func (dtq *DocumentTokenQuery) sqlExist(ctx context.Context) (bool, 
error) { + n, err := dtq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %w", err) + } + return n > 0, nil +} + +func (dtq *DocumentTokenQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + From: dtq.sql, + Unique: true, + } + if unique := dtq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := dtq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID) + for i := range fields { + if fields[i] != documenttoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dtq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dtq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dtq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dtq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dtq *DocumentTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dtq.driver.Dialect()) + t1 := builder.Table(documenttoken.Table) + columns := dtq.fields + if len(columns) == 0 { + columns = documenttoken.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dtq.sql != nil { + selector = dtq.sql + selector.Select(selector.Columns(columns...)...) + } + if dtq.unique != nil && *dtq.unique { + selector.Distinct() + } + for _, p := range dtq.predicates { + p(selector) + } + for _, p := range dtq.order { + p(selector) + } + if offset := dtq.offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dtq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DocumentTokenGroupBy is the group-by builder for DocumentToken entities. +type DocumentTokenGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dtgb *DocumentTokenGroupBy) Aggregate(fns ...AggregateFunc) *DocumentTokenGroupBy { + dtgb.fns = append(dtgb.fns, fns...) + return dtgb +} + +// Scan applies the group-by query and scans the result into the given value. +func (dtgb *DocumentTokenGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := dtgb.path(ctx) + if err != nil { + return err + } + dtgb.sql = query + return dtgb.sqlScan(ctx, v) +} + +func (dtgb *DocumentTokenGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range dtgb.fields { + if !documenttoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dtgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dtgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dtgb *DocumentTokenGroupBy) sqlQuery() *sql.Selector { + selector := dtgb.sql.Select() + aggregation := make([]string, 0, len(dtgb.fns)) + for _, fn := range dtgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. 
+ if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(dtgb.fields)+len(dtgb.fns)) + for _, f := range dtgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(dtgb.fields...)...) +} + +// DocumentTokenSelect is the builder for selecting fields of DocumentToken entities. +type DocumentTokenSelect struct { + *DocumentTokenQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (dts *DocumentTokenSelect) Scan(ctx context.Context, v interface{}) error { + if err := dts.prepareQuery(ctx); err != nil { + return err + } + dts.sql = dts.DocumentTokenQuery.sqlQuery(ctx) + return dts.sqlScan(ctx, v) +} + +func (dts *DocumentTokenSelect) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := dts.sql.Query() + if err := dts.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/documenttoken_update.go b/backend/ent/documenttoken_update.go new file mode 100644 index 0000000..e4586be --- /dev/null +++ b/backend/ent/documenttoken_update.go @@ -0,0 +1,582 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenUpdate is the builder for updating DocumentToken entities. 
+type DocumentTokenUpdate struct { + config + hooks []Hook + mutation *DocumentTokenMutation +} + +// Where appends a list predicates to the DocumentTokenUpdate builder. +func (dtu *DocumentTokenUpdate) Where(ps ...predicate.DocumentToken) *DocumentTokenUpdate { + dtu.mutation.Where(ps...) + return dtu +} + +// SetUpdatedAt sets the "updated_at" field. +func (dtu *DocumentTokenUpdate) SetUpdatedAt(t time.Time) *DocumentTokenUpdate { + dtu.mutation.SetUpdatedAt(t) + return dtu +} + +// SetToken sets the "token" field. +func (dtu *DocumentTokenUpdate) SetToken(b []byte) *DocumentTokenUpdate { + dtu.mutation.SetToken(b) + return dtu +} + +// SetUses sets the "uses" field. +func (dtu *DocumentTokenUpdate) SetUses(i int) *DocumentTokenUpdate { + dtu.mutation.ResetUses() + dtu.mutation.SetUses(i) + return dtu +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtu *DocumentTokenUpdate) SetNillableUses(i *int) *DocumentTokenUpdate { + if i != nil { + dtu.SetUses(*i) + } + return dtu +} + +// AddUses adds i to the "uses" field. +func (dtu *DocumentTokenUpdate) AddUses(i int) *DocumentTokenUpdate { + dtu.mutation.AddUses(i) + return dtu +} + +// SetExpiresAt sets the "expires_at" field. +func (dtu *DocumentTokenUpdate) SetExpiresAt(t time.Time) *DocumentTokenUpdate { + dtu.mutation.SetExpiresAt(t) + return dtu +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtu *DocumentTokenUpdate) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdate { + if t != nil { + dtu.SetExpiresAt(*t) + } + return dtu +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtu *DocumentTokenUpdate) SetDocumentID(id uuid.UUID) *DocumentTokenUpdate { + dtu.mutation.SetDocumentID(id) + return dtu +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. 
+func (dtu *DocumentTokenUpdate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdate { + if id != nil { + dtu = dtu.SetDocumentID(*id) + } + return dtu +} + +// SetDocument sets the "document" edge to the Document entity. +func (dtu *DocumentTokenUpdate) SetDocument(d *Document) *DocumentTokenUpdate { + return dtu.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtu *DocumentTokenUpdate) Mutation() *DocumentTokenMutation { + return dtu.mutation +} + +// ClearDocument clears the "document" edge to the Document entity. +func (dtu *DocumentTokenUpdate) ClearDocument() *DocumentTokenUpdate { + dtu.mutation.ClearDocument() + return dtu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dtu *DocumentTokenUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + dtu.defaults() + if len(dtu.hooks) == 0 { + if err = dtu.check(); err != nil { + return 0, err + } + affected, err = dtu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtu.check(); err != nil { + return 0, err + } + dtu.mutation = mutation + affected, err = dtu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(dtu.hooks) - 1; i >= 0; i-- { + if dtu.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dtu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (dtu *DocumentTokenUpdate) SaveX(ctx context.Context) int { + affected, err := dtu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (dtu *DocumentTokenUpdate) Exec(ctx context.Context) error { + _, err := dtu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtu *DocumentTokenUpdate) ExecX(ctx context.Context) { + if err := dtu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtu *DocumentTokenUpdate) defaults() { + if _, ok := dtu.mutation.UpdatedAt(); !ok { + v := documenttoken.UpdateDefaultUpdatedAt() + dtu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtu *DocumentTokenUpdate) check() error { + if v, ok := dtu.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + return nil +} + +func (dtu *DocumentTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + if ps := dtu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + } + if value, ok := dtu.mutation.Token(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + } + if value, ok := dtu.mutation.Uses(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := 
dtu.mutation.AddedUses(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtu.mutation.ExpiresAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + } + if dtu.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dtu.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, dtu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{documenttoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// DocumentTokenUpdateOne is the builder for updating a single DocumentToken entity. +type DocumentTokenUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DocumentTokenMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (dtuo *DocumentTokenUpdateOne) SetUpdatedAt(t time.Time) *DocumentTokenUpdateOne { + dtuo.mutation.SetUpdatedAt(t) + return dtuo +} + +// SetToken sets the "token" field. +func (dtuo *DocumentTokenUpdateOne) SetToken(b []byte) *DocumentTokenUpdateOne { + dtuo.mutation.SetToken(b) + return dtuo +} + +// SetUses sets the "uses" field. +func (dtuo *DocumentTokenUpdateOne) SetUses(i int) *DocumentTokenUpdateOne { + dtuo.mutation.ResetUses() + dtuo.mutation.SetUses(i) + return dtuo +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableUses(i *int) *DocumentTokenUpdateOne { + if i != nil { + dtuo.SetUses(*i) + } + return dtuo +} + +// AddUses adds i to the "uses" field. +func (dtuo *DocumentTokenUpdateOne) AddUses(i int) *DocumentTokenUpdateOne { + dtuo.mutation.AddUses(i) + return dtuo +} + +// SetExpiresAt sets the "expires_at" field. +func (dtuo *DocumentTokenUpdateOne) SetExpiresAt(t time.Time) *DocumentTokenUpdateOne { + dtuo.mutation.SetExpiresAt(t) + return dtuo +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdateOne { + if t != nil { + dtuo.SetExpiresAt(*t) + } + return dtuo +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtuo *DocumentTokenUpdateOne) SetDocumentID(id uuid.UUID) *DocumentTokenUpdateOne { + dtuo.mutation.SetDocumentID(id) + return dtuo +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdateOne { + if id != nil { + dtuo = dtuo.SetDocumentID(*id) + } + return dtuo +} + +// SetDocument sets the "document" edge to the Document entity. 
+func (dtuo *DocumentTokenUpdateOne) SetDocument(d *Document) *DocumentTokenUpdateOne { + return dtuo.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtuo *DocumentTokenUpdateOne) Mutation() *DocumentTokenMutation { + return dtuo.mutation +} + +// ClearDocument clears the "document" edge to the Document entity. +func (dtuo *DocumentTokenUpdateOne) ClearDocument() *DocumentTokenUpdateOne { + dtuo.mutation.ClearDocument() + return dtuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (dtuo *DocumentTokenUpdateOne) Select(field string, fields ...string) *DocumentTokenUpdateOne { + dtuo.fields = append([]string{field}, fields...) + return dtuo +} + +// Save executes the query and returns the updated DocumentToken entity. +func (dtuo *DocumentTokenUpdateOne) Save(ctx context.Context) (*DocumentToken, error) { + var ( + err error + node *DocumentToken + ) + dtuo.defaults() + if len(dtuo.hooks) == 0 { + if err = dtuo.check(); err != nil { + return nil, err + } + node, err = dtuo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtuo.check(); err != nil { + return nil, err + } + dtuo.mutation = mutation + node, err = dtuo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(dtuo.hooks) - 1; i >= 0; i-- { + if dtuo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtuo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dtuo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*DocumentToken) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v) + } + node = nv + } + return node, err +} 
+ +// SaveX is like Save, but panics if an error occurs. +func (dtuo *DocumentTokenUpdateOne) SaveX(ctx context.Context) *DocumentToken { + node, err := dtuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (dtuo *DocumentTokenUpdateOne) Exec(ctx context.Context) error { + _, err := dtuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtuo *DocumentTokenUpdateOne) ExecX(ctx context.Context) { + if err := dtuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtuo *DocumentTokenUpdateOne) defaults() { + if _, ok := dtuo.mutation.UpdatedAt(); !ok { + v := documenttoken.UpdateDefaultUpdatedAt() + dtuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtuo *DocumentTokenUpdateOne) check() error { + if v, ok := dtuo.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + return nil +} + +func (dtuo *DocumentTokenUpdateOne) sqlSave(ctx context.Context) (_node *DocumentToken, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + id, ok := dtuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DocumentToken.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dtuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID) + for _, f := range fields { + if !documenttoken.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid 
field %q for query", f)} + } + if f != documenttoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dtuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtuo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + } + if value, ok := dtuo.mutation.Token(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + } + if value, ok := dtuo.mutation.Uses(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtuo.mutation.AddedUses(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtuo.mutation.ExpiresAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + } + if dtuo.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dtuo.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: 
field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &DocumentToken{config: dtuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dtuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{documenttoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/backend/ent/ent.go b/backend/ent/ent.go index f976756..74b405e 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -11,6 +11,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -37,13 +39,15 @@ type OrderFunc func(*sql.Selector) // columnChecker returns a function indicates if the column exists in the given column. 
func columnChecker(table string) func(string) error { checks := map[string]func(string) bool{ - authtokens.Table: authtokens.ValidColumn, - group.Table: group.ValidColumn, - item.Table: item.ValidColumn, - itemfield.Table: itemfield.ValidColumn, - label.Table: label.ValidColumn, - location.Table: location.ValidColumn, - user.Table: user.ValidColumn, + authtokens.Table: authtokens.ValidColumn, + document.Table: document.ValidColumn, + documenttoken.Table: documenttoken.ValidColumn, + group.Table: group.ValidColumn, + item.Table: item.ValidColumn, + itemfield.Table: itemfield.ValidColumn, + label.Table: label.ValidColumn, + location.Table: location.ValidColumn, + user.Table: user.ValidColumn, } check, ok := checks[table] if !ok { diff --git a/backend/ent/group.go b/backend/ent/group.go index 6724ec5..39f38ca 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -40,9 +40,11 @@ type GroupEdges struct { Items []*Item `json:"items,omitempty"` // Labels holds the value of the labels edge. Labels []*Label `json:"labels,omitempty"` + // Documents holds the value of the documents edge. + Documents []*Document `json:"documents,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [4]bool + loadedTypes [5]bool } // UsersOrErr returns the Users value or an error if the edge @@ -81,6 +83,15 @@ func (e GroupEdges) LabelsOrErr() ([]*Label, error) { return nil, &NotLoadedError{edge: "labels"} } +// DocumentsOrErr returns the Documents value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) DocumentsOrErr() ([]*Document, error) { + if e.loadedTypes[4] { + return e.Documents, nil + } + return nil, &NotLoadedError{edge: "documents"} +} + // scanValues returns the types for scanning values from sql.Rows. 
func (*Group) scanValues(columns []string) ([]interface{}, error) { values := make([]interface{}, len(columns)) @@ -162,6 +173,11 @@ func (gr *Group) QueryLabels() *LabelQuery { return (&GroupClient{config: gr.config}).QueryLabels(gr) } +// QueryDocuments queries the "documents" edge of the Group entity. +func (gr *Group) QueryDocuments() *DocumentQuery { + return (&GroupClient{config: gr.config}).QueryDocuments(gr) +} + // Update returns a builder for updating this Group. // Note that you need to call Group.Unwrap() before calling this method if this Group // was returned from a transaction, and the transaction was committed or rolled back. diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index c7ccf72..d30b781 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -30,6 +30,8 @@ const ( EdgeItems = "items" // EdgeLabels holds the string denoting the labels edge name in mutations. EdgeLabels = "labels" + // EdgeDocuments holds the string denoting the documents edge name in mutations. + EdgeDocuments = "documents" // Table holds the table name of the group in the database. Table = "groups" // UsersTable is the table that holds the users relation/edge. @@ -60,6 +62,13 @@ const ( LabelsInverseTable = "labels" // LabelsColumn is the table column denoting the labels relation/edge. LabelsColumn = "group_labels" + // DocumentsTable is the table that holds the documents relation/edge. + DocumentsTable = "documents" + // DocumentsInverseTable is the table name for the Document entity. + // It exists in this package in order to avoid circular dependency with the "document" package. + DocumentsInverseTable = "documents" + // DocumentsColumn is the table column denoting the documents relation/edge. + DocumentsColumn = "group_documents" ) // Columns holds all SQL columns for group fields. 
diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index 35e3c7d..f6c759d 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -478,6 +478,34 @@ func HasLabelsWith(preds ...predicate.Label) predicate.Group { }) } +// HasDocuments applies the HasEdge predicate on the "documents" edge. +func HasDocuments() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates). +func HasDocumentsWith(preds ...predicate.Document) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Group) predicate.Group { return predicate.Group(func(s *sql.Selector) { diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index eda86d6..a72eefe 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -147,6 +148,21 @@ func (gc *GroupCreate) AddLabels(l ...*Label) *GroupCreate { return gc.AddLabelIDs(ids...) 
} +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (gc *GroupCreate) AddDocumentIDs(ids ...uuid.UUID) *GroupCreate { + gc.mutation.AddDocumentIDs(ids...) + return gc +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (gc *GroupCreate) AddDocuments(d ...*Document) *GroupCreate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gc.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (gc *GroupCreate) Mutation() *GroupMutation { return gc.mutation @@ -410,6 +426,25 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := gc.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/backend/ent/group_query.go b/backend/ent/group_query.go index 8206f68..cb3a1c6 100644 --- a/backend/ent/group_query.go +++ b/backend/ent/group_query.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -33,6 +34,7 @@ type GroupQuery struct { withLocations *LocationQuery withItems *ItemQuery withLabels *LabelQuery + withDocuments *DocumentQuery // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -157,6 +159,28 @@ func (gq *GroupQuery) QueryLabels() *LabelQuery { return query } +// QueryDocuments chains the current query on the "documents" edge. +func (gq *GroupQuery) QueryDocuments() *DocumentQuery { + query := &DocumentQuery{config: gq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := gq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := gq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn), + ) + fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first Group entity from the query. // Returns a *NotFoundError when no Group was found. func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { @@ -342,6 +366,7 @@ func (gq *GroupQuery) Clone() *GroupQuery { withLocations: gq.withLocations.Clone(), withItems: gq.withItems.Clone(), withLabels: gq.withLabels.Clone(), + withDocuments: gq.withDocuments.Clone(), // clone intermediate query. sql: gq.sql.Clone(), path: gq.path, @@ -393,6 +418,17 @@ func (gq *GroupQuery) WithLabels(opts ...func(*LabelQuery)) *GroupQuery { return gq } +// WithDocuments tells the query-builder to eager-load the nodes that are connected to +// the "documents" edge. The optional arguments are used to configure the query builder of the edge. +func (gq *GroupQuery) WithDocuments(opts ...func(*DocumentQuery)) *GroupQuery { + query := &DocumentQuery{config: gq.config} + for _, opt := range opts { + opt(query) + } + gq.withDocuments = query + return gq +} + // GroupBy is used to group vertices by one or more fields/columns. 
// It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -461,11 +497,12 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, var ( nodes = []*Group{} _spec = gq.querySpec() - loadedTypes = [4]bool{ + loadedTypes = [5]bool{ gq.withUsers != nil, gq.withLocations != nil, gq.withItems != nil, gq.withLabels != nil, + gq.withDocuments != nil, } ) _spec.ScanValues = func(columns []string) ([]interface{}, error) { @@ -514,6 +551,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, return nil, err } } + if query := gq.withDocuments; query != nil { + if err := gq.loadDocuments(ctx, query, nodes, + func(n *Group) { n.Edges.Documents = []*Document{} }, + func(n *Group, e *Document) { n.Edges.Documents = append(n.Edges.Documents, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -641,6 +685,37 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [ } return nil } +func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, nodes []*Group, init func(*Group), assign func(*Group, *Document)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Document(func(s *sql.Selector) { + s.Where(sql.InValues(group.DocumentsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.group_documents + if fk == nil { + return fmt.Errorf(`foreign-key "group_documents" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { 
_spec := gq.querySpec() diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index 4fa63a8..4ebf709 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -119,6 +120,21 @@ func (gu *GroupUpdate) AddLabels(l ...*Label) *GroupUpdate { return gu.AddLabelIDs(ids...) } +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (gu *GroupUpdate) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.AddDocumentIDs(ids...) + return gu +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (gu *GroupUpdate) AddDocuments(d ...*Document) *GroupUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (gu *GroupUpdate) Mutation() *GroupMutation { return gu.mutation @@ -208,6 +224,27 @@ func (gu *GroupUpdate) RemoveLabels(l ...*Label) *GroupUpdate { return gu.RemoveLabelIDs(ids...) } +// ClearDocuments clears all "documents" edges to the Document entity. +func (gu *GroupUpdate) ClearDocuments() *GroupUpdate { + gu.mutation.ClearDocuments() + return gu +} + +// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs. +func (gu *GroupUpdate) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.RemoveDocumentIDs(ids...) + return gu +} + +// RemoveDocuments removes "documents" edges to Document entities. +func (gu *GroupUpdate) RemoveDocuments(d ...*Document) *GroupUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.RemoveDocumentIDs(ids...) 
+} + // Save executes the query and returns the number of nodes affected by the update operation. func (gu *GroupUpdate) Save(ctx context.Context) (int, error) { var ( @@ -547,6 +584,60 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if gu.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !gu.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{group.Label} @@ -652,6 +743,21 @@ func (guo *GroupUpdateOne) AddLabels(l ...*Label) *GroupUpdateOne { return guo.AddLabelIDs(ids...) 
} +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (guo *GroupUpdateOne) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.AddDocumentIDs(ids...) + return guo +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (guo *GroupUpdateOne) AddDocuments(d ...*Document) *GroupUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (guo *GroupUpdateOne) Mutation() *GroupMutation { return guo.mutation @@ -741,6 +847,27 @@ func (guo *GroupUpdateOne) RemoveLabels(l ...*Label) *GroupUpdateOne { return guo.RemoveLabelIDs(ids...) } +// ClearDocuments clears all "documents" edges to the Document entity. +func (guo *GroupUpdateOne) ClearDocuments() *GroupUpdateOne { + guo.mutation.ClearDocuments() + return guo +} + +// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs. +func (guo *GroupUpdateOne) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.RemoveDocumentIDs(ids...) + return guo +} + +// RemoveDocuments removes "documents" edges to Document entities. +func (guo *GroupUpdateOne) RemoveDocuments(d ...*Document) *GroupUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.RemoveDocumentIDs(ids...) +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { @@ -1110,6 +1237,60 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if guo.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !guo.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &Group{config: guo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 6bffd21..645a568 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -22,6 +22,32 @@ func (f AuthTokensFunc) Mutate(ctx context.Context, m ent.Mutation) 
(ent.Value, return f(ctx, mv) } +// The DocumentFunc type is an adapter to allow the use of ordinary +// function as Document mutator. +type DocumentFunc func(context.Context, *ent.DocumentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DocumentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentMutation", m) + } + return f(ctx, mv) +} + +// The DocumentTokenFunc type is an adapter to allow the use of ordinary +// function as DocumentToken mutator. +type DocumentTokenFunc func(context.Context, *ent.DocumentTokenMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DocumentTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentTokenMutation", m) + } + return f(ctx, mv) +} + // The GroupFunc type is an adapter to allow the use of ordinary // function as Group mutator. type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error) diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index a705906..c2cfc00 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -38,6 +38,60 @@ var ( }, }, } + // DocumentsColumns holds the columns for the "documents" table. + DocumentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "title", Type: field.TypeString, Size: 255}, + {Name: "path", Type: field.TypeString, Size: 500}, + {Name: "group_documents", Type: field.TypeUUID}, + } + // DocumentsTable holds the schema information for the "documents" table. 
+ DocumentsTable = &schema.Table{ + Name: "documents", + Columns: DocumentsColumns, + PrimaryKey: []*schema.Column{DocumentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "documents_groups_documents", + Columns: []*schema.Column{DocumentsColumns[5]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // DocumentTokensColumns holds the columns for the "document_tokens" table. + DocumentTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "token", Type: field.TypeBytes, Unique: true}, + {Name: "uses", Type: field.TypeInt, Default: 1}, + {Name: "expires_at", Type: field.TypeTime}, + {Name: "document_document_tokens", Type: field.TypeUUID, Nullable: true}, + } + // DocumentTokensTable holds the schema information for the "document_tokens" table. + DocumentTokensTable = &schema.Table{ + Name: "document_tokens", + Columns: DocumentTokensColumns, + PrimaryKey: []*schema.Column{DocumentTokensColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "document_tokens_documents_document_tokens", + Columns: []*schema.Column{DocumentTokensColumns[6]}, + RefColumns: []*schema.Column{DocumentsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "documenttoken_token", + Unique: false, + Columns: []*schema.Column{DocumentTokensColumns[3]}, + }, + }, + } // GroupsColumns holds the columns for the "groups" table. GroupsColumns = []*schema.Column{ {Name: "id", Type: field.TypeUUID}, @@ -246,6 +300,8 @@ var ( // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ AuthTokensTable, + DocumentsTable, + DocumentTokensTable, GroupsTable, ItemsTable, ItemFieldsTable, @@ -258,6 +314,8 @@ var ( func init() { AuthTokensTable.ForeignKeys[0].RefTable = UsersTable + DocumentsTable.ForeignKeys[0].RefTable = GroupsTable + DocumentTokensTable.ForeignKeys[0].RefTable = DocumentsTable ItemsTable.ForeignKeys[0].RefTable = GroupsTable ItemsTable.ForeignKeys[1].RefTable = LocationsTable ItemFieldsTable.ForeignKeys[0].RefTable = ItemsTable diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 1f5157b..fc3d35b 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -11,6 +11,8 @@ import ( "github.com/google/uuid" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -31,13 +33,15 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. - TypeAuthTokens = "AuthTokens" - TypeGroup = "Group" - TypeItem = "Item" - TypeItemField = "ItemField" - TypeLabel = "Label" - TypeLocation = "Location" - TypeUser = "User" + TypeAuthTokens = "AuthTokens" + TypeDocument = "Document" + TypeDocumentToken = "DocumentToken" + TypeGroup = "Group" + TypeItem = "Item" + TypeItemField = "ItemField" + TypeLabel = "Label" + TypeLocation = "Location" + TypeUser = "User" ) // AuthTokensMutation represents an operation that mutates the AuthTokens nodes in the graph. @@ -588,6 +592,1275 @@ func (m *AuthTokensMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AuthTokens edge %s", name) } +// DocumentMutation represents an operation that mutates the Document nodes in the graph. 
// NOTE(review): this file is generated by entc (entgo.io). Do not hand-edit —
// changes will be overwritten by the next `go generate`; change the schema and
// regenerate instead.
type DocumentMutation struct {
	config
	op                     Op
	typ                    string
	id                     *uuid.UUID
	created_at             *time.Time
	updated_at             *time.Time
	title                  *string
	_path                  *string
	clearedFields          map[string]struct{}
	group                  *uuid.UUID
	clearedgroup           bool
	document_tokens        map[uuid.UUID]struct{}
	removeddocument_tokens map[uuid.UUID]struct{}
	cleareddocument_tokens bool
	done                   bool
	oldValue               func(context.Context) (*Document, error)
	predicates             []predicate.Document
}

var _ ent.Mutation = (*DocumentMutation)(nil)

// documentOption allows management of the mutation configuration using functional options.
type documentOption func(*DocumentMutation)

// newDocumentMutation creates new mutation for the Document entity.
func newDocumentMutation(c config, op Op, opts ...documentOption) *DocumentMutation {
	m := &DocumentMutation{
		config:        c,
		op:            op,
		typ:           TypeDocument,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withDocumentID sets the ID field of the mutation.
func withDocumentID(id uuid.UUID) documentOption {
	return func(m *DocumentMutation) {
		var (
			err   error
			once  sync.Once
			value *Document
		)
		// Lazily fetch (once) the pre-mutation entity; disallowed after the
		// mutation has completed (m.done).
		m.oldValue = func(ctx context.Context) (*Document, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().Document.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withDocument sets the old Document of the mutation.
func withDocument(node *Document) documentOption {
	return func(m *DocumentMutation) {
		m.oldValue = func(context.Context) (*Document, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m DocumentMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m DocumentMutation) Tx() (*Tx, error) {
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// SetID sets the value of the id field. Note that this
// operation is only accepted on creation of Document entities.
func (m *DocumentMutation) SetID(id uuid.UUID) {
	m.id = &id
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *DocumentMutation) ID() (id uuid.UUID, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or deleted by the mutation.
func (m *DocumentMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []uuid.UUID{id}, nil
		}
		// No explicit ID on a *One op: fall through and resolve via predicates.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().Document.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}

// SetCreatedAt sets the "created_at" field.
func (m *DocumentMutation) SetCreatedAt(t time.Time) {
	m.created_at = &t
}

// CreatedAt returns the value of the "created_at" field in the mutation.
func (m *DocumentMutation) CreatedAt() (r time.Time, exists bool) {
	v := m.created_at
	if v == nil {
		return
	}
	return *v, true
}

// OldCreatedAt returns the old "created_at" field's value of the Document entity.
// If the Document object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
	}
	return oldValue.CreatedAt, nil
}

// ResetCreatedAt resets all changes to the "created_at" field.
func (m *DocumentMutation) ResetCreatedAt() {
	m.created_at = nil
}

// SetUpdatedAt sets the "updated_at" field.
func (m *DocumentMutation) SetUpdatedAt(t time.Time) {
	m.updated_at = &t
}

// UpdatedAt returns the value of the "updated_at" field in the mutation.
func (m *DocumentMutation) UpdatedAt() (r time.Time, exists bool) {
	v := m.updated_at
	if v == nil {
		return
	}
	return *v, true
}

// OldUpdatedAt returns the old "updated_at" field's value of the Document entity.
// If the Document object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
	}
	return oldValue.UpdatedAt, nil
}

// ResetUpdatedAt resets all changes to the "updated_at" field.
func (m *DocumentMutation) ResetUpdatedAt() {
	m.updated_at = nil
}

// SetTitle sets the "title" field.
func (m *DocumentMutation) SetTitle(s string) {
	m.title = &s
}

// Title returns the value of the "title" field in the mutation.
func (m *DocumentMutation) Title() (r string, exists bool) {
	v := m.title
	if v == nil {
		return
	}
	return *v, true
}

// OldTitle returns the old "title" field's value of the Document entity.
// If the Document object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentMutation) OldTitle(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldTitle is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldTitle requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldTitle: %w", err)
	}
	return oldValue.Title, nil
}

// ResetTitle resets all changes to the "title" field.
func (m *DocumentMutation) ResetTitle() {
	m.title = nil
}

// SetPath sets the "path" field.
func (m *DocumentMutation) SetPath(s string) {
	m._path = &s
}

// Path returns the value of the "path" field in the mutation.
func (m *DocumentMutation) Path() (r string, exists bool) {
	v := m._path
	if v == nil {
		return
	}
	return *v, true
}

// OldPath returns the old "path" field's value of the Document entity.
// If the Document object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentMutation) OldPath(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldPath is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldPath requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldPath: %w", err)
	}
	return oldValue.Path, nil
}

// ResetPath resets all changes to the "path" field.
func (m *DocumentMutation) ResetPath() {
	m._path = nil
}

// SetGroupID sets the "group" edge to the Group entity by id.
func (m *DocumentMutation) SetGroupID(id uuid.UUID) {
	m.group = &id
}

// ClearGroup clears the "group" edge to the Group entity.
func (m *DocumentMutation) ClearGroup() {
	m.clearedgroup = true
}

// GroupCleared reports if the "group" edge to the Group entity was cleared.
func (m *DocumentMutation) GroupCleared() bool {
	return m.clearedgroup
}

// GroupID returns the "group" edge ID in the mutation.
func (m *DocumentMutation) GroupID() (id uuid.UUID, exists bool) {
	if m.group != nil {
		return *m.group, true
	}
	return
}

// GroupIDs returns the "group" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// GroupID instead. It exists only for internal usage by the builders.
func (m *DocumentMutation) GroupIDs() (ids []uuid.UUID) {
	if id := m.group; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetGroup resets all changes to the "group" edge.
func (m *DocumentMutation) ResetGroup() {
	m.group = nil
	m.clearedgroup = false
}

// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by ids.
func (m *DocumentMutation) AddDocumentTokenIDs(ids ...uuid.UUID) {
	if m.document_tokens == nil {
		m.document_tokens = make(map[uuid.UUID]struct{})
	}
	for i := range ids {
		m.document_tokens[ids[i]] = struct{}{}
	}
}

// ClearDocumentTokens clears the "document_tokens" edge to the DocumentToken entity.
func (m *DocumentMutation) ClearDocumentTokens() {
	m.cleareddocument_tokens = true
}

// DocumentTokensCleared reports if the "document_tokens" edge to the DocumentToken entity was cleared.
func (m *DocumentMutation) DocumentTokensCleared() bool {
	return m.cleareddocument_tokens
}

// RemoveDocumentTokenIDs removes the "document_tokens" edge to the DocumentToken entity by IDs.
func (m *DocumentMutation) RemoveDocumentTokenIDs(ids ...uuid.UUID) {
	if m.removeddocument_tokens == nil {
		m.removeddocument_tokens = make(map[uuid.UUID]struct{})
	}
	for i := range ids {
		// Drop any pending add for the same id before recording the removal,
		// so the mutation does not both add and remove one id.
		delete(m.document_tokens, ids[i])
		m.removeddocument_tokens[ids[i]] = struct{}{}
	}
}

// RemovedDocumentTokensIDs returns the removed IDs of the "document_tokens" edge to the DocumentToken entity.
func (m *DocumentMutation) RemovedDocumentTokensIDs() (ids []uuid.UUID) {
	for id := range m.removeddocument_tokens {
		ids = append(ids, id)
	}
	return
}

// DocumentTokensIDs returns the "document_tokens" edge IDs in the mutation.
func (m *DocumentMutation) DocumentTokensIDs() (ids []uuid.UUID) {
	for id := range m.document_tokens {
		ids = append(ids, id)
	}
	return
}

// ResetDocumentTokens resets all changes to the "document_tokens" edge.
func (m *DocumentMutation) ResetDocumentTokens() {
	m.document_tokens = nil
	m.cleareddocument_tokens = false
	m.removeddocument_tokens = nil
}

// Where appends a list predicates to the DocumentMutation builder.
func (m *DocumentMutation) Where(ps ...predicate.Document) {
	m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *DocumentMutation) Op() Op {
	return m.op
}

// Type returns the node type of this mutation (Document).
func (m *DocumentMutation) Type() string {
	return m.typ
}

// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *DocumentMutation) Fields() []string {
	fields := make([]string, 0, 4)
	if m.created_at != nil {
		fields = append(fields, document.FieldCreatedAt)
	}
	if m.updated_at != nil {
		fields = append(fields, document.FieldUpdatedAt)
	}
	if m.title != nil {
		fields = append(fields, document.FieldTitle)
	}
	if m._path != nil {
		fields = append(fields, document.FieldPath)
	}
	return fields
}

// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
func (m *DocumentMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case document.FieldCreatedAt:
		return m.CreatedAt()
	case document.FieldUpdatedAt:
		return m.UpdatedAt()
	case document.FieldTitle:
		return m.Title()
	case document.FieldPath:
		return m.Path()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
func (m *DocumentMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case document.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case document.FieldUpdatedAt:
		return m.OldUpdatedAt(ctx)
	case document.FieldTitle:
		return m.OldTitle(ctx)
	case document.FieldPath:
		return m.OldPath(ctx)
	}
	return nil, fmt.Errorf("unknown Document field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *DocumentMutation) SetField(name string, value ent.Value) error {
	switch name {
	case document.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case document.FieldUpdatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUpdatedAt(v)
		return nil
	case document.FieldTitle:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetTitle(v)
		return nil
	case document.FieldPath:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetPath(v)
		return nil
	}
	return fmt.Errorf("unknown Document field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *DocumentMutation) AddedFields() []string {
	return nil
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *DocumentMutation) AddedField(name string) (ent.Value, bool) {
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *DocumentMutation) AddField(name string, value ent.Value) error {
	// Document has no numeric fields; the empty switch is generator output.
	switch name {
	}
	return fmt.Errorf("unknown Document numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *DocumentMutation) ClearedFields() []string {
	return nil
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *DocumentMutation) FieldCleared(name string) bool {
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *DocumentMutation) ClearField(name string) error {
	return fmt.Errorf("unknown Document nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
func (m *DocumentMutation) ResetField(name string) error {
	switch name {
	case document.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case document.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
	case document.FieldTitle:
		m.ResetTitle()
		return nil
	case document.FieldPath:
		m.ResetPath()
		return nil
	}
	return fmt.Errorf("unknown Document field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
func (m *DocumentMutation) AddedEdges() []string {
	edges := make([]string, 0, 2)
	if m.group != nil {
		edges = append(edges, document.EdgeGroup)
	}
	if m.document_tokens != nil {
		edges = append(edges, document.EdgeDocumentTokens)
	}
	return edges
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *DocumentMutation) AddedIDs(name string) []ent.Value {
	switch name {
	case document.EdgeGroup:
		if id := m.group; id != nil {
			return []ent.Value{*id}
		}
	case document.EdgeDocumentTokens:
		ids := make([]ent.Value, 0, len(m.document_tokens))
		for id := range m.document_tokens {
			ids = append(ids, id)
		}
		return ids
	}
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
func (m *DocumentMutation) RemovedEdges() []string {
	edges := make([]string, 0, 2)
	if m.removeddocument_tokens != nil {
		edges = append(edges, document.EdgeDocumentTokens)
	}
	return edges
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
func (m *DocumentMutation) RemovedIDs(name string) []ent.Value {
	switch name {
	case document.EdgeDocumentTokens:
		ids := make([]ent.Value, 0, len(m.removeddocument_tokens))
		for id := range m.removeddocument_tokens {
			ids = append(ids, id)
		}
		return ids
	}
	return nil
}

// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *DocumentMutation) ClearedEdges() []string {
	edges := make([]string, 0, 2)
	if m.clearedgroup {
		edges = append(edges, document.EdgeGroup)
	}
	if m.cleareddocument_tokens {
		edges = append(edges, document.EdgeDocumentTokens)
	}
	return edges
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *DocumentMutation) EdgeCleared(name string) bool {
	switch name {
	case document.EdgeGroup:
		return m.clearedgroup
	case document.EdgeDocumentTokens:
		return m.cleareddocument_tokens
	}
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *DocumentMutation) ClearEdge(name string) error {
	switch name {
	case document.EdgeGroup:
		m.ClearGroup()
		return nil
	}
	return fmt.Errorf("unknown Document unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *DocumentMutation) ResetEdge(name string) error {
	switch name {
	case document.EdgeGroup:
		m.ResetGroup()
		return nil
	case document.EdgeDocumentTokens:
		m.ResetDocumentTokens()
		return nil
	}
	return fmt.Errorf("unknown Document edge %s", name)
}

// DocumentTokenMutation represents an operation that mutates the DocumentToken nodes in the graph.
type DocumentTokenMutation struct {
	config
	op              Op
	typ             string
	id              *uuid.UUID
	created_at      *time.Time
	updated_at      *time.Time
	token           *[]byte
	uses            *int
	adduses         *int
	expires_at      *time.Time
	clearedFields   map[string]struct{}
	document        *uuid.UUID
	cleareddocument bool
	done            bool
	oldValue        func(context.Context) (*DocumentToken, error)
	predicates      []predicate.DocumentToken
}

var _ ent.Mutation = (*DocumentTokenMutation)(nil)

// documenttokenOption allows management of the mutation configuration using functional options.
type documenttokenOption func(*DocumentTokenMutation)

// newDocumentTokenMutation creates new mutation for the DocumentToken entity.
func newDocumentTokenMutation(c config, op Op, opts ...documenttokenOption) *DocumentTokenMutation {
	m := &DocumentTokenMutation{
		config:        c,
		op:            op,
		typ:           TypeDocumentToken,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withDocumentTokenID sets the ID field of the mutation.
func withDocumentTokenID(id uuid.UUID) documenttokenOption {
	return func(m *DocumentTokenMutation) {
		var (
			err   error
			once  sync.Once
			value *DocumentToken
		)
		// Lazily fetch (once) the pre-mutation entity; disallowed after the
		// mutation has completed (m.done).
		m.oldValue = func(ctx context.Context) (*DocumentToken, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().DocumentToken.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withDocumentToken sets the old DocumentToken of the mutation.
func withDocumentToken(node *DocumentToken) documenttokenOption {
	return func(m *DocumentTokenMutation) {
		m.oldValue = func(context.Context) (*DocumentToken, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m DocumentTokenMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m DocumentTokenMutation) Tx() (*Tx, error) {
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// SetID sets the value of the id field. Note that this
// operation is only accepted on creation of DocumentToken entities.
func (m *DocumentTokenMutation) SetID(id uuid.UUID) {
	m.id = &id
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *DocumentTokenMutation) ID() (id uuid.UUID, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or deleted by the mutation.
func (m *DocumentTokenMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []uuid.UUID{id}, nil
		}
		// No explicit ID on a *One op: fall through and resolve via predicates.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().DocumentToken.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}

// SetCreatedAt sets the "created_at" field.
func (m *DocumentTokenMutation) SetCreatedAt(t time.Time) {
	m.created_at = &t
}

// CreatedAt returns the value of the "created_at" field in the mutation.
func (m *DocumentTokenMutation) CreatedAt() (r time.Time, exists bool) {
	v := m.created_at
	if v == nil {
		return
	}
	return *v, true
}

// OldCreatedAt returns the old "created_at" field's value of the DocumentToken entity.
// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
	}
	return oldValue.CreatedAt, nil
}

// ResetCreatedAt resets all changes to the "created_at" field.
func (m *DocumentTokenMutation) ResetCreatedAt() {
	m.created_at = nil
}

// SetUpdatedAt sets the "updated_at" field.
func (m *DocumentTokenMutation) SetUpdatedAt(t time.Time) {
	m.updated_at = &t
}

// UpdatedAt returns the value of the "updated_at" field in the mutation.
func (m *DocumentTokenMutation) UpdatedAt() (r time.Time, exists bool) {
	v := m.updated_at
	if v == nil {
		return
	}
	return *v, true
}

// OldUpdatedAt returns the old "updated_at" field's value of the DocumentToken entity.
// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentTokenMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
	}
	return oldValue.UpdatedAt, nil
}

// ResetUpdatedAt resets all changes to the "updated_at" field.
func (m *DocumentTokenMutation) ResetUpdatedAt() {
	m.updated_at = nil
}

// SetToken sets the "token" field.
func (m *DocumentTokenMutation) SetToken(b []byte) {
	m.token = &b
}

// Token returns the value of the "token" field in the mutation.
func (m *DocumentTokenMutation) Token() (r []byte, exists bool) {
	v := m.token
	if v == nil {
		return
	}
	return *v, true
}

// OldToken returns the old "token" field's value of the DocumentToken entity.
// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentTokenMutation) OldToken(ctx context.Context) (v []byte, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldToken is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldToken requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldToken: %w", err)
	}
	return oldValue.Token, nil
}

// ResetToken resets all changes to the "token" field.
func (m *DocumentTokenMutation) ResetToken() {
	m.token = nil
}

// SetUses sets the "uses" field.
func (m *DocumentTokenMutation) SetUses(i int) {
	m.uses = &i
	// Setting the field outright discards any pending AddUses increments.
	m.adduses = nil
}

// Uses returns the value of the "uses" field in the mutation.
func (m *DocumentTokenMutation) Uses() (r int, exists bool) {
	v := m.uses
	if v == nil {
		return
	}
	return *v, true
}

// OldUses returns the old "uses" field's value of the DocumentToken entity.
// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentTokenMutation) OldUses(ctx context.Context) (v int, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUses is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUses requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUses: %w", err)
	}
	return oldValue.Uses, nil
}

// AddUses adds i to the "uses" field.
func (m *DocumentTokenMutation) AddUses(i int) {
	if m.adduses != nil {
		*m.adduses += i
	} else {
		m.adduses = &i
	}
}

// AddedUses returns the value that was added to the "uses" field in this mutation.
func (m *DocumentTokenMutation) AddedUses() (r int, exists bool) {
	v := m.adduses
	if v == nil {
		return
	}
	return *v, true
}

// ResetUses resets all changes to the "uses" field.
func (m *DocumentTokenMutation) ResetUses() {
	m.uses = nil
	m.adduses = nil
}

// SetExpiresAt sets the "expires_at" field.
func (m *DocumentTokenMutation) SetExpiresAt(t time.Time) {
	m.expires_at = &t
}

// ExpiresAt returns the value of the "expires_at" field in the mutation.
func (m *DocumentTokenMutation) ExpiresAt() (r time.Time, exists bool) {
	v := m.expires_at
	if v == nil {
		return
	}
	return *v, true
}

// OldExpiresAt returns the old "expires_at" field's value of the DocumentToken entity.
// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *DocumentTokenMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldExpiresAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err)
	}
	return oldValue.ExpiresAt, nil
}

// ResetExpiresAt resets all changes to the "expires_at" field.
func (m *DocumentTokenMutation) ResetExpiresAt() {
	m.expires_at = nil
}

// SetDocumentID sets the "document" edge to the Document entity by id.
func (m *DocumentTokenMutation) SetDocumentID(id uuid.UUID) {
	m.document = &id
}

// ClearDocument clears the "document" edge to the Document entity.
func (m *DocumentTokenMutation) ClearDocument() {
	m.cleareddocument = true
}

// DocumentCleared reports if the "document" edge to the Document entity was cleared.
func (m *DocumentTokenMutation) DocumentCleared() bool {
	return m.cleareddocument
}

// DocumentID returns the "document" edge ID in the mutation.
func (m *DocumentTokenMutation) DocumentID() (id uuid.UUID, exists bool) {
	if m.document != nil {
		return *m.document, true
	}
	return
}

// DocumentIDs returns the "document" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// DocumentID instead. It exists only for internal usage by the builders.
func (m *DocumentTokenMutation) DocumentIDs() (ids []uuid.UUID) {
	if id := m.document; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetDocument resets all changes to the "document" edge.
func (m *DocumentTokenMutation) ResetDocument() {
	m.document = nil
	m.cleareddocument = false
}

// Where appends a list predicates to the DocumentTokenMutation builder.
func (m *DocumentTokenMutation) Where(ps ...predicate.DocumentToken) {
	m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *DocumentTokenMutation) Op() Op {
	return m.op
}

// Type returns the node type of this mutation (DocumentToken).
func (m *DocumentTokenMutation) Type() string {
	return m.typ
}

// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *DocumentTokenMutation) Fields() []string {
	fields := make([]string, 0, 5)
	if m.created_at != nil {
		fields = append(fields, documenttoken.FieldCreatedAt)
	}
	if m.updated_at != nil {
		fields = append(fields, documenttoken.FieldUpdatedAt)
	}
	if m.token != nil {
		fields = append(fields, documenttoken.FieldToken)
	}
	if m.uses != nil {
		fields = append(fields, documenttoken.FieldUses)
	}
	if m.expires_at != nil {
		fields = append(fields, documenttoken.FieldExpiresAt)
	}
	return fields
}

// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
func (m *DocumentTokenMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case documenttoken.FieldCreatedAt:
		return m.CreatedAt()
	case documenttoken.FieldUpdatedAt:
		return m.UpdatedAt()
	case documenttoken.FieldToken:
		return m.Token()
	case documenttoken.FieldUses:
		return m.Uses()
	case documenttoken.FieldExpiresAt:
		return m.ExpiresAt()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
func (m *DocumentTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case documenttoken.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case documenttoken.FieldUpdatedAt:
		return m.OldUpdatedAt(ctx)
	case documenttoken.FieldToken:
		return m.OldToken(ctx)
	case documenttoken.FieldUses:
		return m.OldUses(ctx)
	case documenttoken.FieldExpiresAt:
		return m.OldExpiresAt(ctx)
	}
	return nil, fmt.Errorf("unknown DocumentToken field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *DocumentTokenMutation) SetField(name string, value ent.Value) error {
	switch name {
	case documenttoken.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case documenttoken.FieldUpdatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUpdatedAt(v)
		return nil
	case documenttoken.FieldToken:
		v, ok := value.([]byte)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetToken(v)
		return nil
	case documenttoken.FieldUses:
		v, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUses(v)
		return nil
	case documenttoken.FieldExpiresAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetExpiresAt(v)
		return nil
	}
	return fmt.Errorf("unknown DocumentToken field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *DocumentTokenMutation) AddedFields() []string {
	var fields []string
	if m.adduses != nil {
		fields = append(fields, documenttoken.FieldUses)
	}
	return fields
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *DocumentTokenMutation) AddedField(name string) (ent.Value, bool) {
	switch name {
	case documenttoken.FieldUses:
		return m.AddedUses()
	}
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *DocumentTokenMutation) AddField(name string, value ent.Value) error {
	switch name {
	case documenttoken.FieldUses:
		v, ok := value.(int)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddUses(v)
		return nil
	}
	return fmt.Errorf("unknown DocumentToken numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *DocumentTokenMutation) ClearedFields() []string {
	return nil
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *DocumentTokenMutation) FieldCleared(name string) bool {
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *DocumentTokenMutation) ClearField(name string) error {
	return fmt.Errorf("unknown DocumentToken nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
func (m *DocumentTokenMutation) ResetField(name string) error {
	switch name {
	case documenttoken.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case documenttoken.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
	case documenttoken.FieldToken:
		m.ResetToken()
		return nil
	case documenttoken.FieldUses:
		m.ResetUses()
		return nil
	case documenttoken.FieldExpiresAt:
		m.ResetExpiresAt()
		return nil
	}
	return fmt.Errorf("unknown DocumentToken field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *DocumentTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.document != nil { + edges = append(edges, documenttoken.EdgeDocument) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DocumentTokenMutation) AddedIDs(name string) []ent.Value { + switch name { + case documenttoken.EdgeDocument: + if id := m.document; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DocumentTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DocumentTokenMutation) RemovedIDs(name string) []ent.Value { + switch name { + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DocumentTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.cleareddocument { + edges = append(edges, documenttoken.EdgeDocument) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DocumentTokenMutation) EdgeCleared(name string) bool { + switch name { + case documenttoken.EdgeDocument: + return m.cleareddocument + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DocumentTokenMutation) ClearEdge(name string) error { + switch name { + case documenttoken.EdgeDocument: + m.ClearDocument() + return nil + } + return fmt.Errorf("unknown DocumentToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *DocumentTokenMutation) ResetEdge(name string) error {
+	switch name {
+	case documenttoken.EdgeDocument:
+		m.ResetDocument()
+		return nil
+	}
+	return fmt.Errorf("unknown DocumentToken edge %s", name)
+}
+
 // GroupMutation represents an operation that mutates the Group nodes in the graph.
 type GroupMutation struct {
 	config
@@ -611,6 +1884,9 @@ type GroupMutation struct {
 	labels           map[uuid.UUID]struct{}
 	removedlabels    map[uuid.UUID]struct{}
 	clearedlabels    bool
+	documents        map[uuid.UUID]struct{}
+	removeddocuments map[uuid.UUID]struct{}
+	cleareddocuments bool
 	done      bool
 	oldValue  func(context.Context) (*Group, error)
 	predicates []predicate.Group
@@ -1080,6 +2356,60 @@ func (m *GroupMutation) ResetLabels() {
 	m.removedlabels = nil
 }
 
+// AddDocumentIDs adds the "documents" edge to the Document entity by ids.
+func (m *GroupMutation) AddDocumentIDs(ids ...uuid.UUID) {
+	if m.documents == nil {
+		m.documents = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		m.documents[ids[i]] = struct{}{}
+	}
+}
+
+// ClearDocuments clears the "documents" edge to the Document entity.
+func (m *GroupMutation) ClearDocuments() {
+	m.cleareddocuments = true
+}
+
+// DocumentsCleared reports if the "documents" edge to the Document entity was cleared.
+func (m *GroupMutation) DocumentsCleared() bool {
+	return m.cleareddocuments
+}
+
+// RemoveDocumentIDs removes the "documents" edge to the Document entity by IDs.
+func (m *GroupMutation) RemoveDocumentIDs(ids ...uuid.UUID) {
+	if m.removeddocuments == nil {
+		m.removeddocuments = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		delete(m.documents, ids[i])
+		m.removeddocuments[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedDocumentsIDs returns the removed IDs of the "documents" edge to the Document entity.
+func (m *GroupMutation) RemovedDocumentsIDs() (ids []uuid.UUID) {
+	for id := range m.removeddocuments {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// DocumentsIDs returns the "documents" edge IDs in the mutation.
+func (m *GroupMutation) DocumentsIDs() (ids []uuid.UUID) { + for id := range m.documents { + ids = append(ids, id) + } + return +} + +// ResetDocuments resets all changes to the "documents" edge. +func (m *GroupMutation) ResetDocuments() { + m.documents = nil + m.cleareddocuments = false + m.removeddocuments = nil +} + // Where appends a list predicates to the GroupMutation builder. func (m *GroupMutation) Where(ps ...predicate.Group) { m.predicates = append(m.predicates, ps...) @@ -1249,7 +2579,7 @@ func (m *GroupMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *GroupMutation) AddedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.users != nil { edges = append(edges, group.EdgeUsers) } @@ -1262,6 +2592,9 @@ func (m *GroupMutation) AddedEdges() []string { if m.labels != nil { edges = append(edges, group.EdgeLabels) } + if m.documents != nil { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1293,13 +2626,19 @@ func (m *GroupMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeDocuments: + ids := make([]ent.Value, 0, len(m.documents)) + for id := range m.documents { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
func (m *GroupMutation) RemovedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.removedusers != nil { edges = append(edges, group.EdgeUsers) } @@ -1312,6 +2651,9 @@ func (m *GroupMutation) RemovedEdges() []string { if m.removedlabels != nil { edges = append(edges, group.EdgeLabels) } + if m.removeddocuments != nil { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1343,13 +2685,19 @@ func (m *GroupMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeDocuments: + ids := make([]ent.Value, 0, len(m.removeddocuments)) + for id := range m.removeddocuments { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *GroupMutation) ClearedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.clearedusers { edges = append(edges, group.EdgeUsers) } @@ -1362,6 +2710,9 @@ func (m *GroupMutation) ClearedEdges() []string { if m.clearedlabels { edges = append(edges, group.EdgeLabels) } + if m.cleareddocuments { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1377,6 +2728,8 @@ func (m *GroupMutation) EdgeCleared(name string) bool { return m.cleareditems case group.EdgeLabels: return m.clearedlabels + case group.EdgeDocuments: + return m.cleareddocuments } return false } @@ -1405,6 +2758,9 @@ func (m *GroupMutation) ResetEdge(name string) error { case group.EdgeLabels: m.ResetLabels() return nil + case group.EdgeDocuments: + m.ResetDocuments() + return nil } return fmt.Errorf("unknown Group edge %s", name) } diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 6053082..7d96d7f 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -9,6 +9,12 @@ import ( // AuthTokens is the predicate function for authtokens builders. 
type AuthTokens func(*sql.Selector) +// Document is the predicate function for document builders. +type Document func(*sql.Selector) + +// DocumentToken is the predicate function for documenttoken builders. +type DocumentToken func(*sql.Selector) + // Group is the predicate function for group builders. type Group func(*sql.Selector) diff --git a/backend/ent/runtime.go b/backend/ent/runtime.go index 6a5b8df..d5d5eb8 100644 --- a/backend/ent/runtime.go +++ b/backend/ent/runtime.go @@ -7,6 +7,8 @@ import ( "github.com/google/uuid" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -43,6 +45,92 @@ func init() { authtokensDescID := authtokensMixinFields0[0].Descriptor() // authtokens.DefaultID holds the default value on creation for the id field. authtokens.DefaultID = authtokensDescID.Default.(func() uuid.UUID) + documentMixin := schema.Document{}.Mixin() + documentMixinFields0 := documentMixin[0].Fields() + _ = documentMixinFields0 + documentFields := schema.Document{}.Fields() + _ = documentFields + // documentDescCreatedAt is the schema descriptor for created_at field. + documentDescCreatedAt := documentMixinFields0[1].Descriptor() + // document.DefaultCreatedAt holds the default value on creation for the created_at field. + document.DefaultCreatedAt = documentDescCreatedAt.Default.(func() time.Time) + // documentDescUpdatedAt is the schema descriptor for updated_at field. + documentDescUpdatedAt := documentMixinFields0[2].Descriptor() + // document.DefaultUpdatedAt holds the default value on creation for the updated_at field. + document.DefaultUpdatedAt = documentDescUpdatedAt.Default.(func() time.Time) + // document.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
+ document.UpdateDefaultUpdatedAt = documentDescUpdatedAt.UpdateDefault.(func() time.Time) + // documentDescTitle is the schema descriptor for title field. + documentDescTitle := documentFields[0].Descriptor() + // document.TitleValidator is a validator for the "title" field. It is called by the builders before save. + document.TitleValidator = func() func(string) error { + validators := documentDescTitle.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(title string) error { + for _, fn := range fns { + if err := fn(title); err != nil { + return err + } + } + return nil + } + }() + // documentDescPath is the schema descriptor for path field. + documentDescPath := documentFields[1].Descriptor() + // document.PathValidator is a validator for the "path" field. It is called by the builders before save. + document.PathValidator = func() func(string) error { + validators := documentDescPath.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_path string) error { + for _, fn := range fns { + if err := fn(_path); err != nil { + return err + } + } + return nil + } + }() + // documentDescID is the schema descriptor for id field. + documentDescID := documentMixinFields0[0].Descriptor() + // document.DefaultID holds the default value on creation for the id field. + document.DefaultID = documentDescID.Default.(func() uuid.UUID) + documenttokenMixin := schema.DocumentToken{}.Mixin() + documenttokenMixinFields0 := documenttokenMixin[0].Fields() + _ = documenttokenMixinFields0 + documenttokenFields := schema.DocumentToken{}.Fields() + _ = documenttokenFields + // documenttokenDescCreatedAt is the schema descriptor for created_at field. + documenttokenDescCreatedAt := documenttokenMixinFields0[1].Descriptor() + // documenttoken.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ documenttoken.DefaultCreatedAt = documenttokenDescCreatedAt.Default.(func() time.Time) + // documenttokenDescUpdatedAt is the schema descriptor for updated_at field. + documenttokenDescUpdatedAt := documenttokenMixinFields0[2].Descriptor() + // documenttoken.DefaultUpdatedAt holds the default value on creation for the updated_at field. + documenttoken.DefaultUpdatedAt = documenttokenDescUpdatedAt.Default.(func() time.Time) + // documenttoken.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + documenttoken.UpdateDefaultUpdatedAt = documenttokenDescUpdatedAt.UpdateDefault.(func() time.Time) + // documenttokenDescToken is the schema descriptor for token field. + documenttokenDescToken := documenttokenFields[0].Descriptor() + // documenttoken.TokenValidator is a validator for the "token" field. It is called by the builders before save. + documenttoken.TokenValidator = documenttokenDescToken.Validators[0].(func([]byte) error) + // documenttokenDescUses is the schema descriptor for uses field. + documenttokenDescUses := documenttokenFields[1].Descriptor() + // documenttoken.DefaultUses holds the default value on creation for the uses field. + documenttoken.DefaultUses = documenttokenDescUses.Default.(int) + // documenttokenDescExpiresAt is the schema descriptor for expires_at field. + documenttokenDescExpiresAt := documenttokenFields[2].Descriptor() + // documenttoken.DefaultExpiresAt holds the default value on creation for the expires_at field. + documenttoken.DefaultExpiresAt = documenttokenDescExpiresAt.Default.(func() time.Time) + // documenttokenDescID is the schema descriptor for id field. + documenttokenDescID := documenttokenMixinFields0[0].Descriptor() + // documenttoken.DefaultID holds the default value on creation for the id field. 
+ documenttoken.DefaultID = documenttokenDescID.Default.(func() uuid.UUID) groupMixin := schema.Group{}.Mixin() groupMixinFields0 := groupMixin[0].Fields() _ = groupMixinFields0 diff --git a/backend/ent/schema/auth_tokens.go b/backend/ent/schema/auth_tokens.go index 09297d0..9063581 100644 --- a/backend/ent/schema/auth_tokens.go +++ b/backend/ent/schema/auth_tokens.go @@ -42,7 +42,6 @@ func (AuthTokens) Edges() []ent.Edge { func (AuthTokens) Indexes() []ent.Index { return []ent.Index{ - // non-unique index. index.Fields("token"), } } diff --git a/backend/ent/schema/document.go b/backend/ent/schema/document.go new file mode 100644 index 0000000..4a34b92 --- /dev/null +++ b/backend/ent/schema/document.go @@ -0,0 +1,46 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/schema/mixins" +) + +// Document holds the schema definition for the Document entity. +type Document struct { + ent.Schema +} + +func (Document) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + } +} + +// Fields of the Document. +func (Document) Fields() []ent.Field { + return []ent.Field{ + field.String("title"). + MaxLen(255). + NotEmpty(), + field.String("path"). + MaxLen(500). + NotEmpty(), + } +} + +// Edges of the Document. +func (Document) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("group", Group.Type). + Ref("documents"). + Required(). + Unique(), + edge.To("document_tokens", DocumentToken.Type). 
+ Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + } +} diff --git a/backend/ent/schema/document_token.go b/backend/ent/schema/document_token.go new file mode 100644 index 0000000..8a4d10a --- /dev/null +++ b/backend/ent/schema/document_token.go @@ -0,0 +1,50 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "github.com/hay-kot/content/backend/ent/schema/mixins" +) + +// DocumentToken holds the schema definition for the DocumentToken entity. +type DocumentToken struct { + ent.Schema +} + +func (DocumentToken) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + } +} + +// Fields of the DocumentToken. +func (DocumentToken) Fields() []ent.Field { + return []ent.Field{ + field.Bytes("token"). + NotEmpty(). + Unique(), + field.Int("uses"). + Default(1), + field.Time("expires_at"). + Default(func() time.Time { return time.Now().Add(time.Minute * 10) }), + } +} + +// Edges of the DocumentToken. +func (DocumentToken) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("document", Document.Type). + Ref("document_tokens"). + Unique(), + } +} + +func (DocumentToken) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("token"), + } +} diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 68c3b99..a5c863e 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -34,17 +34,25 @@ func (Group) Fields() []ent.Field { // Edges of the Home. 
func (Group) Edges() []ent.Edge { return []ent.Edge{ - edge.To("users", User.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("locations", Location.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("items", Item.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("labels", Label.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), + edge.To("users", User.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("locations", Location.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("items", Item.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("labels", Label.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("documents", Document.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), } } diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 69d1007..6dc9a55 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -14,6 +14,10 @@ type Tx struct { config // AuthTokens is the client for interacting with the AuthTokens builders. AuthTokens *AuthTokensClient + // Document is the client for interacting with the Document builders. + Document *DocumentClient + // DocumentToken is the client for interacting with the DocumentToken builders. + DocumentToken *DocumentTokenClient // Group is the client for interacting with the Group builders. Group *GroupClient // Item is the client for interacting with the Item builders. 
@@ -162,6 +166,8 @@ func (tx *Tx) Client() *Client { func (tx *Tx) init() { tx.AuthTokens = NewAuthTokensClient(tx.config) + tx.Document = NewDocumentClient(tx.config) + tx.DocumentToken = NewDocumentTokenClient(tx.config) tx.Group = NewGroupClient(tx.config) tx.Item = NewItemClient(tx.config) tx.ItemField = NewItemFieldClient(tx.config) diff --git a/backend/internal/mocks/factories/users.go b/backend/internal/mocks/factories/users.go index 438b2d8..1265768 100644 --- a/backend/internal/mocks/factories/users.go +++ b/backend/internal/mocks/factories/users.go @@ -8,9 +8,9 @@ import ( func UserFactory() types.UserCreate { f := faker.NewFaker() return types.UserCreate{ - Name: f.RandomString(10), - Email: f.RandomEmail(), - Password: f.RandomString(10), - IsSuperuser: f.RandomBool(), + Name: f.Str(10), + Email: f.Email(), + Password: f.Str(10), + IsSuperuser: f.Bool(), } } diff --git a/backend/internal/repo/repo_documents.go b/backend/internal/repo/repo_documents.go new file mode 100644 index 0000000..9cb4c0f --- /dev/null +++ b/backend/internal/repo/repo_documents.go @@ -0,0 +1,47 @@ +package repo + +import ( + "context" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/internal/types" +) + +// DocumentRepository is a repository for Document entity +type DocumentRepository struct { + db *ent.Client +} + +func (r *DocumentRepository) Create(ctx context.Context, gid uuid.UUID, doc types.DocumentCreate) (*ent.Document, error) { + return r.db.Document.Create(). + SetGroupID(gid). + SetTitle(doc.Title). + SetPath(doc.Path). + Save(ctx) +} + +func (r *DocumentRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]*ent.Document, error) { + return r.db.Document.Query(). + Where(document.HasGroupWith(group.ID(gid))). 
+ All(ctx) +} + +func (r *DocumentRepository) Get(ctx context.Context, id uuid.UUID) (*ent.Document, error) { + return r.db.Document.Query(). + Where(document.ID(id)). + Only(ctx) +} + +func (r *DocumentRepository) Update(ctx context.Context, id uuid.UUID, doc types.DocumentUpdate) (*ent.Document, error) { + return r.db.Document.UpdateOneID(id). + SetTitle(doc.Title). + SetPath(doc.Path). + Save(ctx) +} + +func (r *DocumentRepository) Delete(ctx context.Context, id uuid.UUID) error { + return r.db.Document.DeleteOneID(id).Exec(ctx) +} diff --git a/backend/internal/repo/repo_documents_test.go b/backend/internal/repo/repo_documents_test.go new file mode 100644 index 0000000..187ca0f --- /dev/null +++ b/backend/internal/repo/repo_documents_test.go @@ -0,0 +1,202 @@ +package repo + +import ( + "context" + "testing" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/internal/types" + "github.com/stretchr/testify/assert" +) + +func TestDocumentRepository_Create(t *testing.T) { + type args struct { + ctx context.Context + gid uuid.UUID + doc types.DocumentCreate + } + tests := []struct { + name string + args args + want *ent.Document + wantErr bool + }{ + { + name: "create document", + args: args{ + ctx: context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "test document", + Path: "/test/document", + }, + }, + want: &ent.Document{ + Title: "test document", + Path: "/test/document", + }, + wantErr: false, + }, + { + name: "create document with empty title", + args: args{ + ctx: context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "", + Path: "/test/document", + }, + }, + want: nil, + wantErr: true, + }, + { + name: "create document with empty path", + args: args{ + ctx: context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "test document", + Path: "", + }, + }, + want: nil, + wantErr: true, + }, + } + ids := make([]uuid.UUID, 0, len(tests)) + 
+ t.Cleanup(func() { + for _, id := range ids { + err := tRepos.Docs.Delete(context.Background(), id) + assert.NoError(t, err) + } + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tRepos.Docs.Create(tt.args.ctx, tt.args.gid, tt.args.doc) + if (err != nil) != tt.wantErr { + t.Errorf("DocumentRepository.Create() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + return + } + + assert.Equal(t, tt.want.Title, got.Title) + assert.Equal(t, tt.want.Path, got.Path) + ids = append(ids, got.ID) + }) + } +} + +func useDocs(t *testing.T, num int) []*ent.Document { + t.Helper() + + results := make([]*ent.Document, 0, num) + ids := make([]uuid.UUID, 0, num) + + for i := 0; i < num; i++ { + doc, err := tRepos.Docs.Create(context.Background(), tGroup.ID, types.DocumentCreate{ + Title: fk.Str(10), + Path: fk.Path(), + }) + + assert.NoError(t, err) + assert.NotNil(t, doc) + results = append(results, doc) + ids = append(ids, doc.ID) + } + + t.Cleanup(func() { + for _, id := range ids { + err := tRepos.Docs.Delete(context.Background(), id) + + if err != nil { + assert.True(t, ent.IsNotFound(err)) + } + } + }) + + return results +} + +func TestDocumentRepository_GetAll(t *testing.T) { + entities := useDocs(t, 10) + + for _, entity := range entities { + assert.NotNil(t, entity) + } + + all, err := tRepos.Docs.GetAll(context.Background(), tGroup.ID) + assert.NoError(t, err) + + assert.Len(t, all, 10) + for _, entity := range all { + assert.NotNil(t, entity) + + for _, e := range entities { + if e.ID == entity.ID { + assert.Equal(t, e.Title, entity.Title) + assert.Equal(t, e.Path, entity.Path) + } + } + } +} + +func TestDocumentRepository_Get(t *testing.T) { + entities := useDocs(t, 10) + + for _, entity := range entities { + got, err := tRepos.Docs.Get(context.Background(), entity.ID) + + assert.NoError(t, err) + assert.Equal(t, entity.ID, got.ID) + assert.Equal(t, entity.Title, 
got.Title)
+		assert.Equal(t, entity.Path, got.Path)
+	}
+}
+
+func TestDocumentRepository_Update(t *testing.T) {
+	entities := useDocs(t, 10)
+
+	for _, entity := range entities {
+		got, err := tRepos.Docs.Get(context.Background(), entity.ID)
+
+		assert.NoError(t, err)
+		assert.Equal(t, entity.ID, got.ID)
+		assert.Equal(t, entity.Title, got.Title)
+		assert.Equal(t, entity.Path, got.Path)
+	}
+
+	for _, entity := range entities {
+		updateData := types.DocumentUpdate{
+			Title: fk.Str(10),
+			Path:  fk.Path(),
+		}
+
+		updated, err := tRepos.Docs.Update(context.Background(), entity.ID, updateData)
+
+		assert.NoError(t, err)
+		assert.Equal(t, entity.ID, updated.ID)
+		assert.Equal(t, updateData.Title, updated.Title)
+		assert.Equal(t, updateData.Path, updated.Path)
+	}
+}
+
+func TestDocumentRepository_Delete(t *testing.T) {
+	entities := useDocs(t, 10)
+
+	for _, entity := range entities {
+		err := tRepos.Docs.Delete(context.Background(), entity.ID)
+		assert.NoError(t, err)
+
+		_, err = tRepos.Docs.Get(context.Background(), entity.ID)
+		assert.Error(t, err)
+	}
+}
diff --git a/backend/internal/repo/repo_documents_tokens.go b/backend/internal/repo/repo_documents_tokens.go
new file mode 100644
index 0000000..7c260c4
--- /dev/null
+++ b/backend/internal/repo/repo_documents_tokens.go
@@ -0,0 +1,41 @@
+package repo
+
+import (
+	"context"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/hay-kot/content/backend/ent"
+	"github.com/hay-kot/content/backend/ent/documenttoken"
+	"github.com/hay-kot/content/backend/internal/types"
+)
+
+// DocumentTokensRepository is a repository for DocumentToken entity
+type DocumentTokensRepository struct {
+	db *ent.Client
+}
+
+func (r *DocumentTokensRepository) Create(ctx context.Context, data types.DocumentTokenCreate) (*ent.DocumentToken, error) {
+	result, err := r.db.DocumentToken.Create().
+		SetDocumentID(data.DocumentID).
+		SetToken(data.TokenHash).
+		SetExpiresAt(data.ExpiresAt).
+ Save(ctx) + + if err != nil { + return nil, err + } + + return r.db.DocumentToken.Query(). + Where(documenttoken.ID(result.ID)). + WithDocument(). + Only(ctx) +} + +func (r *DocumentTokensRepository) PurgeExpiredTokens(ctx context.Context) (int, error) { + return r.db.DocumentToken.Delete().Where(documenttoken.ExpiresAtLT(time.Now())).Exec(ctx) +} + +func (r *DocumentTokensRepository) Delete(ctx context.Context, id uuid.UUID) error { + return r.db.DocumentToken.DeleteOneID(id).Exec(ctx) +} diff --git a/backend/internal/repo/repo_documents_tokens_test.go b/backend/internal/repo/repo_documents_tokens_test.go new file mode 100644 index 0000000..7106253 --- /dev/null +++ b/backend/internal/repo/repo_documents_tokens_test.go @@ -0,0 +1,149 @@ +package repo + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/internal/types" + "github.com/stretchr/testify/assert" +) + +func TestDocumentTokensRepository_Create(t *testing.T) { + entities := useDocs(t, 1) + doc := entities[0] + expires := fk.Time() + + type args struct { + ctx context.Context + data types.DocumentTokenCreate + } + tests := []struct { + name string + args args + want *ent.DocumentToken + wantErr bool + }{ + { + name: "create document token", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: doc.ID, + TokenHash: []byte("token"), + ExpiresAt: expires, + }, + }, + want: &ent.DocumentToken{ + Edges: ent.DocumentTokenEdges{ + Document: doc, + }, + Token: []byte("token"), + ExpiresAt: expires, + }, + wantErr: false, + }, + { + name: "create document token with empty token", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: doc.ID, + TokenHash: []byte(""), + ExpiresAt: expires, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "create document token with empty 
document id", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: uuid.Nil, + TokenHash: []byte("token"), + ExpiresAt: expires, + }, + }, + want: nil, + wantErr: true, + }, + } + + ids := make([]uuid.UUID, 0, len(tests)) + + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.DocTokens.Delete(context.Background(), id) + } + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, err := tRepos.DocTokens.Create(tt.args.ctx, tt.args.data) + if (err != nil) != tt.wantErr { + t.Errorf("DocumentTokensRepository.Create() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + + assert.Equal(t, tt.want.Token, got.Token) + assert.WithinDuration(t, tt.want.ExpiresAt, got.ExpiresAt, time.Duration(1)*time.Second) + assert.Equal(t, tt.want.Edges.Document.ID, got.Edges.Document.ID) + }) + + } +} + +func useDocTokens(t *testing.T, num int) []*ent.DocumentToken { + entity := useDocs(t, 1)[0] + + results := make([]*ent.DocumentToken, 0, num) + + ids := make([]uuid.UUID, 0, num) + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.DocTokens.Delete(context.Background(), id) + } + }) + + for i := 0; i < num; i++ { + e, err := tRepos.DocTokens.Create(context.Background(), types.DocumentTokenCreate{ + DocumentID: entity.ID, + TokenHash: []byte(fk.Str(10)), + ExpiresAt: fk.Time(), + }) + + assert.NoError(t, err) + results = append(results, e) + ids = append(ids, e.ID) + } + + return results +} + +func TestDocumentTokensRepository_PurgeExpiredTokens(t *testing.T) { + entities := useDocTokens(t, 2) + + // set expired token + tRepos.DocTokens.db.DocumentToken.Update(). + Where(documenttoken.ID(entities[0].ID)). + SetExpiresAt(time.Now().Add(-time.Hour)). 
+ ExecX(context.Background()) + + count, err := tRepos.DocTokens.PurgeExpiredTokens(context.Background()) + assert.NoError(t, err) + assert.Equal(t, 1, count) + + all, err := tRepos.DocTokens.db.DocumentToken.Query().All(context.Background()) + assert.NoError(t, err) + assert.Len(t, all, 1) + assert.Equal(t, entities[1].ID, all[0].ID) +} diff --git a/backend/internal/repo/repo_items_test.go b/backend/internal/repo/repo_items_test.go index 2768dc4..e3fc078 100644 --- a/backend/internal/repo/repo_items_test.go +++ b/backend/internal/repo/repo_items_test.go @@ -12,8 +12,8 @@ import ( func itemFactory() types.ItemCreate { return types.ItemCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } @@ -141,20 +141,20 @@ func TestItemsRepository_Update(t *testing.T) { ID: entity.ID, Name: entity.Name, LocationID: entity.Edges.Location.ID, - SerialNumber: fk.RandomString(10), + SerialNumber: fk.Str(10), LabelIDs: nil, - ModelNumber: fk.RandomString(10), - Manufacturer: fk.RandomString(10), + ModelNumber: fk.Str(10), + Manufacturer: fk.Str(10), PurchaseTime: time.Now(), - PurchaseFrom: fk.RandomString(10), + PurchaseFrom: fk.Str(10), PurchasePrice: 300.99, SoldTime: time.Now(), - SoldTo: fk.RandomString(10), + SoldTo: fk.Str(10), SoldPrice: 300.99, - SoldNotes: fk.RandomString(10), - Notes: fk.RandomString(10), + SoldNotes: fk.Str(10), + Notes: fk.Str(10), WarrantyExpires: time.Now(), - WarrantyDetails: fk.RandomString(10), + WarrantyDetails: fk.Str(10), LifetimeWarranty: true, } diff --git a/backend/internal/repo/repo_labels_test.go b/backend/internal/repo/repo_labels_test.go index f647753..81a6f68 100644 --- a/backend/internal/repo/repo_labels_test.go +++ b/backend/internal/repo/repo_labels_test.go @@ -11,8 +11,8 @@ import ( func labelFactory() types.LabelCreate { return types.LabelCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } 
@@ -75,8 +75,8 @@ func TestLabelRepository_Update(t *testing.T) { updateData := types.LabelUpdate{ ID: loc.ID, - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } update, err := tRepos.Labels.Update(context.Background(), updateData) diff --git a/backend/internal/repo/repo_locations_test.go b/backend/internal/repo/repo_locations_test.go index e8d2f54..9370305 100644 --- a/backend/internal/repo/repo_locations_test.go +++ b/backend/internal/repo/repo_locations_test.go @@ -10,8 +10,8 @@ import ( func locationFactory() types.LocationCreate { return types.LocationCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } @@ -31,14 +31,14 @@ func TestLocationRepository_Get(t *testing.T) { func TestLocationRepositoryGetAllWithCount(t *testing.T) { ctx := context.Background() result, err := tRepos.Locations.Create(ctx, tGroup.ID, types.LocationCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), }) assert.NoError(t, err) _, err = tRepos.Items.Create(ctx, tGroup.ID, types.ItemCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), LocationID: result.ID, }) @@ -74,8 +74,8 @@ func TestLocationRepository_Update(t *testing.T) { updateData := types.LocationUpdate{ ID: loc.ID, - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } update, err := tRepos.Locations.Update(context.Background(), updateData) diff --git a/backend/internal/repo/repo_users_test.go b/backend/internal/repo/repo_users_test.go index 98f115d..f08acb4 100644 --- a/backend/internal/repo/repo_users_test.go +++ b/backend/internal/repo/repo_users_test.go @@ -13,10 +13,10 @@ import ( func userFactory() types.UserCreate { return types.UserCreate{ - Name: fk.RandomString(10), - Email: 
fk.RandomEmail(), - Password: fk.RandomString(10), - IsSuperuser: fk.RandomBool(), + Name: fk.Str(10), + Email: fk.Email(), + Password: fk.Str(10), + IsSuperuser: fk.Bool(), GroupID: tGroup.ID, } } @@ -109,8 +109,8 @@ func TestUserRepo_Update(t *testing.T) { assert.NoError(t, err) updateData := types.UserUpdate{ - Name: fk.RandomString(10), - Email: fk.RandomEmail(), + Name: fk.Str(10), + Email: fk.Email(), } // Update diff --git a/backend/internal/repo/repos_all.go b/backend/internal/repo/repos_all.go index 3542728..d4aa65e 100644 --- a/backend/internal/repo/repos_all.go +++ b/backend/internal/repo/repos_all.go @@ -10,6 +10,8 @@ type AllRepos struct { Locations *LocationRepository Labels *LabelRepository Items *ItemsRepository + Docs *DocumentRepository + DocTokens *DocumentTokensRepository } func EntAllRepos(db *ent.Client) *AllRepos { @@ -20,5 +22,7 @@ func EntAllRepos(db *ent.Client) *AllRepos { Locations: &LocationRepository{db}, Labels: &LabelRepository{db}, Items: &ItemsRepository{db}, + Docs: &DocumentRepository{db}, + DocTokens: &DocumentTokensRepository{db}, } } diff --git a/backend/internal/services/main_test.go b/backend/internal/services/main_test.go index 9f65278..0f5e78a 100644 --- a/backend/internal/services/main_test.go +++ b/backend/internal/services/main_test.go @@ -36,10 +36,10 @@ func bootstrap() { } tUser, err = tRepos.Users.Create(ctx, types.UserCreate{ - Name: fk.RandomString(10), - Email: fk.RandomEmail(), - Password: fk.RandomString(10), - IsSuperuser: fk.RandomBool(), + Name: fk.Str(10), + Email: fk.Email(), + Password: fk.Str(10), + IsSuperuser: fk.Bool(), GroupID: tGroup.ID, }) if err != nil { diff --git a/backend/internal/types/document_types.go b/backend/internal/types/document_types.go new file mode 100644 index 0000000..cc903aa --- /dev/null +++ b/backend/internal/types/document_types.go @@ -0,0 +1,29 @@ +package types + +import ( + "time" + + "github.com/google/uuid" +) + +type DocumentCreate struct { + Title string `json:"name"` + 
Path string `json:"path"` +} + +type DocumentUpdate struct { + ID uuid.UUID `json:"id"` + Title string `json:"name"` + Path string `json:"path"` +} + +type DocumentToken struct { + Raw string `json:"raw"` + ExpiresAt time.Time `json:"expiresAt"` +} + +type DocumentTokenCreate struct { + TokenHash []byte `json:"tokenHash"` + DocumentID uuid.UUID `json:"documentId"` + ExpiresAt time.Time `json:"expiresAt"` +} diff --git a/backend/pkgs/faker/random.go b/backend/pkgs/faker/random.go index 42ef538..05428fa 100644 --- a/backend/pkgs/faker/random.go +++ b/backend/pkgs/faker/random.go @@ -15,7 +15,11 @@ func NewFaker() *Faker { return &Faker{} } -func (f *Faker) RandomString(length int) string { +func (f *Faker) Time() time.Time { + return time.Now().Add(time.Duration(f.Num(1, 100)) * time.Hour) +} + +func (f *Faker) Str(length int) string { b := make([]rune, length) for i := range b { @@ -24,14 +28,18 @@ func (f *Faker) RandomString(length int) string { return string(b) } -func (f *Faker) RandomEmail() string { - return f.RandomString(10) + "@email.com" +func (f *Faker) Path() string { + return "/" + f.Str(10) + "/" + f.Str(10) + "/" + f.Str(10) } -func (f *Faker) RandomBool() bool { +func (f *Faker) Email() string { + return f.Str(10) + "@email.com" +} + +func (f *Faker) Bool() bool { return rand.Intn(2) == 1 } -func (f *Faker) RandomNumber(min, max int) int { +func (f *Faker) Num(min, max int) int { return rand.Intn(max-min) + min } diff --git a/backend/pkgs/faker/randoms_test.go b/backend/pkgs/faker/randoms_test.go index 79747c2..0773205 100644 --- a/backend/pkgs/faker/randoms_test.go +++ b/backend/pkgs/faker/randoms_test.go @@ -25,7 +25,7 @@ func Test_GetRandomString(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - generated[i] = faker.RandomString(10) + generated[i] = faker.Str(10) } if !ValidateUnique(generated) { @@ -41,7 +41,7 @@ func Test_GetRandomEmail(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - generated[i] = 
faker.RandomEmail() + generated[i] = faker.Email() } if !ValidateUnique(generated) { @@ -58,7 +58,7 @@ func Test_GetRandomBool(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - if faker.RandomBool() { + if faker.Bool() { trues++ } else { falses++ @@ -81,7 +81,7 @@ func Test_RandomNumber(t *testing.T) { last := MIN - 1 for i := 0; i < Loops; i++ { - n := f.RandomNumber(MIN, MAX) + n := f.Num(MIN, MAX) if n == last { t.Errorf("RandomNumber() failed to generate unique number") diff --git a/backend/pkgs/server/response_error_builder_test.go b/backend/pkgs/server/response_error_builder_test.go index b556a18..40de141 100644 --- a/backend/pkgs/server/response_error_builder_test.go +++ b/backend/pkgs/server/response_error_builder_test.go @@ -49,7 +49,7 @@ func Test_ErrorBuilder_AddError(t *testing.T) { errorStrings := make([]string, 10) for i := 0; i < 10; i++ { - err := errors.New(f.RandomString(10)) + err := errors.New(f.Str(10)) randomError[i] = err errorStrings[i] = err.Error() } @@ -72,7 +72,7 @@ func Test_ErrorBuilder_Respond(t *testing.T) { randomError := make([]error, 5) for i := 0; i < 5; i++ { - err := errors.New(f.RandomString(5)) + err := errors.New(f.Str(5)) randomError[i] = err }