diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..30ef4f4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,31 @@ +version: 2 +updates: + # Fetch and update latest `npm` packages + - package-ecosystem: npm + directory: "/frontend" + schedule: + interval: daily + time: "00:00" + open-pull-requests-limit: 10 + reviewers: + - hay-kot + assignees: + - hay-kot + commit-message: + prefix: fix + prefix-development: chore + include: scope + - package-ecosystem: gomod + directory: "/backend" + schedule: + interval: daily + time: "00:00" + open-pull-requests-limit: 10 + reviewers: + - hay-kot + assignees: + - hay-kot + commit-message: + prefix: fix + prefix-development: chore + include: scope diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..f4e8f04 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,72 @@ + + +## What type of PR is this? + +_(REQUIRED)_ + + + +- bug +- cleanup +- documentation +- feature + +## What this PR does / why we need it: + +_(REQUIRED)_ + + + +## Which issue(s) this PR fixes: + +_(REQUIRED)_ + + + +## Special notes for your reviewer: + +_(fill-in or delete this section)_ + + + +## Testing + +_(fill-in or delete this section)_ + + + +## Release Notes + +_(REQUIRED)_ + + +```release-note +``` \ No newline at end of file diff --git a/.gitignore b/.gitignore index f5f4657..0aeaf71 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,6 @@ config.yml homebox.db .idea -.vscode .DS_Store test-mailer.json @@ -35,4 +34,13 @@ backend/.env # Output Directory for Nuxt/Frontend during build step backend/app/api/public/* -!backend/app/api/public/.gitkeep \ No newline at end of file +!backend/app/api/public/.gitkeep + +node_modules +*.log* +.nuxt +.nitro +.cache +.output +.env +dist diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..85f49ba --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,14 @@ 
+{ + "editor.codeActionsOnSave": { + "source.fixAll.eslint": true + }, + "yaml.schemas": { + "https://squidfunk.github.io/mkdocs-material/schema.json": "mkdocs.yml" + }, + "explorer.fileNesting.enabled": true, + "explorer.fileNesting.patterns": { + "package.json": "package-lock.json, yarn.lock, .eslintrc.js, tsconfig.json, .prettierrc, .editorconfig, pnpm-lock.yaml, postcss.config.js, tailwind.config.js", + "docker-compose.yml": "Dockerfile, .dockerignore, docker-compose.dev.yml, docker-compose.yml", + "README.md": "LICENSE, SECURITY.md" + } +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..bae94e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. \ No newline at end of file diff --git a/README.md b/README.md index bd53229..009bc5f 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,25 @@ -

HomeBox

-

- Docs - | - Demo - | - Discord +

+ + + + + + + + + + + +
+

HomeBox

+

+ Docs + | + Demo + | + Discord

- - ## MVP Todo - [x] Locations @@ -24,8 +35,9 @@ - [ ] Update - [x] Delete - [ ] Asset Attachments for Items -- [ ] Fields To Add - - [ ] Quantity +- [x] Fields To Add + - [x] Quantity + - [x] Insured (bool) - [ ] Bulk Import via CSV - [x] Initial - [ ] Add Warranty Columns @@ -41,16 +53,18 @@ - [ ] Db Migrations - [ ] How To - [ ] Repo House Keeping + - [x] Add License - [ ] Issues Template - - [ ] PR Templates + - [x] PR Templates - [ ] Contributors Guide - - [ ] Security Policy + - [x] Security Policy - [ ] Feature Request Template - [ ] Embedded Version Info - [ ] Version Number - [ ] Git Has - [ ] Setup Docker Volumes in Dockerfile -## All Todos + +## All Todos - [ ] User Invitation Links to Join Group - [ ] Maintenance Logs @@ -71,4 +85,7 @@ - [x] Warranty Information - [x] Option for Lifetime Warranty or Warranty Period - [ ] Expose Swagger API Documentation - - [ ] Dynamic Port / Host Settings \ No newline at end of file + - [ ] Dynamic Port / Host Settings + +## Credits +- Logo by [@lakotelman](https://github.com/lakotelman) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..56b13b5 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +## Supported Versions + +Since this software is still considered beta/WIP, support is only given for the latest version. + +## Reporting a Vulnerability + +Please open a normal public issue if you have any security-related concerns. 
\ No newline at end of file diff --git a/Taskfile.yml b/Taskfile.yml index bea4fd4..722d647 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -1,20 +1,27 @@ version: "3" tasks: + generate: + cmds: + - | + cd backend && ent generate ./ent/schema \ + --template=ent/schema/templates/has_id.tmpl + - cd backend/app/api/ && swag fmt + - cd backend/app/api/ && swag init --dir=./,../../internal,../../pkgs + - | + npx swagger-typescript-api \ + --no-client \ + --clean-output \ + --modular \ + --path ./backend/app/api/docs/swagger.json \ + --output ./frontend/lib/api/types + + python3 ./scripts/process-types.py ./frontend/lib/api/types/data-contracts.ts api: cmds: - - cd backend/app/api/ && swag fmt - - cd backend/app/api/ && swag init --dir=./,../../internal,../../pkgs,../../ent - # - | - # npx swagger-typescript-api \ - # --path ./backend/app/api/docs/swagger.json \ - # --output ./client/auto-client \ - # --module-name-first-tag \ - # --modular + - task: generate - cd backend && go run ./app/api/ {{.CLI_ARGS}} silent: false - sources: - - ./backend/**/*.go api:build: cmds: @@ -26,6 +33,10 @@ tasks: - cd backend && go test ./app/api/ silent: true + api:watch: + cmds: + - cd backend && gotestsum --watch ./... + api:coverage: cmds: - cd backend && go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... 
-v -cover @@ -39,12 +50,12 @@ tasks: - cd frontend && pnpm run test:ci silent: true - docker:build: + frontend:watch: + desc: Starts the vitest test runner in watch mode cmds: - - cd backend && docker-compose up --build - silent: true + - cd frontend && pnpm vitest --watch - generate:types: + frontend: + desc: Run frontend development server cmds: - - cd backend && go run ./app/generator - silent: true + - cd frontend && pnpm dev diff --git a/backend/app/api/docs/docs.go b/backend/app/api/docs/docs.go index 0982e99..b0b3d51 100644 --- a/backend/app/api/docs/docs.go +++ b/backend/app/api/docs/docs.go @@ -49,7 +49,7 @@ const docTemplate = `{ "items": { "type": "array", "items": { - "$ref": "#/definitions/types.ItemOut" + "$ref": "#/definitions/types.ItemSummary" } } } @@ -175,6 +175,15 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/types.ItemUpdate" + } } ], "responses": { @@ -691,7 +700,7 @@ const docTemplate = `{ "type": "object", "properties": { "item": { - "$ref": "#/definitions/ent.User" + "$ref": "#/definitions/types.UserOut" } } } @@ -788,422 +797,6 @@ const docTemplate = `{ } }, "definitions": { - "ent.AuthTokens": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the AuthTokensQuery when eager-loading is set.", - "$ref": "#/definitions/ent.AuthTokensEdges" - }, - "expires_at": { - "description": "ExpiresAt holds the value of the \"expires_at\" field.", - "type": "string" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "token": { - "description": "Token holds the value of the \"token\" field.", - "type": "array", - "items": { - "type": "integer" 
- } - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.AuthTokensEdges": { - "type": "object", - "properties": { - "user": { - "description": "User holds the value of the user edge.", - "$ref": "#/definitions/ent.User" - } - } - }, - "ent.Group": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "currency": { - "description": "Currency holds the value of the \"currency\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the GroupQuery when eager-loading is set.", - "$ref": "#/definitions/ent.GroupEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.GroupEdges": { - "type": "object", - "properties": { - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - }, - "labels": { - "description": "Labels holds the value of the labels edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Label" - } - }, - "locations": { - "description": "Locations holds the value of the locations edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Location" - } - }, - "users": { - "description": "Users holds the value of the users edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.User" - } - } - } - }, - "ent.Item": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - 
"description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ItemQuery when eager-loading is set.", - "$ref": "#/definitions/ent.ItemEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "lifetime_warranty": { - "description": "LifetimeWarranty holds the value of the \"lifetime_warranty\" field.", - "type": "boolean" - }, - "manufacturer": { - "description": "Manufacturer holds the value of the \"manufacturer\" field.", - "type": "string" - }, - "model_number": { - "description": "ModelNumber holds the value of the \"model_number\" field.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "notes": { - "description": "Notes holds the value of the \"notes\" field.", - "type": "string" - }, - "purchase_from": { - "description": "PurchaseFrom holds the value of the \"purchase_from\" field.", - "type": "string" - }, - "purchase_price": { - "description": "PurchasePrice holds the value of the \"purchase_price\" field.", - "type": "number" - }, - "purchase_time": { - "description": "PurchaseTime holds the value of the \"purchase_time\" field.", - "type": "string" - }, - "serial_number": { - "description": "SerialNumber holds the value of the \"serial_number\" field.", - "type": "string" - }, - "sold_notes": { - "description": "SoldNotes holds the value of the \"sold_notes\" field.", - "type": "string" - }, - "sold_price": { - "description": "SoldPrice holds the value of the \"sold_price\" field.", - "type": "number" - }, - "sold_time": { - "description": "SoldTime holds the value of the \"sold_time\" field.", - "type": "string" - }, - "sold_to": { - "description": "SoldTo holds the value of the \"sold_to\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value 
of the \"updated_at\" field.", - "type": "string" - }, - "warranty_details": { - "description": "WarrantyDetails holds the value of the \"warranty_details\" field.", - "type": "string" - }, - "warranty_expires": { - "description": "WarrantyExpires holds the value of the \"warranty_expires\" field.", - "type": "string" - } - } - }, - "ent.ItemEdges": { - "type": "object", - "properties": { - "fields": { - "description": "Fields holds the value of the fields edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.ItemField" - } - }, - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "label": { - "description": "Label holds the value of the label edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Label" - } - }, - "location": { - "description": "Location holds the value of the location edge.", - "$ref": "#/definitions/ent.Location" - } - } - }, - "ent.ItemField": { - "type": "object", - "properties": { - "boolean_value": { - "description": "BooleanValue holds the value of the \"boolean_value\" field.", - "type": "boolean" - }, - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ItemFieldQuery when eager-loading is set.", - "$ref": "#/definitions/ent.ItemFieldEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "number_value": { - "description": "NumberValue holds the value of the \"number_value\" field.", - "type": "integer" - }, - "text_value": { - "description": "TextValue holds the value of the \"text_value\" field.", - "type": 
"string" - }, - "time_value": { - "description": "TimeValue holds the value of the \"time_value\" field.", - "type": "string" - }, - "type": { - "description": "Type holds the value of the \"type\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.ItemFieldEdges": { - "type": "object", - "properties": { - "item": { - "description": "Item holds the value of the item edge.", - "$ref": "#/definitions/ent.Item" - } - } - }, - "ent.Label": { - "type": "object", - "properties": { - "color": { - "description": "Color holds the value of the \"color\" field.", - "type": "string" - }, - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the LabelQuery when eager-loading is set.", - "$ref": "#/definitions/ent.LabelEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.LabelEdges": { - "type": "object", - "properties": { - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - } - } - }, - "ent.Location": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": 
"Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the LocationQuery when eager-loading is set.", - "$ref": "#/definitions/ent.LocationEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.LocationEdges": { - "type": "object", - "properties": { - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - } - } - }, - "ent.User": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the UserQuery when eager-loading is set.", - "$ref": "#/definitions/ent.UserEdges" - }, - "email": { - "description": "Email holds the value of the \"email\" field.", - "type": "string" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "is_superuser": { - "description": "IsSuperuser holds the value of the \"is_superuser\" field.", - "type": "boolean" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.UserEdges": { - "type": "object", - "properties": { - "auth_tokens": { - "description": "AuthTokens holds the value of the auth_tokens edge.", - 
"type": "array", - "items": { - "$ref": "#/definitions/ent.AuthTokens" - } - }, - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - } - } - }, "server.Result": { "type": "object", "properties": { @@ -1245,6 +838,37 @@ const docTemplate = `{ } } }, + "types.DocumentOut": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "title": { + "type": "string" + } + } + }, + "types.ItemAttachment": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "document": { + "$ref": "#/definitions/types.DocumentOut" + }, + "id": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, "types.ItemCreate": { "type": "object", "properties": { @@ -1269,6 +893,12 @@ const docTemplate = `{ "types.ItemOut": { "type": "object", "properties": { + "attachments": { + "type": "array", + "items": { + "$ref": "#/definitions/types.ItemAttachment" + } + }, "createdAt": { "type": "string" }, @@ -1278,6 +908,9 @@ const docTemplate = `{ "id": { "type": "string" }, + "insured": { + "type": "boolean" + }, "labels": { "type": "array", "items": { @@ -1309,12 +942,16 @@ const docTemplate = `{ "type": "string" }, "purchasePrice": { - "type": "number" + "type": "string", + "example": "0" }, "purchaseTime": { "description": "Purchase", "type": "string" }, + "quantity": { + "type": "integer" + }, "serialNumber": { "description": "Identifications", "type": "string" @@ -1323,7 +960,8 @@ const docTemplate = `{ "type": "string" }, "soldPrice": { - "type": "number" + "type": "string", + "example": "0" }, "soldTime": { "description": "Sold", @@ -1355,6 +993,9 @@ const docTemplate = `{ "id": { "type": "string" }, + "insured": { + "type": "boolean" + }, "labels": { "type": "array", "items": { @@ -1386,12 +1027,16 @@ const docTemplate = `{ "type": "string" }, "purchasePrice": { - "type": "number" + "type": "string", + "example": "0" }, "purchaseTime": { 
"description": "Purchase", "type": "string" }, + "quantity": { + "type": "integer" + }, "serialNumber": { "description": "Identifications", "type": "string" @@ -1400,7 +1045,8 @@ const docTemplate = `{ "type": "string" }, "soldPrice": { - "type": "number" + "type": "string", + "example": "0" }, "soldTime": { "description": "Sold", @@ -1420,6 +1066,85 @@ const docTemplate = `{ } } }, + "types.ItemUpdate": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "insured": { + "type": "boolean" + }, + "labelIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "lifetimeWarranty": { + "description": "Warranty", + "type": "boolean" + }, + "locationId": { + "description": "Edges", + "type": "string" + }, + "manufacturer": { + "type": "string" + }, + "modelNumber": { + "type": "string" + }, + "name": { + "type": "string" + }, + "notes": { + "description": "Extras", + "type": "string" + }, + "purchaseFrom": { + "type": "string" + }, + "purchasePrice": { + "type": "string", + "example": "0" + }, + "purchaseTime": { + "description": "Purchase", + "type": "string" + }, + "quantity": { + "type": "integer" + }, + "serialNumber": { + "description": "Identifications", + "type": "string" + }, + "soldNotes": { + "type": "string" + }, + "soldPrice": { + "type": "string", + "example": "0" + }, + "soldTime": { + "description": "Sold", + "type": "string" + }, + "soldTo": { + "type": "string" + }, + "warrantyDetails": { + "type": "string" + }, + "warrantyExpires": { + "type": "string" + } + } + }, "types.LabelCreate": { "type": "object", "properties": { @@ -1591,6 +1316,29 @@ const docTemplate = `{ } } }, + "types.UserOut": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "groupName": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isSuperuser": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, 
"types.UserRegistration": { "type": "object", "properties": { diff --git a/backend/app/api/docs/swagger.json b/backend/app/api/docs/swagger.json index a8e0318..f69bd9c 100644 --- a/backend/app/api/docs/swagger.json +++ b/backend/app/api/docs/swagger.json @@ -41,7 +41,7 @@ "items": { "type": "array", "items": { - "$ref": "#/definitions/types.ItemOut" + "$ref": "#/definitions/types.ItemSummary" } } } @@ -167,6 +167,15 @@ "name": "id", "in": "path", "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/types.ItemUpdate" + } } ], "responses": { @@ -683,7 +692,7 @@ "type": "object", "properties": { "item": { - "$ref": "#/definitions/ent.User" + "$ref": "#/definitions/types.UserOut" } } } @@ -780,422 +789,6 @@ } }, "definitions": { - "ent.AuthTokens": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the AuthTokensQuery when eager-loading is set.", - "$ref": "#/definitions/ent.AuthTokensEdges" - }, - "expires_at": { - "description": "ExpiresAt holds the value of the \"expires_at\" field.", - "type": "string" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "token": { - "description": "Token holds the value of the \"token\" field.", - "type": "array", - "items": { - "type": "integer" - } - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.AuthTokensEdges": { - "type": "object", - "properties": { - "user": { - "description": "User holds the value of the user edge.", - "$ref": "#/definitions/ent.User" - } - } - }, - "ent.Group": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the 
\"created_at\" field.", - "type": "string" - }, - "currency": { - "description": "Currency holds the value of the \"currency\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the GroupQuery when eager-loading is set.", - "$ref": "#/definitions/ent.GroupEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.GroupEdges": { - "type": "object", - "properties": { - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - }, - "labels": { - "description": "Labels holds the value of the labels edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Label" - } - }, - "locations": { - "description": "Locations holds the value of the locations edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Location" - } - }, - "users": { - "description": "Users holds the value of the users edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.User" - } - } - } - }, - "ent.Item": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ItemQuery when eager-loading is set.", - "$ref": "#/definitions/ent.ItemEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "lifetime_warranty": { - "description": "LifetimeWarranty 
holds the value of the \"lifetime_warranty\" field.", - "type": "boolean" - }, - "manufacturer": { - "description": "Manufacturer holds the value of the \"manufacturer\" field.", - "type": "string" - }, - "model_number": { - "description": "ModelNumber holds the value of the \"model_number\" field.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "notes": { - "description": "Notes holds the value of the \"notes\" field.", - "type": "string" - }, - "purchase_from": { - "description": "PurchaseFrom holds the value of the \"purchase_from\" field.", - "type": "string" - }, - "purchase_price": { - "description": "PurchasePrice holds the value of the \"purchase_price\" field.", - "type": "number" - }, - "purchase_time": { - "description": "PurchaseTime holds the value of the \"purchase_time\" field.", - "type": "string" - }, - "serial_number": { - "description": "SerialNumber holds the value of the \"serial_number\" field.", - "type": "string" - }, - "sold_notes": { - "description": "SoldNotes holds the value of the \"sold_notes\" field.", - "type": "string" - }, - "sold_price": { - "description": "SoldPrice holds the value of the \"sold_price\" field.", - "type": "number" - }, - "sold_time": { - "description": "SoldTime holds the value of the \"sold_time\" field.", - "type": "string" - }, - "sold_to": { - "description": "SoldTo holds the value of the \"sold_to\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - }, - "warranty_details": { - "description": "WarrantyDetails holds the value of the \"warranty_details\" field.", - "type": "string" - }, - "warranty_expires": { - "description": "WarrantyExpires holds the value of the \"warranty_expires\" field.", - "type": "string" - } - } - }, - "ent.ItemEdges": { - "type": "object", - "properties": { - "fields": { - "description": "Fields holds the 
value of the fields edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.ItemField" - } - }, - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "label": { - "description": "Label holds the value of the label edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Label" - } - }, - "location": { - "description": "Location holds the value of the location edge.", - "$ref": "#/definitions/ent.Location" - } - } - }, - "ent.ItemField": { - "type": "object", - "properties": { - "boolean_value": { - "description": "BooleanValue holds the value of the \"boolean_value\" field.", - "type": "boolean" - }, - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ItemFieldQuery when eager-loading is set.", - "$ref": "#/definitions/ent.ItemFieldEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "number_value": { - "description": "NumberValue holds the value of the \"number_value\" field.", - "type": "integer" - }, - "text_value": { - "description": "TextValue holds the value of the \"text_value\" field.", - "type": "string" - }, - "time_value": { - "description": "TimeValue holds the value of the \"time_value\" field.", - "type": "string" - }, - "type": { - "description": "Type holds the value of the \"type\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.ItemFieldEdges": { - "type": "object", - "properties": { - "item": { - 
"description": "Item holds the value of the item edge.", - "$ref": "#/definitions/ent.Item" - } - } - }, - "ent.Label": { - "type": "object", - "properties": { - "color": { - "description": "Color holds the value of the \"color\" field.", - "type": "string" - }, - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the LabelQuery when eager-loading is set.", - "$ref": "#/definitions/ent.LabelEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.LabelEdges": { - "type": "object", - "properties": { - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - } - } - }, - "ent.Location": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "description": { - "description": "Description holds the value of the \"description\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the LocationQuery when eager-loading is set.", - "$ref": "#/definitions/ent.LocationEdges" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "name": { - "description": "Name holds the value of the 
\"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.LocationEdges": { - "type": "object", - "properties": { - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - }, - "items": { - "description": "Items holds the value of the items edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.Item" - } - } - } - }, - "ent.User": { - "type": "object", - "properties": { - "created_at": { - "description": "CreatedAt holds the value of the \"created_at\" field.", - "type": "string" - }, - "edges": { - "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the UserQuery when eager-loading is set.", - "$ref": "#/definitions/ent.UserEdges" - }, - "email": { - "description": "Email holds the value of the \"email\" field.", - "type": "string" - }, - "id": { - "description": "ID of the ent.", - "type": "string" - }, - "is_superuser": { - "description": "IsSuperuser holds the value of the \"is_superuser\" field.", - "type": "boolean" - }, - "name": { - "description": "Name holds the value of the \"name\" field.", - "type": "string" - }, - "updated_at": { - "description": "UpdatedAt holds the value of the \"updated_at\" field.", - "type": "string" - } - } - }, - "ent.UserEdges": { - "type": "object", - "properties": { - "auth_tokens": { - "description": "AuthTokens holds the value of the auth_tokens edge.", - "type": "array", - "items": { - "$ref": "#/definitions/ent.AuthTokens" - } - }, - "group": { - "description": "Group holds the value of the group edge.", - "$ref": "#/definitions/ent.Group" - } - } - }, "server.Result": { "type": "object", "properties": { @@ -1237,6 +830,37 @@ } } }, + "types.DocumentOut": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "title": { + 
"type": "string" + } + } + }, + "types.ItemAttachment": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "document": { + "$ref": "#/definitions/types.DocumentOut" + }, + "id": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, "types.ItemCreate": { "type": "object", "properties": { @@ -1261,6 +885,12 @@ "types.ItemOut": { "type": "object", "properties": { + "attachments": { + "type": "array", + "items": { + "$ref": "#/definitions/types.ItemAttachment" + } + }, "createdAt": { "type": "string" }, @@ -1270,6 +900,9 @@ "id": { "type": "string" }, + "insured": { + "type": "boolean" + }, "labels": { "type": "array", "items": { @@ -1301,12 +934,16 @@ "type": "string" }, "purchasePrice": { - "type": "number" + "type": "string", + "example": "0" }, "purchaseTime": { "description": "Purchase", "type": "string" }, + "quantity": { + "type": "integer" + }, "serialNumber": { "description": "Identifications", "type": "string" @@ -1315,7 +952,8 @@ "type": "string" }, "soldPrice": { - "type": "number" + "type": "string", + "example": "0" }, "soldTime": { "description": "Sold", @@ -1347,6 +985,9 @@ "id": { "type": "string" }, + "insured": { + "type": "boolean" + }, "labels": { "type": "array", "items": { @@ -1378,12 +1019,16 @@ "type": "string" }, "purchasePrice": { - "type": "number" + "type": "string", + "example": "0" }, "purchaseTime": { "description": "Purchase", "type": "string" }, + "quantity": { + "type": "integer" + }, "serialNumber": { "description": "Identifications", "type": "string" @@ -1392,7 +1037,8 @@ "type": "string" }, "soldPrice": { - "type": "number" + "type": "string", + "example": "0" }, "soldTime": { "description": "Sold", @@ -1412,6 +1058,85 @@ } } }, + "types.ItemUpdate": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "insured": { + "type": "boolean" + }, + "labelIds": { + "type": "array", + "items": { + "type": "string" + } + }, 
+ "lifetimeWarranty": { + "description": "Warranty", + "type": "boolean" + }, + "locationId": { + "description": "Edges", + "type": "string" + }, + "manufacturer": { + "type": "string" + }, + "modelNumber": { + "type": "string" + }, + "name": { + "type": "string" + }, + "notes": { + "description": "Extras", + "type": "string" + }, + "purchaseFrom": { + "type": "string" + }, + "purchasePrice": { + "type": "string", + "example": "0" + }, + "purchaseTime": { + "description": "Purchase", + "type": "string" + }, + "quantity": { + "type": "integer" + }, + "serialNumber": { + "description": "Identifications", + "type": "string" + }, + "soldNotes": { + "type": "string" + }, + "soldPrice": { + "type": "string", + "example": "0" + }, + "soldTime": { + "description": "Sold", + "type": "string" + }, + "soldTo": { + "type": "string" + }, + "warrantyDetails": { + "type": "string" + }, + "warrantyExpires": { + "type": "string" + } + } + }, "types.LabelCreate": { "type": "object", "properties": { @@ -1583,6 +1308,29 @@ } } }, + "types.UserOut": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "groupName": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isSuperuser": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, "types.UserRegistration": { "type": "object", "properties": { diff --git a/backend/app/api/docs/swagger.yaml b/backend/app/api/docs/swagger.yaml index cfa5bf5..f75f365 100644 --- a/backend/app/api/docs/swagger.yaml +++ b/backend/app/api/docs/swagger.yaml @@ -1,318 +1,5 @@ basePath: /api definitions: - ent.AuthTokens: - properties: - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - edges: - $ref: '#/definitions/ent.AuthTokensEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the AuthTokensQuery when eager-loading is set. 
- expires_at: - description: ExpiresAt holds the value of the "expires_at" field. - type: string - id: - description: ID of the ent. - type: string - token: - description: Token holds the value of the "token" field. - items: - type: integer - type: array - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.AuthTokensEdges: - properties: - user: - $ref: '#/definitions/ent.User' - description: User holds the value of the user edge. - type: object - ent.Group: - properties: - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - currency: - description: Currency holds the value of the "currency" field. - type: string - edges: - $ref: '#/definitions/ent.GroupEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the GroupQuery when eager-loading is set. - id: - description: ID of the ent. - type: string - name: - description: Name holds the value of the "name" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.GroupEdges: - properties: - items: - description: Items holds the value of the items edge. - items: - $ref: '#/definitions/ent.Item' - type: array - labels: - description: Labels holds the value of the labels edge. - items: - $ref: '#/definitions/ent.Label' - type: array - locations: - description: Locations holds the value of the locations edge. - items: - $ref: '#/definitions/ent.Location' - type: array - users: - description: Users holds the value of the users edge. - items: - $ref: '#/definitions/ent.User' - type: array - type: object - ent.Item: - properties: - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - description: - description: Description holds the value of the "description" field. 
- type: string - edges: - $ref: '#/definitions/ent.ItemEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the ItemQuery when eager-loading is set. - id: - description: ID of the ent. - type: string - lifetime_warranty: - description: LifetimeWarranty holds the value of the "lifetime_warranty" field. - type: boolean - manufacturer: - description: Manufacturer holds the value of the "manufacturer" field. - type: string - model_number: - description: ModelNumber holds the value of the "model_number" field. - type: string - name: - description: Name holds the value of the "name" field. - type: string - notes: - description: Notes holds the value of the "notes" field. - type: string - purchase_from: - description: PurchaseFrom holds the value of the "purchase_from" field. - type: string - purchase_price: - description: PurchasePrice holds the value of the "purchase_price" field. - type: number - purchase_time: - description: PurchaseTime holds the value of the "purchase_time" field. - type: string - serial_number: - description: SerialNumber holds the value of the "serial_number" field. - type: string - sold_notes: - description: SoldNotes holds the value of the "sold_notes" field. - type: string - sold_price: - description: SoldPrice holds the value of the "sold_price" field. - type: number - sold_time: - description: SoldTime holds the value of the "sold_time" field. - type: string - sold_to: - description: SoldTo holds the value of the "sold_to" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - warranty_details: - description: WarrantyDetails holds the value of the "warranty_details" field. - type: string - warranty_expires: - description: WarrantyExpires holds the value of the "warranty_expires" field. 
- type: string - type: object - ent.ItemEdges: - properties: - fields: - description: Fields holds the value of the fields edge. - items: - $ref: '#/definitions/ent.ItemField' - type: array - group: - $ref: '#/definitions/ent.Group' - description: Group holds the value of the group edge. - label: - description: Label holds the value of the label edge. - items: - $ref: '#/definitions/ent.Label' - type: array - location: - $ref: '#/definitions/ent.Location' - description: Location holds the value of the location edge. - type: object - ent.ItemField: - properties: - boolean_value: - description: BooleanValue holds the value of the "boolean_value" field. - type: boolean - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - description: - description: Description holds the value of the "description" field. - type: string - edges: - $ref: '#/definitions/ent.ItemFieldEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the ItemFieldQuery when eager-loading is set. - id: - description: ID of the ent. - type: string - name: - description: Name holds the value of the "name" field. - type: string - number_value: - description: NumberValue holds the value of the "number_value" field. - type: integer - text_value: - description: TextValue holds the value of the "text_value" field. - type: string - time_value: - description: TimeValue holds the value of the "time_value" field. - type: string - type: - description: Type holds the value of the "type" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.ItemFieldEdges: - properties: - item: - $ref: '#/definitions/ent.Item' - description: Item holds the value of the item edge. - type: object - ent.Label: - properties: - color: - description: Color holds the value of the "color" field. 
- type: string - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - description: - description: Description holds the value of the "description" field. - type: string - edges: - $ref: '#/definitions/ent.LabelEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the LabelQuery when eager-loading is set. - id: - description: ID of the ent. - type: string - name: - description: Name holds the value of the "name" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.LabelEdges: - properties: - group: - $ref: '#/definitions/ent.Group' - description: Group holds the value of the group edge. - items: - description: Items holds the value of the items edge. - items: - $ref: '#/definitions/ent.Item' - type: array - type: object - ent.Location: - properties: - created_at: - description: CreatedAt holds the value of the "created_at" field. - type: string - description: - description: Description holds the value of the "description" field. - type: string - edges: - $ref: '#/definitions/ent.LocationEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the LocationQuery when eager-loading is set. - id: - description: ID of the ent. - type: string - name: - description: Name holds the value of the "name" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.LocationEdges: - properties: - group: - $ref: '#/definitions/ent.Group' - description: Group holds the value of the group edge. - items: - description: Items holds the value of the items edge. - items: - $ref: '#/definitions/ent.Item' - type: array - type: object - ent.User: - properties: - created_at: - description: CreatedAt holds the value of the "created_at" field. 
- type: string - edges: - $ref: '#/definitions/ent.UserEdges' - description: |- - Edges holds the relations/edges for other nodes in the graph. - The values are being populated by the UserQuery when eager-loading is set. - email: - description: Email holds the value of the "email" field. - type: string - id: - description: ID of the ent. - type: string - is_superuser: - description: IsSuperuser holds the value of the "is_superuser" field. - type: boolean - name: - description: Name holds the value of the "name" field. - type: string - updated_at: - description: UpdatedAt holds the value of the "updated_at" field. - type: string - type: object - ent.UserEdges: - properties: - auth_tokens: - description: AuthTokens holds the value of the auth_tokens edge. - items: - $ref: '#/definitions/ent.AuthTokens' - type: array - group: - $ref: '#/definitions/ent.Group' - description: Group holds the value of the group edge. - type: object server.Result: properties: details: {} @@ -340,6 +27,26 @@ definitions: type: string type: array type: object + types.DocumentOut: + properties: + id: + type: string + path: + type: string + title: + type: string + type: object + types.ItemAttachment: + properties: + createdAt: + type: string + document: + $ref: '#/definitions/types.DocumentOut' + id: + type: string + updatedAt: + type: string + type: object types.ItemCreate: properties: description: @@ -356,12 +63,18 @@ definitions: type: object types.ItemOut: properties: + attachments: + items: + $ref: '#/definitions/types.ItemAttachment' + type: array createdAt: type: string description: type: string id: type: string + insured: + type: boolean labels: items: $ref: '#/definitions/types.LabelSummary' @@ -384,17 +97,21 @@ definitions: purchaseFrom: type: string purchasePrice: - type: number + example: "0" + type: string purchaseTime: description: Purchase type: string + quantity: + type: integer serialNumber: description: Identifications type: string soldNotes: type: string soldPrice: - type: 
number + example: "0" + type: string soldTime: description: Sold type: string @@ -415,6 +132,8 @@ definitions: type: string id: type: string + insured: + type: boolean labels: items: $ref: '#/definitions/types.LabelSummary' @@ -437,17 +156,21 @@ definitions: purchaseFrom: type: string purchasePrice: - type: number + example: "0" + type: string purchaseTime: description: Purchase type: string + quantity: + type: integer serialNumber: description: Identifications type: string soldNotes: type: string soldPrice: - type: number + example: "0" + type: string soldTime: description: Sold type: string @@ -460,6 +183,61 @@ definitions: warrantyExpires: type: string type: object + types.ItemUpdate: + properties: + description: + type: string + id: + type: string + insured: + type: boolean + labelIds: + items: + type: string + type: array + lifetimeWarranty: + description: Warranty + type: boolean + locationId: + description: Edges + type: string + manufacturer: + type: string + modelNumber: + type: string + name: + type: string + notes: + description: Extras + type: string + purchaseFrom: + type: string + purchasePrice: + example: "0" + type: string + purchaseTime: + description: Purchase + type: string + quantity: + type: integer + serialNumber: + description: Identifications + type: string + soldNotes: + type: string + soldPrice: + example: "0" + type: string + soldTime: + description: Sold + type: string + soldTo: + type: string + warrantyDetails: + type: string + warrantyExpires: + type: string + type: object types.LabelCreate: properties: color: @@ -571,6 +349,21 @@ definitions: password: type: string type: object + types.UserOut: + properties: + email: + type: string + groupId: + type: string + groupName: + type: string + id: + type: string + isSuperuser: + type: boolean + name: + type: string + type: object types.UserRegistration: properties: groupName: @@ -609,7 +402,7 @@ paths: - properties: items: items: - $ref: '#/definitions/types.ItemOut' + $ref: 
'#/definitions/types.ItemSummary' type: array type: object security: @@ -681,6 +474,12 @@ paths: name: id required: true type: string + - description: Item Data + in: body + name: payload + required: true + schema: + $ref: '#/definitions/types.ItemUpdate' produces: - application/json responses: @@ -1006,7 +805,7 @@ paths: - $ref: '#/definitions/server.Result' - properties: item: - $ref: '#/definitions/ent.User' + $ref: '#/definitions/types.UserOut' type: object security: - Bearer: [] diff --git a/backend/app/api/v1/v1_ctrl_items.go b/backend/app/api/v1/v1_ctrl_items.go index 3c2cf28..23a0d92 100644 --- a/backend/app/api/v1/v1_ctrl_items.go +++ b/backend/app/api/v1/v1_ctrl_items.go @@ -14,7 +14,7 @@ import ( // @Summary Get All Items // @Tags Items // @Produce json -// @Success 200 {object} server.Results{items=[]types.ItemOut} +// @Success 200 {object} server.Results{items=[]types.ItemSummary} // @Router /v1/items [GET] // @Security Bearer func (ctrl *V1Controller) HandleItemsGetAll() http.HandlerFunc { @@ -64,7 +64,7 @@ func (ctrl *V1Controller) HandleItemsCreate() http.HandlerFunc { // @Summary deletes a item // @Tags Items // @Produce json -// @Param id path string true "Item ID" +// @Param id path string true "Item ID" // @Success 204 // @Router /v1/items/{id} [DELETE] // @Security Bearer @@ -90,7 +90,7 @@ func (ctrl *V1Controller) HandleItemDelete() http.HandlerFunc { // @Tags Items // @Produce json // @Param id path string true "Item ID" -// @Success 200 {object} types.ItemOut +// @Success 200 {object} types.ItemOut // @Router /v1/items/{id} [GET] // @Security Bearer func (ctrl *V1Controller) HandleItemGet() http.HandlerFunc { @@ -115,6 +115,7 @@ func (ctrl *V1Controller) HandleItemGet() http.HandlerFunc { // @Tags Items // @Produce json // @Param id path string true "Item ID" +// @Param payload body types.ItemUpdate true "Item Data" // @Success 200 {object} types.ItemOut // @Router /v1/items/{id} [PUT] // @Security Bearer diff --git 
a/backend/app/api/v1/v1_ctrl_user.go b/backend/app/api/v1/v1_ctrl_user.go index 520fd81..f7c4de6 100644 --- a/backend/app/api/v1/v1_ctrl_user.go +++ b/backend/app/api/v1/v1_ctrl_user.go @@ -41,7 +41,7 @@ func (ctrl *V1Controller) HandleUserRegistration() http.HandlerFunc { // @Summary Get the current user // @Tags User // @Produce json -// @Success 200 {object} server.Result{item=ent.User} +// @Success 200 {object} server.Result{item=types.UserOut} // @Router /v1/users/self [GET] // @Security Bearer func (ctrl *V1Controller) HandleUserSelf() http.HandlerFunc { diff --git a/backend/ent/attachment.go b/backend/ent/attachment.go new file mode 100644 index 0000000..b22b8db --- /dev/null +++ b/backend/ent/attachment.go @@ -0,0 +1,197 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/item" +) + +// Attachment is the model entity for the Attachment schema. +type Attachment struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Type holds the value of the "type" field. + Type attachment.Type `json:"type,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AttachmentQuery when eager-loading is set. + Edges AttachmentEdges `json:"edges"` + document_attachments *uuid.UUID + item_attachments *uuid.UUID +} + +// AttachmentEdges holds the relations/edges for other nodes in the graph. +type AttachmentEdges struct { + // Item holds the value of the item edge. 
+ Item *Item `json:"item,omitempty"` + // Document holds the value of the document edge. + Document *Document `json:"document,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// ItemOrErr returns the Item value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AttachmentEdges) ItemOrErr() (*Item, error) { + if e.loadedTypes[0] { + if e.Item == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: item.Label} + } + return e.Item, nil + } + return nil, &NotLoadedError{edge: "item"} +} + +// DocumentOrErr returns the Document value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AttachmentEdges) DocumentOrErr() (*Document, error) { + if e.loadedTypes[1] { + if e.Document == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: document.Label} + } + return e.Document, nil + } + return nil, &NotLoadedError{edge: "document"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Attachment) scanValues(columns []string) ([]interface{}, error) { + values := make([]interface{}, len(columns)) + for i := range columns { + switch columns[i] { + case attachment.FieldType: + values[i] = new(sql.NullString) + case attachment.FieldCreatedAt, attachment.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case attachment.FieldID: + values[i] = new(uuid.UUID) + case attachment.ForeignKeys[0]: // document_attachments + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + case attachment.ForeignKeys[1]: // item_attachments + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + return nil, fmt.Errorf("unexpected column %q for type Attachment", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Attachment fields. +func (a *Attachment) assignValues(columns []string, values []interface{}) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case attachment.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + a.ID = *value + } + case attachment.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + a.CreatedAt = value.Time + } + case attachment.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + a.UpdatedAt = value.Time + } + case attachment.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + a.Type = attachment.Type(value.String) + } + case attachment.ForeignKeys[0]: + if value, ok := 
values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field document_attachments", values[i]) + } else if value.Valid { + a.document_attachments = new(uuid.UUID) + *a.document_attachments = *value.S.(*uuid.UUID) + } + case attachment.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field item_attachments", values[i]) + } else if value.Valid { + a.item_attachments = new(uuid.UUID) + *a.item_attachments = *value.S.(*uuid.UUID) + } + } + } + return nil +} + +// QueryItem queries the "item" edge of the Attachment entity. +func (a *Attachment) QueryItem() *ItemQuery { + return (&AttachmentClient{config: a.config}).QueryItem(a) +} + +// QueryDocument queries the "document" edge of the Attachment entity. +func (a *Attachment) QueryDocument() *DocumentQuery { + return (&AttachmentClient{config: a.config}).QueryDocument(a) +} + +// Update returns a builder for updating this Attachment. +// Note that you need to call Attachment.Unwrap() before calling this method if this Attachment +// was returned from a transaction, and the transaction was committed or rolled back. +func (a *Attachment) Update() *AttachmentUpdateOne { + return (&AttachmentClient{config: a.config}).UpdateOne(a) +} + +// Unwrap unwraps the Attachment entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (a *Attachment) Unwrap() *Attachment { + _tx, ok := a.config.driver.(*txDriver) + if !ok { + panic("ent: Attachment is not a transactional entity") + } + a.config.driver = _tx.drv + return a +} + +// String implements the fmt.Stringer. 
+func (a *Attachment) String() string { + var builder strings.Builder + builder.WriteString("Attachment(") + builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) + builder.WriteString("created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(fmt.Sprintf("%v", a.Type)) + builder.WriteByte(')') + return builder.String() +} + +// Attachments is a parsable slice of Attachment. +type Attachments []*Attachment + +func (a Attachments) config(cfg config) { + for _i := range a { + a[_i].config = cfg + } +} diff --git a/backend/ent/attachment/attachment.go b/backend/ent/attachment/attachment.go new file mode 100644 index 0000000..d2775e2 --- /dev/null +++ b/backend/ent/attachment/attachment.go @@ -0,0 +1,112 @@ +// Code generated by ent, DO NOT EDIT. + +package attachment + +import ( + "fmt" + "time" + + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the attachment type in the database. + Label = "attachment" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // EdgeItem holds the string denoting the item edge name in mutations. + EdgeItem = "item" + // EdgeDocument holds the string denoting the document edge name in mutations. + EdgeDocument = "document" + // Table holds the table name of the attachment in the database. + Table = "attachments" + // ItemTable is the table that holds the item relation/edge. 
+ ItemTable = "attachments" + // ItemInverseTable is the table name for the Item entity. + // It exists in this package in order to avoid circular dependency with the "item" package. + ItemInverseTable = "items" + // ItemColumn is the table column denoting the item relation/edge. + ItemColumn = "item_attachments" + // DocumentTable is the table that holds the document relation/edge. + DocumentTable = "attachments" + // DocumentInverseTable is the table name for the Document entity. + // It exists in this package in order to avoid circular dependency with the "document" package. + DocumentInverseTable = "documents" + // DocumentColumn is the table column denoting the document relation/edge. + DocumentColumn = "document_attachments" +) + +// Columns holds all SQL columns for attachment fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldType, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "attachments" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "document_attachments", + "item_attachments", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// Type defines the type for the "type" enum field. 
+type Type string + +// TypeAttachment is the default value of the Type enum. +const DefaultType = TypeAttachment + +// Type values. +const ( + TypePhoto Type = "photo" + TypeManual Type = "manual" + TypeWarranty Type = "warranty" + TypeAttachment Type = "attachment" +) + +func (_type Type) String() string { + return string(_type) +} + +// TypeValidator is a validator for the "type" field enum values. It is called by the builders before save. +func TypeValidator(_type Type) error { + switch _type { + case TypePhoto, TypeManual, TypeWarranty, TypeAttachment: + return nil + default: + return fmt.Errorf("attachment: invalid enum value for type field: %q", _type) + } +} diff --git a/backend/ent/attachment/where.go b/backend/ent/attachment/where.go new file mode 100644 index 0000000..c601949 --- /dev/null +++ b/backend/ent/attachment/where.go @@ -0,0 +1,349 @@ +// Code generated by ent, DO NOT EDIT. + +package attachment + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
+func CreatedAtEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. 
+func CreatedAtLTE(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
+func UpdatedAtLT(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v Type) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v Type) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...Type) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldType), v...)) + }) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...Type) predicate.Attachment { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldType), v...)) + }) +} + +// HasItem applies the HasEdge predicate on the "item" edge. +func HasItem() predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates). 
+func HasItemWith(preds ...predicate.Item) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDocument applies the HasEdge predicate on the "document" edge. +func HasDocument() predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates). +func HasDocumentWith(preds ...predicate.Document) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Attachment) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Attachment) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Attachment) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/backend/ent/attachment_create.go b/backend/ent/attachment_create.go new file mode 100644 index 0000000..fdce24b --- /dev/null +++ b/backend/ent/attachment_create.go @@ -0,0 +1,402 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/item" +) + +// AttachmentCreate is the builder for creating a Attachment entity. +type AttachmentCreate struct { + config + mutation *AttachmentMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (ac *AttachmentCreate) SetCreatedAt(t time.Time) *AttachmentCreate { + ac.mutation.SetCreatedAt(t) + return ac +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableCreatedAt(t *time.Time) *AttachmentCreate { + if t != nil { + ac.SetCreatedAt(*t) + } + return ac +} + +// SetUpdatedAt sets the "updated_at" field. +func (ac *AttachmentCreate) SetUpdatedAt(t time.Time) *AttachmentCreate { + ac.mutation.SetUpdatedAt(t) + return ac +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableUpdatedAt(t *time.Time) *AttachmentCreate { + if t != nil { + ac.SetUpdatedAt(*t) + } + return ac +} + +// SetType sets the "type" field. +func (ac *AttachmentCreate) SetType(a attachment.Type) *AttachmentCreate { + ac.mutation.SetType(a) + return ac +} + +// SetNillableType sets the "type" field if the given value is not nil. 
+func (ac *AttachmentCreate) SetNillableType(a *attachment.Type) *AttachmentCreate { + if a != nil { + ac.SetType(*a) + } + return ac +} + +// SetID sets the "id" field. +func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate { + ac.mutation.SetID(u) + return ac +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableID(u *uuid.UUID) *AttachmentCreate { + if u != nil { + ac.SetID(*u) + } + return ac +} + +// SetItemID sets the "item" edge to the Item entity by ID. +func (ac *AttachmentCreate) SetItemID(id uuid.UUID) *AttachmentCreate { + ac.mutation.SetItemID(id) + return ac +} + +// SetItem sets the "item" edge to the Item entity. +func (ac *AttachmentCreate) SetItem(i *Item) *AttachmentCreate { + return ac.SetItemID(i.ID) +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (ac *AttachmentCreate) SetDocumentID(id uuid.UUID) *AttachmentCreate { + ac.mutation.SetDocumentID(id) + return ac +} + +// SetDocument sets the "document" edge to the Document entity. +func (ac *AttachmentCreate) SetDocument(d *Document) *AttachmentCreate { + return ac.SetDocumentID(d.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (ac *AttachmentCreate) Mutation() *AttachmentMutation { + return ac.mutation +} + +// Save creates the Attachment in the database. 
+func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) { + var ( + err error + node *Attachment + ) + ac.defaults() + if len(ac.hooks) == 0 { + if err = ac.check(); err != nil { + return nil, err + } + node, err = ac.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = ac.check(); err != nil { + return nil, err + } + ac.mutation = mutation + if node, err = ac.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(ac.hooks) - 1; i >= 0; i-- { + if ac.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = ac.hooks[i](mut) + } + v, err := mut.Mutate(ctx, ac.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Attachment) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from AttachmentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (ac *AttachmentCreate) SaveX(ctx context.Context) *Attachment { + v, err := ac.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ac *AttachmentCreate) Exec(ctx context.Context) error { + _, err := ac.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ac *AttachmentCreate) ExecX(ctx context.Context) { + if err := ac.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (ac *AttachmentCreate) defaults() { + if _, ok := ac.mutation.CreatedAt(); !ok { + v := attachment.DefaultCreatedAt() + ac.mutation.SetCreatedAt(v) + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + v := attachment.DefaultUpdatedAt() + ac.mutation.SetUpdatedAt(v) + } + if _, ok := ac.mutation.GetType(); !ok { + v := attachment.DefaultType + ac.mutation.SetType(v) + } + if _, ok := ac.mutation.ID(); !ok { + v := attachment.DefaultID() + ac.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ac *AttachmentCreate) check() error { + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Attachment.created_at"`)} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Attachment.updated_at"`)} + } + if _, ok := ac.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "Attachment.type"`)} + } + if v, ok := ac.mutation.GetType(); ok { + if err := attachment.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)} + } + } + if _, ok := ac.mutation.ItemID(); !ok { + return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "Attachment.item"`)} + } + if _, ok := ac.mutation.DocumentID(); !ok { + return &ValidationError{Name: "document", err: errors.New(`ent: missing required edge "Attachment.document"`)} + } + return nil +} + +func (ac *AttachmentCreate) sqlSave(ctx context.Context) (*Attachment, error) { + _node, _spec := ac.createSpec() + if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := 
_spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + return _node, nil +} + +func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { + var ( + _node = &Attachment{config: ac.config} + _spec = &sqlgraph.CreateSpec{ + Table: attachment.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + } + ) + if id, ok := ac.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := ac.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: attachment.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := ac.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: attachment.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := ac.mutation.GetType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeEnum, + Value: value, + Column: attachment.FieldType, + }) + _node.Type = value + } + if nodes := ac.mutation.ItemIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.ItemTable, + Columns: []string{attachment.ItemColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: item.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.item_attachments = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := ac.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.DocumentTable, + Columns: []string{attachment.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: 
document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.document_attachments = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// AttachmentCreateBulk is the builder for creating many Attachment entities in bulk. +type AttachmentCreateBulk struct { + config + builders []*AttachmentCreate +} + +// Save creates the Attachment entities in the database. +func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) { + specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) + nodes := make([]*Attachment, len(acb.builders)) + mutators := make([]Mutator, len(acb.builders)) + for i := range acb.builders { + func(i int, root context.Context) { + builder := acb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (acb *AttachmentCreateBulk) SaveX(ctx context.Context) []*Attachment { + v, err := acb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (acb *AttachmentCreateBulk) Exec(ctx context.Context) error { + _, err := acb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (acb *AttachmentCreateBulk) ExecX(ctx context.Context) { + if err := acb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/attachment_delete.go b/backend/ent/attachment_delete.go new file mode 100644 index 0000000..0b858cc --- /dev/null +++ b/backend/ent/attachment_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// AttachmentDelete is the builder for deleting a Attachment entity. +type AttachmentDelete struct { + config + hooks []Hook + mutation *AttachmentMutation +} + +// Where appends a list predicates to the AttachmentDelete builder. +func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete { + ad.mutation.Where(ps...) 
+ return ad +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(ad.hooks) == 0 { + affected, err = ad.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + ad.mutation = mutation + affected, err = ad.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(ad.hooks) - 1; i >= 0; i-- { + if ad.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = ad.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ad.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ad *AttachmentDelete) ExecX(ctx context.Context) int { + n, err := ad.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ad *AttachmentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: attachment.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + if ps := ad.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// AttachmentDeleteOne is the builder for deleting a single Attachment entity. +type AttachmentDeleteOne struct { + ad *AttachmentDelete +} + +// Exec executes the deletion query. 
+func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error { + n, err := ado.ad.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{attachment.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ado *AttachmentDeleteOne) ExecX(ctx context.Context) { + ado.ad.ExecX(ctx) +} diff --git a/backend/ent/attachment_query.go b/backend/ent/attachment_query.go new file mode 100644 index 0000000..f657057 --- /dev/null +++ b/backend/ent/attachment_query.go @@ -0,0 +1,683 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/item" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// AttachmentQuery is the builder for querying Attachment entities. +type AttachmentQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Attachment + withItem *ItemQuery + withDocument *DocumentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AttachmentQuery builder. +func (aq *AttachmentQuery) Where(ps ...predicate.Attachment) *AttachmentQuery { + aq.predicates = append(aq.predicates, ps...) + return aq +} + +// Limit adds a limit step to the query. +func (aq *AttachmentQuery) Limit(limit int) *AttachmentQuery { + aq.limit = &limit + return aq +} + +// Offset adds an offset step to the query. +func (aq *AttachmentQuery) Offset(offset int) *AttachmentQuery { + aq.offset = &offset + return aq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery { + aq.unique = &unique + return aq +} + +// Order adds an order step to the query. +func (aq *AttachmentQuery) Order(o ...OrderFunc) *AttachmentQuery { + aq.order = append(aq.order, o...) + return aq +} + +// QueryItem chains the current query on the "item" edge. +func (aq *AttachmentQuery) QueryItem() *ItemQuery { + query := &ItemQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, selector), + sqlgraph.To(item.Table, item.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, attachment.ItemTable, attachment.ItemColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDocument chains the current query on the "document" edge. +func (aq *AttachmentQuery) QueryDocument() *DocumentQuery { + query := &DocumentQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, selector), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, attachment.DocumentTable, attachment.DocumentColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Attachment entity from the query. +// Returns a *NotFoundError when no Attachment was found. 
+func (aq *AttachmentQuery) First(ctx context.Context) (*Attachment, error) { + nodes, err := aq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{attachment.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aq *AttachmentQuery) FirstX(ctx context.Context) *Attachment { + node, err := aq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Attachment ID from the query. +// Returns a *NotFoundError when no Attachment ID was found. +func (aq *AttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = aq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{attachment.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (aq *AttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := aq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Attachment entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Attachment entity is found. +// Returns a *NotFoundError when no Attachment entities are found. +func (aq *AttachmentQuery) Only(ctx context.Context) (*Attachment, error) { + nodes, err := aq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{attachment.Label} + default: + return nil, &NotSingularError{attachment.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aq *AttachmentQuery) OnlyX(ctx context.Context) *Attachment { + node, err := aq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Attachment ID in the query. 
+// Returns a *NotSingularError when more than one Attachment ID is found. +// Returns a *NotFoundError when no entities are found. +func (aq *AttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = aq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{attachment.Label} + default: + err = &NotSingularError{attachment.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aq *AttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := aq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Attachments. +func (aq *AttachmentQuery) All(ctx context.Context) ([]*Attachment, error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (aq *AttachmentQuery) AllX(ctx context.Context) []*Attachment { + nodes, err := aq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Attachment IDs. +func (aq *AttachmentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { + var ids []uuid.UUID + if err := aq.Select(attachment.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aq *AttachmentQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := aq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aq *AttachmentQuery) Count(ctx context.Context) (int, error) { + if err := aq.prepareQuery(ctx); err != nil { + return 0, err + } + return aq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. 
+func (aq *AttachmentQuery) CountX(ctx context.Context) int { + count, err := aq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (aq *AttachmentQuery) Exist(ctx context.Context) (bool, error) { + if err := aq.prepareQuery(ctx); err != nil { + return false, err + } + return aq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (aq *AttachmentQuery) ExistX(ctx context.Context) bool { + exist, err := aq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AttachmentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aq *AttachmentQuery) Clone() *AttachmentQuery { + if aq == nil { + return nil + } + return &AttachmentQuery{ + config: aq.config, + limit: aq.limit, + offset: aq.offset, + order: append([]OrderFunc{}, aq.order...), + predicates: append([]predicate.Attachment{}, aq.predicates...), + withItem: aq.withItem.Clone(), + withDocument: aq.withDocument.Clone(), + // clone intermediate query. + sql: aq.sql.Clone(), + path: aq.path, + unique: aq.unique, + } +} + +// WithItem tells the query-builder to eager-load the nodes that are connected to +// the "item" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AttachmentQuery) WithItem(opts ...func(*ItemQuery)) *AttachmentQuery { + query := &ItemQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withItem = query + return aq +} + +// WithDocument tells the query-builder to eager-load the nodes that are connected to +// the "document" edge. The optional arguments are used to configure the query builder of the edge. 
+func (aq *AttachmentQuery) WithDocument(opts ...func(*DocumentQuery)) *AttachmentQuery { + query := &DocumentQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withDocument = query + return aq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Attachment.Query(). +// GroupBy(attachment.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGroupBy { + grbuild := &AttachmentGroupBy{config: aq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlQuery(ctx), nil + } + grbuild.label = attachment.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Attachment.Query(). +// Select(attachment.FieldCreatedAt). +// Scan(ctx, &v) +func (aq *AttachmentQuery) Select(fields ...string) *AttachmentSelect { + aq.fields = append(aq.fields, fields...) 
+ selbuild := &AttachmentSelect{AttachmentQuery: aq} + selbuild.label = attachment.Label + selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan + return selbuild +} + +func (aq *AttachmentQuery) prepareQuery(ctx context.Context) error { + for _, f := range aq.fields { + if !attachment.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if aq.path != nil { + prev, err := aq.path(ctx) + if err != nil { + return err + } + aq.sql = prev + } + return nil +} + +func (aq *AttachmentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Attachment, error) { + var ( + nodes = []*Attachment{} + withFKs = aq.withFKs + _spec = aq.querySpec() + loadedTypes = [2]bool{ + aq.withItem != nil, + aq.withDocument != nil, + } + ) + if aq.withItem != nil || aq.withDocument != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, attachment.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]interface{}, error) { + return (*Attachment).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []interface{}) error { + node := &Attachment{config: aq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := aq.withItem; query != nil { + if err := aq.loadItem(ctx, query, nodes, nil, + func(n *Attachment, e *Item) { n.Edges.Item = e }); err != nil { + return nil, err + } + } + if query := aq.withDocument; query != nil { + if err := aq.loadDocument(ctx, query, nodes, nil, + func(n *Attachment, e *Document) { n.Edges.Document = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (aq *AttachmentQuery) loadItem(ctx context.Context, query *ItemQuery, nodes 
[]*Attachment, init func(*Attachment), assign func(*Attachment, *Item)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Attachment) + for i := range nodes { + if nodes[i].item_attachments == nil { + continue + } + fk := *nodes[i].item_attachments + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(item.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "item_attachments" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (aq *AttachmentQuery) loadDocument(ctx context.Context, query *DocumentQuery, nodes []*Attachment, init func(*Attachment), assign func(*Attachment, *Document)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Attachment) + for i := range nodes { + if nodes[i].document_attachments == nil { + continue + } + fk := *nodes[i].document_attachments + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(document.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (aq *AttachmentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + _spec.Node.Columns = aq.fields + if len(aq.fields) > 0 { + _spec.Unique = aq.unique != nil && *aq.unique + } + return sqlgraph.CountNodes(ctx, aq.driver, _spec) +} + +func (aq *AttachmentQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := aq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check 
existence: %w", err) + } + return n > 0, nil +} + +func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: attachment.Table, + Columns: attachment.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + From: aq.sql, + Unique: true, + } + if unique := aq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := aq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID) + for i := range fields { + if fields[i] != attachment.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := aq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := aq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := aq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(aq.driver.Dialect()) + t1 := builder.Table(attachment.Table) + columns := aq.fields + if len(columns) == 0 { + columns = attachment.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if aq.sql != nil { + selector = aq.sql + selector.Select(selector.Columns(columns...)...) + } + if aq.unique != nil && *aq.unique { + selector.Distinct() + } + for _, p := range aq.predicates { + p(selector) + } + for _, p := range aq.order { + p(selector) + } + if offset := aq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AttachmentGroupBy is the group-by builder for Attachment entities. +type AttachmentGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (agb *AttachmentGroupBy) Aggregate(fns ...AggregateFunc) *AttachmentGroupBy { + agb.fns = append(agb.fns, fns...) + return agb +} + +// Scan applies the group-by query and scans the result into the given value. +func (agb *AttachmentGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := agb.path(ctx) + if err != nil { + return err + } + agb.sql = query + return agb.sqlScan(ctx, v) +} + +func (agb *AttachmentGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range agb.fields { + if !attachment.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := agb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := agb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (agb *AttachmentGroupBy) sqlQuery() *sql.Selector { + selector := agb.sql.Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. 
+ if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(agb.fields)+len(agb.fns)) + for _, f := range agb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(agb.fields...)...) +} + +// AttachmentSelect is the builder for selecting fields of Attachment entities. +type AttachmentSelect struct { + *AttachmentQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (as *AttachmentSelect) Scan(ctx context.Context, v interface{}) error { + if err := as.prepareQuery(ctx); err != nil { + return err + } + as.sql = as.AttachmentQuery.sqlQuery(ctx) + return as.sqlScan(ctx, v) +} + +func (as *AttachmentSelect) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := as.sql.Query() + if err := as.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/attachment_update.go b/backend/ent/attachment_update.go new file mode 100644 index 0000000..53b5feb --- /dev/null +++ b/backend/ent/attachment_update.go @@ -0,0 +1,587 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/item" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// AttachmentUpdate is the builder for updating Attachment entities. +type AttachmentUpdate struct { + config + hooks []Hook + mutation *AttachmentMutation +} + +// Where appends a list predicates to the AttachmentUpdate builder. 
+func (au *AttachmentUpdate) Where(ps ...predicate.Attachment) *AttachmentUpdate { + au.mutation.Where(ps...) + return au +} + +// SetUpdatedAt sets the "updated_at" field. +func (au *AttachmentUpdate) SetUpdatedAt(t time.Time) *AttachmentUpdate { + au.mutation.SetUpdatedAt(t) + return au +} + +// SetType sets the "type" field. +func (au *AttachmentUpdate) SetType(a attachment.Type) *AttachmentUpdate { + au.mutation.SetType(a) + return au +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableType(a *attachment.Type) *AttachmentUpdate { + if a != nil { + au.SetType(*a) + } + return au +} + +// SetItemID sets the "item" edge to the Item entity by ID. +func (au *AttachmentUpdate) SetItemID(id uuid.UUID) *AttachmentUpdate { + au.mutation.SetItemID(id) + return au +} + +// SetItem sets the "item" edge to the Item entity. +func (au *AttachmentUpdate) SetItem(i *Item) *AttachmentUpdate { + return au.SetItemID(i.ID) +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (au *AttachmentUpdate) SetDocumentID(id uuid.UUID) *AttachmentUpdate { + au.mutation.SetDocumentID(id) + return au +} + +// SetDocument sets the "document" edge to the Document entity. +func (au *AttachmentUpdate) SetDocument(d *Document) *AttachmentUpdate { + return au.SetDocumentID(d.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (au *AttachmentUpdate) Mutation() *AttachmentMutation { + return au.mutation +} + +// ClearItem clears the "item" edge to the Item entity. +func (au *AttachmentUpdate) ClearItem() *AttachmentUpdate { + au.mutation.ClearItem() + return au +} + +// ClearDocument clears the "document" edge to the Document entity. +func (au *AttachmentUpdate) ClearDocument() *AttachmentUpdate { + au.mutation.ClearDocument() + return au +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + au.defaults() + if len(au.hooks) == 0 { + if err = au.check(); err != nil { + return 0, err + } + affected, err = au.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = au.check(); err != nil { + return 0, err + } + au.mutation = mutation + affected, err = au.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(au.hooks) - 1; i >= 0; i-- { + if au.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = au.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, au.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (au *AttachmentUpdate) SaveX(ctx context.Context) int { + affected, err := au.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (au *AttachmentUpdate) Exec(ctx context.Context) error { + _, err := au.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (au *AttachmentUpdate) ExecX(ctx context.Context) { + if err := au.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (au *AttachmentUpdate) defaults() { + if _, ok := au.mutation.UpdatedAt(); !ok { + v := attachment.UpdateDefaultUpdatedAt() + au.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (au *AttachmentUpdate) check() error { + if v, ok := au.mutation.GetType(); ok { + if err := attachment.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)} + } + } + if _, ok := au.mutation.ItemID(); au.mutation.ItemCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.item"`) + } + if _, ok := au.mutation.DocumentID(); au.mutation.DocumentCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.document"`) + } + return nil +} + +func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: attachment.Table, + Columns: attachment.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + if ps := au.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := au.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: attachment.FieldUpdatedAt, + }) + } + if value, ok := au.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeEnum, + Value: value, + Column: attachment.FieldType, + }) + } + if au.mutation.ItemCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.ItemTable, + Columns: []string{attachment.ItemColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: item.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.ItemIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.ItemTable, + Columns: 
[]string{attachment.ItemColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: item.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.DocumentTable, + Columns: []string{attachment.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.DocumentTable, + Columns: []string{attachment.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{attachment.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// AttachmentUpdateOne is the builder for updating a single Attachment entity. +type AttachmentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AttachmentMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (auo *AttachmentUpdateOne) SetUpdatedAt(t time.Time) *AttachmentUpdateOne { + auo.mutation.SetUpdatedAt(t) + return auo +} + +// SetType sets the "type" field. 
+func (auo *AttachmentUpdateOne) SetType(a attachment.Type) *AttachmentUpdateOne { + auo.mutation.SetType(a) + return auo +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableType(a *attachment.Type) *AttachmentUpdateOne { + if a != nil { + auo.SetType(*a) + } + return auo +} + +// SetItemID sets the "item" edge to the Item entity by ID. +func (auo *AttachmentUpdateOne) SetItemID(id uuid.UUID) *AttachmentUpdateOne { + auo.mutation.SetItemID(id) + return auo +} + +// SetItem sets the "item" edge to the Item entity. +func (auo *AttachmentUpdateOne) SetItem(i *Item) *AttachmentUpdateOne { + return auo.SetItemID(i.ID) +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (auo *AttachmentUpdateOne) SetDocumentID(id uuid.UUID) *AttachmentUpdateOne { + auo.mutation.SetDocumentID(id) + return auo +} + +// SetDocument sets the "document" edge to the Document entity. +func (auo *AttachmentUpdateOne) SetDocument(d *Document) *AttachmentUpdateOne { + return auo.SetDocumentID(d.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (auo *AttachmentUpdateOne) Mutation() *AttachmentMutation { + return auo.mutation +} + +// ClearItem clears the "item" edge to the Item entity. +func (auo *AttachmentUpdateOne) ClearItem() *AttachmentUpdateOne { + auo.mutation.ClearItem() + return auo +} + +// ClearDocument clears the "document" edge to the Document entity. +func (auo *AttachmentUpdateOne) ClearDocument() *AttachmentUpdateOne { + auo.mutation.ClearDocument() + return auo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *AttachmentUpdateOne { + auo.fields = append([]string{field}, fields...) + return auo +} + +// Save executes the query and returns the updated Attachment entity. 
+func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) { + var ( + err error + node *Attachment + ) + auo.defaults() + if len(auo.hooks) == 0 { + if err = auo.check(); err != nil { + return nil, err + } + node, err = auo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = auo.check(); err != nil { + return nil, err + } + auo.mutation = mutation + node, err = auo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(auo.hooks) - 1; i >= 0; i-- { + if auo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = auo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, auo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Attachment) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from AttachmentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (auo *AttachmentUpdateOne) SaveX(ctx context.Context) *Attachment { + node, err := auo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (auo *AttachmentUpdateOne) Exec(ctx context.Context) error { + _, err := auo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (auo *AttachmentUpdateOne) ExecX(ctx context.Context) { + if err := auo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (auo *AttachmentUpdateOne) defaults() { + if _, ok := auo.mutation.UpdatedAt(); !ok { + v := attachment.UpdateDefaultUpdatedAt() + auo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (auo *AttachmentUpdateOne) check() error { + if v, ok := auo.mutation.GetType(); ok { + if err := attachment.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)} + } + } + if _, ok := auo.mutation.ItemID(); auo.mutation.ItemCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.item"`) + } + if _, ok := auo.mutation.DocumentID(); auo.mutation.DocumentCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.document"`) + } + return nil +} + +func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: attachment.Table, + Columns: attachment.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + id, ok := auo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Attachment.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := auo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID) + for _, f := range fields { + if !attachment.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != attachment.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := auo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := auo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: attachment.FieldUpdatedAt, + }) + } + if value, ok := auo.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + 
Type: field.TypeEnum, + Value: value, + Column: attachment.FieldType, + }) + } + if auo.mutation.ItemCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.ItemTable, + Columns: []string{attachment.ItemColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: item.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.ItemIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.ItemTable, + Columns: []string{attachment.ItemColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: item.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.DocumentTable, + Columns: []string{attachment.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: attachment.DocumentTable, + Columns: []string{attachment.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Attachment{config: auo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + 
if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{attachment.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go index 5c55b71..fd39792 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -11,7 +11,10 @@ import ( "github.com/google/uuid" "github.com/hay-kot/content/backend/ent/migrate" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -29,8 +32,14 @@ type Client struct { config // Schema is the client for creating, migrating and dropping schema. Schema *migrate.Schema + // Attachment is the client for interacting with the Attachment builders. + Attachment *AttachmentClient // AuthTokens is the client for interacting with the AuthTokens builders. AuthTokens *AuthTokensClient + // Document is the client for interacting with the Document builders. + Document *DocumentClient + // DocumentToken is the client for interacting with the DocumentToken builders. + DocumentToken *DocumentTokenClient // Group is the client for interacting with the Group builders. Group *GroupClient // Item is the client for interacting with the Item builders. 
@@ -56,7 +65,10 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) + c.Attachment = NewAttachmentClient(c.config) c.AuthTokens = NewAuthTokensClient(c.config) + c.Document = NewDocumentClient(c.config) + c.DocumentToken = NewDocumentTokenClient(c.config) c.Group = NewGroupClient(c.config) c.Item = NewItemClient(c.config) c.ItemField = NewItemFieldClient(c.config) @@ -94,15 +106,18 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { cfg := c.config cfg.driver = tx return &Tx{ - ctx: ctx, - config: cfg, - AuthTokens: NewAuthTokensClient(cfg), - Group: NewGroupClient(cfg), - Item: NewItemClient(cfg), - ItemField: NewItemFieldClient(cfg), - Label: NewLabelClient(cfg), - Location: NewLocationClient(cfg), - User: NewUserClient(cfg), + ctx: ctx, + config: cfg, + Attachment: NewAttachmentClient(cfg), + AuthTokens: NewAuthTokensClient(cfg), + Document: NewDocumentClient(cfg), + DocumentToken: NewDocumentTokenClient(cfg), + Group: NewGroupClient(cfg), + Item: NewItemClient(cfg), + ItemField: NewItemFieldClient(cfg), + Label: NewLabelClient(cfg), + Location: NewLocationClient(cfg), + User: NewUserClient(cfg), }, nil } @@ -120,22 +135,25 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) cfg := c.config cfg.driver = &txDriver{tx: tx, drv: c.driver} return &Tx{ - ctx: ctx, - config: cfg, - AuthTokens: NewAuthTokensClient(cfg), - Group: NewGroupClient(cfg), - Item: NewItemClient(cfg), - ItemField: NewItemFieldClient(cfg), - Label: NewLabelClient(cfg), - Location: NewLocationClient(cfg), - User: NewUserClient(cfg), + ctx: ctx, + config: cfg, + Attachment: NewAttachmentClient(cfg), + AuthTokens: NewAuthTokensClient(cfg), + Document: NewDocumentClient(cfg), + DocumentToken: NewDocumentTokenClient(cfg), + Group: NewGroupClient(cfg), + Item: NewItemClient(cfg), + ItemField: NewItemFieldClient(cfg), + Label: NewLabelClient(cfg), + Location: NewLocationClient(cfg), + User: 
NewUserClient(cfg), }, nil } // Debug returns a new debug-client. It's used to get verbose logging on specific operations. // // client.Debug(). -// AuthTokens. +// Attachment. // Query(). // Count(ctx) func (c *Client) Debug() *Client { @@ -157,7 +175,10 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { + c.Attachment.Use(hooks...) c.AuthTokens.Use(hooks...) + c.Document.Use(hooks...) + c.DocumentToken.Use(hooks...) c.Group.Use(hooks...) c.Item.Use(hooks...) c.ItemField.Use(hooks...) @@ -166,6 +187,128 @@ func (c *Client) Use(hooks ...Hook) { c.User.Use(hooks...) } +// AttachmentClient is a client for the Attachment schema. +type AttachmentClient struct { + config +} + +// NewAttachmentClient returns a client for the Attachment from the given config. +func NewAttachmentClient(c config) *AttachmentClient { + return &AttachmentClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `attachment.Hooks(f(g(h())))`. +func (c *AttachmentClient) Use(hooks ...Hook) { + c.hooks.Attachment = append(c.hooks.Attachment, hooks...) +} + +// Create returns a builder for creating a Attachment entity. +func (c *AttachmentClient) Create() *AttachmentCreate { + mutation := newAttachmentMutation(c.config, OpCreate) + return &AttachmentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Attachment entities. +func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *AttachmentCreateBulk { + return &AttachmentCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Attachment. 
+func (c *AttachmentClient) Update() *AttachmentUpdate { + mutation := newAttachmentMutation(c.config, OpUpdate) + return &AttachmentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AttachmentClient) UpdateOne(a *Attachment) *AttachmentUpdateOne { + mutation := newAttachmentMutation(c.config, OpUpdateOne, withAttachment(a)) + return &AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AttachmentClient) UpdateOneID(id uuid.UUID) *AttachmentUpdateOne { + mutation := newAttachmentMutation(c.config, OpUpdateOne, withAttachmentID(id)) + return &AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Attachment. +func (c *AttachmentClient) Delete() *AttachmentDelete { + mutation := newAttachmentMutation(c.config, OpDelete) + return &AttachmentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AttachmentClient) DeleteOne(a *Attachment) *AttachmentDeleteOne { + return c.DeleteOneID(a.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *AttachmentClient) DeleteOneID(id uuid.UUID) *AttachmentDeleteOne { + builder := c.Delete().Where(attachment.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AttachmentDeleteOne{builder} +} + +// Query returns a query builder for Attachment. +func (c *AttachmentClient) Query() *AttachmentQuery { + return &AttachmentQuery{ + config: c.config, + } +} + +// Get returns a Attachment entity by its id. +func (c *AttachmentClient) Get(ctx context.Context, id uuid.UUID) (*Attachment, error) { + return c.Query().Where(attachment.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *AttachmentClient) GetX(ctx context.Context, id uuid.UUID) *Attachment { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryItem queries the item edge of a Attachment. +func (c *AttachmentClient) QueryItem(a *Attachment) *ItemQuery { + query := &ItemQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, id), + sqlgraph.To(item.Table, item.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, attachment.ItemTable, attachment.ItemColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDocument queries the document edge of a Attachment. +func (c *AttachmentClient) QueryDocument(a *Attachment) *DocumentQuery { + query := &DocumentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, id), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, attachment.DocumentTable, attachment.DocumentColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AttachmentClient) Hooks() []Hook { + return c.hooks.Attachment +} + // AuthTokensClient is a client for the AuthTokens schema. type AuthTokensClient struct { config @@ -272,6 +415,250 @@ func (c *AuthTokensClient) Hooks() []Hook { return c.hooks.AuthTokens } +// DocumentClient is a client for the Document schema. +type DocumentClient struct { + config +} + +// NewDocumentClient returns a client for the Document from the given config. +func NewDocumentClient(c config) *DocumentClient { + return &DocumentClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. 
+// A call to `Use(f, g, h)` equals to `document.Hooks(f(g(h())))`. +func (c *DocumentClient) Use(hooks ...Hook) { + c.hooks.Document = append(c.hooks.Document, hooks...) +} + +// Create returns a builder for creating a Document entity. +func (c *DocumentClient) Create() *DocumentCreate { + mutation := newDocumentMutation(c.config, OpCreate) + return &DocumentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Document entities. +func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreateBulk { + return &DocumentCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Document. +func (c *DocumentClient) Update() *DocumentUpdate { + mutation := newDocumentMutation(c.config, OpUpdate) + return &DocumentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DocumentClient) UpdateOne(d *Document) *DocumentUpdateOne { + mutation := newDocumentMutation(c.config, OpUpdateOne, withDocument(d)) + return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DocumentClient) UpdateOneID(id uuid.UUID) *DocumentUpdateOne { + mutation := newDocumentMutation(c.config, OpUpdateOne, withDocumentID(id)) + return &DocumentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Document. +func (c *DocumentClient) Delete() *DocumentDelete { + mutation := newDocumentMutation(c.config, OpDelete) + return &DocumentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *DocumentClient) DeleteOne(d *Document) *DocumentDeleteOne { + return c.DeleteOneID(d.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. 
+func (c *DocumentClient) DeleteOneID(id uuid.UUID) *DocumentDeleteOne { + builder := c.Delete().Where(document.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DocumentDeleteOne{builder} +} + +// Query returns a query builder for Document. +func (c *DocumentClient) Query() *DocumentQuery { + return &DocumentQuery{ + config: c.config, + } +} + +// Get returns a Document entity by its id. +func (c *DocumentClient) Get(ctx context.Context, id uuid.UUID) (*Document, error) { + return c.Query().Where(document.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DocumentClient) GetX(ctx context.Context, id uuid.UUID) *Document { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroup queries the group edge of a Document. +func (c *DocumentClient) QueryGroup(d *Document) *GroupQuery { + query := &GroupQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDocumentTokens queries the document_tokens edge of a Document. 
+func (c *DocumentClient) QueryDocumentTokens(d *Document) *DocumentTokenQuery { + query := &DocumentTokenQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, id), + sqlgraph.To(documenttoken.Table, documenttoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAttachments queries the attachments edge of a Document. +func (c *DocumentClient) QueryAttachments(d *Document) *AttachmentQuery { + query := &AttachmentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, id), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.AttachmentsTable, document.AttachmentsColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DocumentClient) Hooks() []Hook { + return c.hooks.Document +} + +// DocumentTokenClient is a client for the DocumentToken schema. +type DocumentTokenClient struct { + config +} + +// NewDocumentTokenClient returns a client for the DocumentToken from the given config. +func NewDocumentTokenClient(c config) *DocumentTokenClient { + return &DocumentTokenClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `documenttoken.Hooks(f(g(h())))`. +func (c *DocumentTokenClient) Use(hooks ...Hook) { + c.hooks.DocumentToken = append(c.hooks.DocumentToken, hooks...) +} + +// Create returns a builder for creating a DocumentToken entity. 
+func (c *DocumentTokenClient) Create() *DocumentTokenCreate { + mutation := newDocumentTokenMutation(c.config, OpCreate) + return &DocumentTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of DocumentToken entities. +func (c *DocumentTokenClient) CreateBulk(builders ...*DocumentTokenCreate) *DocumentTokenCreateBulk { + return &DocumentTokenCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for DocumentToken. +func (c *DocumentTokenClient) Update() *DocumentTokenUpdate { + mutation := newDocumentTokenMutation(c.config, OpUpdate) + return &DocumentTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DocumentTokenClient) UpdateOne(dt *DocumentToken) *DocumentTokenUpdateOne { + mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentToken(dt)) + return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DocumentTokenClient) UpdateOneID(id uuid.UUID) *DocumentTokenUpdateOne { + mutation := newDocumentTokenMutation(c.config, OpUpdateOne, withDocumentTokenID(id)) + return &DocumentTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for DocumentToken. +func (c *DocumentTokenClient) Delete() *DocumentTokenDelete { + mutation := newDocumentTokenMutation(c.config, OpDelete) + return &DocumentTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *DocumentTokenClient) DeleteOne(dt *DocumentToken) *DocumentTokenDeleteOne { + return c.DeleteOneID(dt.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. 
+func (c *DocumentTokenClient) DeleteOneID(id uuid.UUID) *DocumentTokenDeleteOne { + builder := c.Delete().Where(documenttoken.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DocumentTokenDeleteOne{builder} +} + +// Query returns a query builder for DocumentToken. +func (c *DocumentTokenClient) Query() *DocumentTokenQuery { + return &DocumentTokenQuery{ + config: c.config, + } +} + +// Get returns a DocumentToken entity by its id. +func (c *DocumentTokenClient) Get(ctx context.Context, id uuid.UUID) (*DocumentToken, error) { + return c.Query().Where(documenttoken.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DocumentTokenClient) GetX(ctx context.Context, id uuid.UUID) *DocumentToken { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryDocument queries the document edge of a DocumentToken. +func (c *DocumentTokenClient) QueryDocument(dt *DocumentToken) *DocumentQuery { + query := &DocumentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := dt.ID + step := sqlgraph.NewStep( + sqlgraph.From(documenttoken.Table, documenttoken.FieldID, id), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn), + ) + fromV = sqlgraph.Neighbors(dt.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DocumentTokenClient) Hooks() []Hook { + return c.hooks.DocumentToken +} + // GroupClient is a client for the Group schema. type GroupClient struct { config @@ -421,6 +808,22 @@ func (c *GroupClient) QueryLabels(gr *Group) *LabelQuery { return query } +// QueryDocuments queries the documents edge of a Group. 
+func (c *GroupClient) QueryDocuments(gr *Group) *DocumentQuery { + query := &DocumentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := gr.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn), + ) + fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *GroupClient) Hooks() []Hook { return c.hooks.Group @@ -575,6 +978,22 @@ func (c *ItemClient) QueryLabel(i *Item) *LabelQuery { return query } +// QueryAttachments queries the attachments edge of a Item. +func (c *ItemClient) QueryAttachments(i *Item) *AttachmentQuery { + query := &AttachmentQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := i.ID + step := sqlgraph.NewStep( + sqlgraph.From(item.Table, item.FieldID, id), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, item.AttachmentsTable, item.AttachmentsColumn), + ) + fromV = sqlgraph.Neighbors(i.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *ItemClient) Hooks() []Hook { return c.hooks.Item diff --git a/backend/ent/config.go b/backend/ent/config.go index b0dbf9f..8ae9829 100644 --- a/backend/ent/config.go +++ b/backend/ent/config.go @@ -24,13 +24,16 @@ type config struct { // hooks per client, for fast access. 
type hooks struct { - AuthTokens []ent.Hook - Group []ent.Hook - Item []ent.Hook - ItemField []ent.Hook - Label []ent.Hook - Location []ent.Hook - User []ent.Hook + Attachment []ent.Hook + AuthTokens []ent.Hook + Document []ent.Hook + DocumentToken []ent.Hook + Group []ent.Hook + Item []ent.Hook + ItemField []ent.Hook + Label []ent.Hook + Location []ent.Hook + User []ent.Hook } // Options applies the options on the config object. diff --git a/backend/ent/document.go b/backend/ent/document.go new file mode 100644 index 0000000..14a33ca --- /dev/null +++ b/backend/ent/document.go @@ -0,0 +1,209 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/group" +) + +// Document is the model entity for the Document schema. +type Document struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Title holds the value of the "title" field. + Title string `json:"title,omitempty"` + // Path holds the value of the "path" field. + Path string `json:"path,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DocumentQuery when eager-loading is set. + Edges DocumentEdges `json:"edges"` + group_documents *uuid.UUID +} + +// DocumentEdges holds the relations/edges for other nodes in the graph. +type DocumentEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // DocumentTokens holds the value of the document_tokens edge. 
+ DocumentTokens []*DocumentToken `json:"document_tokens,omitempty"` + // Attachments holds the value of the attachments edge. + Attachments []*Attachment `json:"attachments,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DocumentEdges) GroupOrErr() (*Group, error) { + if e.loadedTypes[0] { + if e.Group == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: group.Label} + } + return e.Group, nil + } + return nil, &NotLoadedError{edge: "group"} +} + +// DocumentTokensOrErr returns the DocumentTokens value or an error if the edge +// was not loaded in eager-loading. +func (e DocumentEdges) DocumentTokensOrErr() ([]*DocumentToken, error) { + if e.loadedTypes[1] { + return e.DocumentTokens, nil + } + return nil, &NotLoadedError{edge: "document_tokens"} +} + +// AttachmentsOrErr returns the Attachments value or an error if the edge +// was not loaded in eager-loading. +func (e DocumentEdges) AttachmentsOrErr() ([]*Attachment, error) { + if e.loadedTypes[2] { + return e.Attachments, nil + } + return nil, &NotLoadedError{edge: "attachments"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Document) scanValues(columns []string) ([]interface{}, error) { + values := make([]interface{}, len(columns)) + for i := range columns { + switch columns[i] { + case document.FieldTitle, document.FieldPath: + values[i] = new(sql.NullString) + case document.FieldCreatedAt, document.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case document.FieldID: + values[i] = new(uuid.UUID) + case document.ForeignKeys[0]: // group_documents + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + return nil, fmt.Errorf("unexpected column %q for type Document", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Document fields. +func (d *Document) assignValues(columns []string, values []interface{}) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case document.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + d.ID = *value + } + case document.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + d.CreatedAt = value.Time + } + case document.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + d.UpdatedAt = value.Time + } + case document.FieldTitle: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field title", values[i]) + } else if value.Valid { + d.Title = value.String + } + case document.FieldPath: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field path", values[i]) + } else if value.Valid { + d.Path = value.String 
+ } + case document.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field group_documents", values[i]) + } else if value.Valid { + d.group_documents = new(uuid.UUID) + *d.group_documents = *value.S.(*uuid.UUID) + } + } + } + return nil +} + +// QueryGroup queries the "group" edge of the Document entity. +func (d *Document) QueryGroup() *GroupQuery { + return (&DocumentClient{config: d.config}).QueryGroup(d) +} + +// QueryDocumentTokens queries the "document_tokens" edge of the Document entity. +func (d *Document) QueryDocumentTokens() *DocumentTokenQuery { + return (&DocumentClient{config: d.config}).QueryDocumentTokens(d) +} + +// QueryAttachments queries the "attachments" edge of the Document entity. +func (d *Document) QueryAttachments() *AttachmentQuery { + return (&DocumentClient{config: d.config}).QueryAttachments(d) +} + +// Update returns a builder for updating this Document. +// Note that you need to call Document.Unwrap() before calling this method if this Document +// was returned from a transaction, and the transaction was committed or rolled back. +func (d *Document) Update() *DocumentUpdateOne { + return (&DocumentClient{config: d.config}).UpdateOne(d) +} + +// Unwrap unwraps the Document entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (d *Document) Unwrap() *Document { + _tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("ent: Document is not a transactional entity") + } + d.config.driver = _tx.drv + return d +} + +// String implements the fmt.Stringer. 
+func (d *Document) String() string { + var builder strings.Builder + builder.WriteString("Document(") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("title=") + builder.WriteString(d.Title) + builder.WriteString(", ") + builder.WriteString("path=") + builder.WriteString(d.Path) + builder.WriteByte(')') + return builder.String() +} + +// Documents is a parsable slice of Document. +type Documents []*Document + +func (d Documents) config(cfg config) { + for _i := range d { + d[_i].config = cfg + } +} diff --git a/backend/ent/document/document.go b/backend/ent/document/document.go new file mode 100644 index 0000000..bfc3881 --- /dev/null +++ b/backend/ent/document/document.go @@ -0,0 +1,98 @@ +// Code generated by ent, DO NOT EDIT. + +package document + +import ( + "time" + + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the document type in the database. + Label = "document" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldTitle holds the string denoting the title field in the database. + FieldTitle = "title" + // FieldPath holds the string denoting the path field in the database. + FieldPath = "path" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeDocumentTokens holds the string denoting the document_tokens edge name in mutations. 
+ EdgeDocumentTokens = "document_tokens" + // EdgeAttachments holds the string denoting the attachments edge name in mutations. + EdgeAttachments = "attachments" + // Table holds the table name of the document in the database. + Table = "documents" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "documents" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_documents" + // DocumentTokensTable is the table that holds the document_tokens relation/edge. + DocumentTokensTable = "document_tokens" + // DocumentTokensInverseTable is the table name for the DocumentToken entity. + // It exists in this package in order to avoid circular dependency with the "documenttoken" package. + DocumentTokensInverseTable = "document_tokens" + // DocumentTokensColumn is the table column denoting the document_tokens relation/edge. + DocumentTokensColumn = "document_document_tokens" + // AttachmentsTable is the table that holds the attachments relation/edge. + AttachmentsTable = "attachments" + // AttachmentsInverseTable is the table name for the Attachment entity. + // It exists in this package in order to avoid circular dependency with the "attachment" package. + AttachmentsInverseTable = "attachments" + // AttachmentsColumn is the table column denoting the attachments relation/edge. + AttachmentsColumn = "document_attachments" +) + +// Columns holds all SQL columns for document fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldTitle, + FieldPath, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "documents" +// table and are not defined as standalone fields in the schema. 
+var ForeignKeys = []string{ + "group_documents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // TitleValidator is a validator for the "title" field. It is called by the builders before save. + TitleValidator func(string) error + // PathValidator is a validator for the "path" field. It is called by the builders before save. + PathValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) diff --git a/backend/ent/document/where.go b/backend/ent/document/where.go new file mode 100644 index 0000000..f859293 --- /dev/null +++ b/backend/ent/document/where.go @@ -0,0 +1,553 @@ +// Code generated by ent, DO NOT EDIT. + +package document + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. 
+func IDNEQ(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
+func UpdatedAt(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Title applies equality check predicate on the "title" field. It's identical to TitleEQ. +func Title(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTitle), v)) + }) +} + +// Path applies equality check predicate on the "path" field. It's identical to PathEQ. +func Path(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPath), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TitleEQ applies the EQ predicate on the "title" field. +func TitleEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTitle), v)) + }) +} + +// TitleNEQ applies the NEQ predicate on the "title" field. +func TitleNEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldTitle), v)) + }) +} + +// TitleIn applies the In predicate on the "title" field. +func TitleIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldTitle), v...)) + }) +} + +// TitleNotIn applies the NotIn predicate on the "title" field. +func TitleNotIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldTitle), v...)) + }) +} + +// TitleGT applies the GT predicate on the "title" field. +func TitleGT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldTitle), v)) + }) +} + +// TitleGTE applies the GTE predicate on the "title" field. 
+func TitleGTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldTitle), v)) + }) +} + +// TitleLT applies the LT predicate on the "title" field. +func TitleLT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldTitle), v)) + }) +} + +// TitleLTE applies the LTE predicate on the "title" field. +func TitleLTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldTitle), v)) + }) +} + +// TitleContains applies the Contains predicate on the "title" field. +func TitleContains(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldTitle), v)) + }) +} + +// TitleHasPrefix applies the HasPrefix predicate on the "title" field. +func TitleHasPrefix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldTitle), v)) + }) +} + +// TitleHasSuffix applies the HasSuffix predicate on the "title" field. +func TitleHasSuffix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldTitle), v)) + }) +} + +// TitleEqualFold applies the EqualFold predicate on the "title" field. +func TitleEqualFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldTitle), v)) + }) +} + +// TitleContainsFold applies the ContainsFold predicate on the "title" field. +func TitleContainsFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldTitle), v)) + }) +} + +// PathEQ applies the EQ predicate on the "path" field. +func PathEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPath), v)) + }) +} + +// PathNEQ applies the NEQ predicate on the "path" field. 
+func PathNEQ(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldPath), v)) + }) +} + +// PathIn applies the In predicate on the "path" field. +func PathIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldPath), v...)) + }) +} + +// PathNotIn applies the NotIn predicate on the "path" field. +func PathNotIn(vs ...string) predicate.Document { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldPath), v...)) + }) +} + +// PathGT applies the GT predicate on the "path" field. +func PathGT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldPath), v)) + }) +} + +// PathGTE applies the GTE predicate on the "path" field. +func PathGTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldPath), v)) + }) +} + +// PathLT applies the LT predicate on the "path" field. +func PathLT(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldPath), v)) + }) +} + +// PathLTE applies the LTE predicate on the "path" field. +func PathLTE(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldPath), v)) + }) +} + +// PathContains applies the Contains predicate on the "path" field. +func PathContains(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldPath), v)) + }) +} + +// PathHasPrefix applies the HasPrefix predicate on the "path" field. 
+func PathHasPrefix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldPath), v)) + }) +} + +// PathHasSuffix applies the HasSuffix predicate on the "path" field. +func PathHasSuffix(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldPath), v)) + }) +} + +// PathEqualFold applies the EqualFold predicate on the "path" field. +func PathEqualFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldPath), v)) + }) +} + +// PathContainsFold applies the ContainsFold predicate on the "path" field. +func PathContainsFold(v string) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldPath), v)) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDocumentTokens applies the HasEdge predicate on the "document_tokens" edge. 
+func HasDocumentTokens() predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTokensTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentTokensWith applies the HasEdge predicate on the "document_tokens" edge with a given conditions (other predicates). +func HasDocumentTokensWith(preds ...predicate.DocumentToken) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTokensInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentTokensTable, DocumentTokensColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAttachments applies the HasEdge predicate on the "attachments" edge. +func HasAttachments() predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates). +func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Document) predicate.Document { + return predicate.Document(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/backend/ent/document_create.go b/backend/ent/document_create.go new file mode 100644 index 0000000..ea98c1e --- /dev/null +++ b/backend/ent/document_create.go @@ -0,0 +1,447 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" +) + +// DocumentCreate is the builder for creating a Document entity. +type DocumentCreate struct { + config + mutation *DocumentMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dc *DocumentCreate) SetCreatedAt(t time.Time) *DocumentCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableCreatedAt(t *time.Time) *DocumentCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (dc *DocumentCreate) SetUpdatedAt(t time.Time) *DocumentCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableUpdatedAt(t *time.Time) *DocumentCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetTitle sets the "title" field. +func (dc *DocumentCreate) SetTitle(s string) *DocumentCreate { + dc.mutation.SetTitle(s) + return dc +} + +// SetPath sets the "path" field. +func (dc *DocumentCreate) SetPath(s string) *DocumentCreate { + dc.mutation.SetPath(s) + return dc +} + +// SetID sets the "id" field. +func (dc *DocumentCreate) SetID(u uuid.UUID) *DocumentCreate { + dc.mutation.SetID(u) + return dc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (dc *DocumentCreate) SetNillableID(u *uuid.UUID) *DocumentCreate { + if u != nil { + dc.SetID(*u) + } + return dc +} + +// SetGroupID sets the "group" edge to the Group entity by ID. +func (dc *DocumentCreate) SetGroupID(id uuid.UUID) *DocumentCreate { + dc.mutation.SetGroupID(id) + return dc +} + +// SetGroup sets the "group" edge to the Group entity. +func (dc *DocumentCreate) SetGroup(g *Group) *DocumentCreate { + return dc.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (dc *DocumentCreate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentCreate { + dc.mutation.AddDocumentTokenIDs(ids...) + return dc +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (dc *DocumentCreate) AddDocumentTokens(d ...*DocumentToken) *DocumentCreate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dc.AddDocumentTokenIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. 
+func (dc *DocumentCreate) AddAttachmentIDs(ids ...uuid.UUID) *DocumentCreate { + dc.mutation.AddAttachmentIDs(ids...) + return dc +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (dc *DocumentCreate) AddAttachments(a ...*Attachment) *DocumentCreate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return dc.AddAttachmentIDs(ids...) +} + +// Mutation returns the DocumentMutation object of the builder. +func (dc *DocumentCreate) Mutation() *DocumentMutation { + return dc.mutation +} + +// Save creates the Document in the database. +func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) { + var ( + err error + node *Document + ) + dc.defaults() + if len(dc.hooks) == 0 { + if err = dc.check(); err != nil { + return nil, err + } + node, err = dc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dc.check(); err != nil { + return nil, err + } + dc.mutation = mutation + if node, err = dc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(dc.hooks) - 1; i >= 0; i-- { + if dc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Document) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (dc *DocumentCreate) SaveX(ctx context.Context) *Document { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (dc *DocumentCreate) Exec(ctx context.Context) error { + _, err := dc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dc *DocumentCreate) ExecX(ctx context.Context) { + if err := dc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dc *DocumentCreate) defaults() { + if _, ok := dc.mutation.CreatedAt(); !ok { + v := document.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + v := document.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } + if _, ok := dc.mutation.ID(); !ok { + v := document.DefaultID() + dc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dc *DocumentCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Document.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Document.updated_at"`)} + } + if _, ok := dc.mutation.Title(); !ok { + return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "Document.title"`)} + } + if v, ok := dc.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if _, ok := dc.mutation.Path(); !ok { + return &ValidationError{Name: "path", err: errors.New(`ent: missing required field "Document.path"`)} + } + if v, ok := dc.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := dc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group", err: errors.New(`ent: missing 
required edge "Document.group"`)} + } + return nil +} + +func (dc *DocumentCreate) sqlSave(ctx context.Context) (*Document, error) { + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + return _node, nil +} + +func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) { + var ( + _node = &Document{config: dc.config} + _spec = &sqlgraph.CreateSpec{ + Table: document.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + } + ) + if id, ok := dc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := dc.mutation.Title(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + _node.Title = value + } + if value, ok := dc.mutation.Path(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + _node.Path = value + } + if nodes := dc.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.group_documents = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := dc.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := dc.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DocumentCreateBulk is the builder for creating many Document entities in bulk. +type DocumentCreateBulk struct { + config + builders []*DocumentCreate +} + +// Save creates the Document entities in the database. 
+func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) { + specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) + nodes := make([]*Document, len(dcb.builders)) + mutators := make([]Mutator, len(dcb.builders)) + for i := range dcb.builders { + func(i int, root context.Context) { + builder := dcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dcb *DocumentCreateBulk) SaveX(ctx context.Context) []*Document { + v, err := dcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dcb *DocumentCreateBulk) Exec(ctx context.Context) error { + _, err := dcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (dcb *DocumentCreateBulk) ExecX(ctx context.Context) { + if err := dcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/document_delete.go b/backend/ent/document_delete.go new file mode 100644 index 0000000..2b5f19a --- /dev/null +++ b/backend/ent/document_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentDelete is the builder for deleting a Document entity. +type DocumentDelete struct { + config + hooks []Hook + mutation *DocumentMutation +} + +// Where appends a list predicates to the DocumentDelete builder. +func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete { + dd.mutation.Where(ps...) + return dd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dd.hooks) == 0 { + affected, err = dd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dd.mutation = mutation + affected, err = dd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dd.hooks) - 1; i >= 0; i-- { + if dd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (dd *DocumentDelete) ExecX(ctx context.Context) int { + n, err := dd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dd *DocumentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + if ps := dd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// DocumentDeleteOne is the builder for deleting a single Document entity. +type DocumentDeleteOne struct { + dd *DocumentDelete +} + +// Exec executes the deletion query. +func (ddo *DocumentDeleteOne) Exec(ctx context.Context) error { + n, err := ddo.dd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{document.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ddo *DocumentDeleteOne) ExecX(ctx context.Context) { + ddo.dd.ExecX(ctx) +} diff --git a/backend/ent/document_query.go b/backend/ent/document_query.go new file mode 100644 index 0000000..0739e02 --- /dev/null +++ b/backend/ent/document_query.go @@ -0,0 +1,762 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentQuery is the builder for querying Document entities. +type DocumentQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Document + withGroup *GroupQuery + withDocumentTokens *DocumentTokenQuery + withAttachments *AttachmentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DocumentQuery builder. +func (dq *DocumentQuery) Where(ps ...predicate.Document) *DocumentQuery { + dq.predicates = append(dq.predicates, ps...) + return dq +} + +// Limit adds a limit step to the query. +func (dq *DocumentQuery) Limit(limit int) *DocumentQuery { + dq.limit = &limit + return dq +} + +// Offset adds an offset step to the query. +func (dq *DocumentQuery) Offset(offset int) *DocumentQuery { + dq.offset = &offset + return dq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery { + dq.unique = &unique + return dq +} + +// Order adds an order step to the query. +func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryGroup chains the current query on the "group" edge. 
+func (dq *DocumentQuery) QueryGroup() *GroupQuery { + query := &GroupQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, document.GroupTable, document.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDocumentTokens chains the current query on the "document_tokens" edge. +func (dq *DocumentQuery) QueryDocumentTokens() *DocumentTokenQuery { + query := &DocumentTokenQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, selector), + sqlgraph.To(documenttoken.Table, documenttoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.DocumentTokensTable, document.DocumentTokensColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAttachments chains the current query on the "attachments" edge. 
+func (dq *DocumentQuery) QueryAttachments() *AttachmentQuery { + query := &AttachmentQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(document.Table, document.FieldID, selector), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, document.AttachmentsTable, document.AttachmentsColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Document entity from the query. +// Returns a *NotFoundError when no Document was found. +func (dq *DocumentQuery) First(ctx context.Context) (*Document, error) { + nodes, err := dq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{document.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DocumentQuery) FirstX(ctx context.Context) *Document { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Document ID from the query. +// Returns a *NotFoundError when no Document ID was found. +func (dq *DocumentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{document.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dq *DocumentQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Document entity found by the query, ensuring it only returns one. 
+// Returns a *NotSingularError when more than one Document entity is found. +// Returns a *NotFoundError when no Document entities are found. +func (dq *DocumentQuery) Only(ctx context.Context) (*Document, error) { + nodes, err := dq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{document.Label} + default: + return nil, &NotSingularError{document.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DocumentQuery) OnlyX(ctx context.Context) *Document { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Document ID in the query. +// Returns a *NotSingularError when more than one Document ID is found. +// Returns a *NotFoundError when no entities are found. +func (dq *DocumentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{document.Label} + default: + err = &NotSingularError{document.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DocumentQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Documents. +func (dq *DocumentQuery) All(ctx context.Context) ([]*Document, error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DocumentQuery) AllX(ctx context.Context) []*Document { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Document IDs. 
+func (dq *DocumentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { + var ids []uuid.UUID + if err := dq.Select(document.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DocumentQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DocumentQuery) Count(ctx context.Context) (int, error) { + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return dq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (dq *DocumentQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DocumentQuery) Exist(ctx context.Context) (bool, error) { + if err := dq.prepareQuery(ctx); err != nil { + return false, err + } + return dq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DocumentQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DocumentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DocumentQuery) Clone() *DocumentQuery { + if dq == nil { + return nil + } + return &DocumentQuery{ + config: dq.config, + limit: dq.limit, + offset: dq.offset, + order: append([]OrderFunc{}, dq.order...), + predicates: append([]predicate.Document{}, dq.predicates...), + withGroup: dq.withGroup.Clone(), + withDocumentTokens: dq.withDocumentTokens.Clone(), + withAttachments: dq.withAttachments.Clone(), + // clone intermediate query. 
+ sql: dq.sql.Clone(), + path: dq.path, + unique: dq.unique, + } +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DocumentQuery) WithGroup(opts ...func(*GroupQuery)) *DocumentQuery { + query := &GroupQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withGroup = query + return dq +} + +// WithDocumentTokens tells the query-builder to eager-load the nodes that are connected to +// the "document_tokens" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DocumentQuery) WithDocumentTokens(opts ...func(*DocumentTokenQuery)) *DocumentQuery { + query := &DocumentTokenQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withDocumentTokens = query + return dq +} + +// WithAttachments tells the query-builder to eager-load the nodes that are connected to +// the "attachments" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DocumentQuery) WithAttachments(opts ...func(*AttachmentQuery)) *DocumentQuery { + query := &AttachmentQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withAttachments = query + return dq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Document.Query(). +// GroupBy(document.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dq *DocumentQuery) GroupBy(field string, fields ...string) *DocumentGroupBy { + grbuild := &DocumentGroupBy{config: dq.config} + grbuild.fields = append([]string{field}, fields...) 
+ grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlQuery(ctx), nil + } + grbuild.label = document.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Document.Query(). +// Select(document.FieldCreatedAt). +// Scan(ctx, &v) +func (dq *DocumentQuery) Select(fields ...string) *DocumentSelect { + dq.fields = append(dq.fields, fields...) + selbuild := &DocumentSelect{DocumentQuery: dq} + selbuild.label = document.Label + selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan + return selbuild +} + +func (dq *DocumentQuery) prepareQuery(ctx context.Context) error { + for _, f := range dq.fields { + if !document.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DocumentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Document, error) { + var ( + nodes = []*Document{} + withFKs = dq.withFKs + _spec = dq.querySpec() + loadedTypes = [3]bool{ + dq.withGroup != nil, + dq.withDocumentTokens != nil, + dq.withAttachments != nil, + } + ) + if dq.withGroup != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, document.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]interface{}, error) { + return (*Document).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []interface{}) error { + node := &Document{config: dq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dq.withGroup; query != nil { + if err := dq.loadGroup(ctx, query, nodes, nil, + func(n *Document, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := dq.withDocumentTokens; query != nil { + if err := dq.loadDocumentTokens(ctx, query, nodes, + func(n *Document) { n.Edges.DocumentTokens = []*DocumentToken{} }, + func(n *Document, e *DocumentToken) { n.Edges.DocumentTokens = append(n.Edges.DocumentTokens, e) }); err != nil { + return nil, err + } + } + if query := dq.withAttachments; query != nil { + if err := dq.loadAttachments(ctx, query, nodes, + func(n *Document) { n.Edges.Attachments = []*Attachment{} }, + func(n *Document, e *Attachment) { n.Edges.Attachments = append(n.Edges.Attachments, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dq *DocumentQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Document, init func(*Document), assign func(*Document, *Group)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Document) + for i := range nodes { + if nodes[i].group_documents == nil { + continue + } + fk := *nodes[i].group_documents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if 
!ok { + return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (dq *DocumentQuery) loadDocumentTokens(ctx context.Context, query *DocumentTokenQuery, nodes []*Document, init func(*Document), assign func(*Document, *DocumentToken)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Document) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.InValues(document.DocumentTokensColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.document_document_tokens + if fk == nil { + return fmt.Errorf(`foreign-key "document_document_tokens" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQuery, nodes []*Document, init func(*Document), assign func(*Document, *Attachment)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Document) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.InValues(document.AttachmentsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.document_attachments + if fk == nil { + return fmt.Errorf(`foreign-key "document_attachments" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return 
fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (dq *DocumentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + _spec.Node.Columns = dq.fields + if len(dq.fields) > 0 { + _spec.Unique = dq.unique != nil && *dq.unique + } + return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DocumentQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := dq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %w", err) + } + return n > 0, nil +} + +func (dq *DocumentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + From: dq.sql, + Unique: true, + } + if unique := dq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := dq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, document.FieldID) + for i := range fields { + if fields[i] != document.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dq *DocumentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(document.Table) + columns := dq.fields + if len(columns) == 0 { + columns = document.Columns + } + selector := 
builder.Select(t1.Columns(columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + selector.Select(selector.Columns(columns...)...) + } + if dq.unique != nil && *dq.unique { + selector.Distinct() + } + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector) + } + if offset := dq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DocumentGroupBy is the group-by builder for Document entities. +type DocumentGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DocumentGroupBy) Aggregate(fns ...AggregateFunc) *DocumentGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the group-by query and scans the result into the given value. 
+func (dgb *DocumentGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := dgb.path(ctx) + if err != nil { + return err + } + dgb.sql = query + return dgb.sqlScan(ctx, v) +} + +func (dgb *DocumentGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range dgb.fields { + if !document.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dgb *DocumentGroupBy) sqlQuery() *sql.Selector { + selector := dgb.sql.Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) + for _, f := range dgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(dgb.fields...)...) +} + +// DocumentSelect is the builder for selecting fields of Document entities. +type DocumentSelect struct { + *DocumentQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ds *DocumentSelect) Scan(ctx context.Context, v interface{}) error { + if err := ds.prepareQuery(ctx); err != nil { + return err + } + ds.sql = ds.DocumentQuery.sqlQuery(ctx) + return ds.sqlScan(ctx, v) +} + +func (ds *DocumentSelect) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := ds.sql.Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/document_update.go b/backend/ent/document_update.go new file mode 100644 index 0000000..6aab168 --- /dev/null +++ b/backend/ent/document_update.go @@ -0,0 +1,858 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentUpdate is the builder for updating Document entities. +type DocumentUpdate struct { + config + hooks []Hook + mutation *DocumentMutation +} + +// Where appends a list predicates to the DocumentUpdate builder. +func (du *DocumentUpdate) Where(ps ...predicate.Document) *DocumentUpdate { + du.mutation.Where(ps...) + return du +} + +// SetUpdatedAt sets the "updated_at" field. +func (du *DocumentUpdate) SetUpdatedAt(t time.Time) *DocumentUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// SetTitle sets the "title" field. +func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate { + du.mutation.SetTitle(s) + return du +} + +// SetPath sets the "path" field. 
+func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate { + du.mutation.SetPath(s) + return du +} + +// SetGroupID sets the "group" edge to the Group entity by ID. +func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate { + du.mutation.SetGroupID(id) + return du +} + +// SetGroup sets the "group" edge to the Group entity. +func (du *DocumentUpdate) SetGroup(g *Group) *DocumentUpdate { + return du.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (du *DocumentUpdate) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.AddDocumentTokenIDs(ids...) + return du +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (du *DocumentUpdate) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.AddDocumentTokenIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (du *DocumentUpdate) AddAttachmentIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.AddAttachmentIDs(ids...) + return du +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (du *DocumentUpdate) AddAttachments(a ...*Attachment) *DocumentUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return du.AddAttachmentIDs(ids...) +} + +// Mutation returns the DocumentMutation object of the builder. +func (du *DocumentUpdate) Mutation() *DocumentMutation { + return du.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (du *DocumentUpdate) ClearGroup() *DocumentUpdate { + du.mutation.ClearGroup() + return du +} + +// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity. 
+func (du *DocumentUpdate) ClearDocumentTokens() *DocumentUpdate { + du.mutation.ClearDocumentTokens() + return du +} + +// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs. +func (du *DocumentUpdate) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.RemoveDocumentTokenIDs(ids...) + return du +} + +// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities. +func (du *DocumentUpdate) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.RemoveDocumentTokenIDs(ids...) +} + +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (du *DocumentUpdate) ClearAttachments() *DocumentUpdate { + du.mutation.ClearAttachments() + return du +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (du *DocumentUpdate) RemoveAttachmentIDs(ids ...uuid.UUID) *DocumentUpdate { + du.mutation.RemoveAttachmentIDs(ids...) + return du +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (du *DocumentUpdate) RemoveAttachments(a ...*Attachment) *DocumentUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return du.RemoveAttachmentIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (du *DocumentUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + du.defaults() + if len(du.hooks) == 0 { + if err = du.check(); err != nil { + return 0, err + } + affected, err = du.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = du.check(); err != nil { + return 0, err + } + du.mutation = mutation + affected, err = du.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(du.hooks) - 1; i >= 0; i-- { + if du.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = du.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, du.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DocumentUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DocumentUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DocumentUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (du *DocumentUpdate) defaults() { + if _, ok := du.mutation.UpdatedAt(); !ok { + v := document.UpdateDefaultUpdatedAt() + du.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (du *DocumentUpdate) check() error { + if v, ok := du.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if v, ok := du.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := du.mutation.GroupID(); du.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Document.group"`) + } + return nil +} + +func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + if ps := du.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + } + if value, ok := du.mutation.Title(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + } + if value, ok := du.mutation.Path(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + } + if du.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + 
_spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if du.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !du.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = 
append(_spec.Edges.Add, edge) + } + if du.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !du.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{document.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// DocumentUpdateOne is the builder for updating a single Document entity. 
+type DocumentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DocumentMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (duo *DocumentUpdateOne) SetUpdatedAt(t time.Time) *DocumentUpdateOne { + duo.mutation.SetUpdatedAt(t) + return duo +} + +// SetTitle sets the "title" field. +func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne { + duo.mutation.SetTitle(s) + return duo +} + +// SetPath sets the "path" field. +func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne { + duo.mutation.SetPath(s) + return duo +} + +// SetGroupID sets the "group" edge to the Group entity by ID. +func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne { + duo.mutation.SetGroupID(id) + return duo +} + +// SetGroup sets the "group" edge to the Group entity. +func (duo *DocumentUpdateOne) SetGroup(g *Group) *DocumentUpdateOne { + return duo.SetGroupID(g.ID) +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by IDs. +func (duo *DocumentUpdateOne) AddDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.AddDocumentTokenIDs(ids...) + return duo +} + +// AddDocumentTokens adds the "document_tokens" edges to the DocumentToken entity. +func (duo *DocumentUpdateOne) AddDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return duo.AddDocumentTokenIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (duo *DocumentUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.AddAttachmentIDs(ids...) + return duo +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (duo *DocumentUpdateOne) AddAttachments(a ...*Attachment) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return duo.AddAttachmentIDs(ids...) 
+} + +// Mutation returns the DocumentMutation object of the builder. +func (duo *DocumentUpdateOne) Mutation() *DocumentMutation { + return duo.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (duo *DocumentUpdateOne) ClearGroup() *DocumentUpdateOne { + duo.mutation.ClearGroup() + return duo +} + +// ClearDocumentTokens clears all "document_tokens" edges to the DocumentToken entity. +func (duo *DocumentUpdateOne) ClearDocumentTokens() *DocumentUpdateOne { + duo.mutation.ClearDocumentTokens() + return duo +} + +// RemoveDocumentTokenIDs removes the "document_tokens" edge to DocumentToken entities by IDs. +func (duo *DocumentUpdateOne) RemoveDocumentTokenIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.RemoveDocumentTokenIDs(ids...) + return duo +} + +// RemoveDocumentTokens removes "document_tokens" edges to DocumentToken entities. +func (duo *DocumentUpdateOne) RemoveDocumentTokens(d ...*DocumentToken) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return duo.RemoveDocumentTokenIDs(ids...) +} + +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (duo *DocumentUpdateOne) ClearAttachments() *DocumentUpdateOne { + duo.mutation.ClearAttachments() + return duo +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (duo *DocumentUpdateOne) RemoveAttachmentIDs(ids ...uuid.UUID) *DocumentUpdateOne { + duo.mutation.RemoveAttachmentIDs(ids...) + return duo +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (duo *DocumentUpdateOne) RemoveAttachments(a ...*Attachment) *DocumentUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return duo.RemoveAttachmentIDs(ids...) +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. 
+func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUpdateOne { + duo.fields = append([]string{field}, fields...) + return duo +} + +// Save executes the query and returns the updated Document entity. +func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) { + var ( + err error + node *Document + ) + duo.defaults() + if len(duo.hooks) == 0 { + if err = duo.check(); err != nil { + return nil, err + } + node, err = duo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = duo.check(); err != nil { + return nil, err + } + duo.mutation = mutation + node, err = duo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(duo.hooks) - 1; i >= 0; i-- { + if duo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = duo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, duo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Document) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (duo *DocumentUpdateOne) SaveX(ctx context.Context) *Document { + node, err := duo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (duo *DocumentUpdateOne) Exec(ctx context.Context) error { + _, err := duo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (duo *DocumentUpdateOne) ExecX(ctx context.Context) { + if err := duo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (duo *DocumentUpdateOne) defaults() { + if _, ok := duo.mutation.UpdatedAt(); !ok { + v := document.UpdateDefaultUpdatedAt() + duo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (duo *DocumentUpdateOne) check() error { + if v, ok := duo.mutation.Title(); ok { + if err := document.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Document.title": %w`, err)} + } + } + if v, ok := duo.mutation.Path(); ok { + if err := document.PathValidator(v); err != nil { + return &ValidationError{Name: "path", err: fmt.Errorf(`ent: validator failed for field "Document.path": %w`, err)} + } + } + if _, ok := duo.mutation.GroupID(); duo.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Document.group"`) + } + return nil +} + +func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: document.Table, + Columns: document.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + id, ok := duo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Document.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := duo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, document.FieldID) + for _, f := range fields { + if !document.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != document.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := duo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := duo.mutation.UpdatedAt(); ok { + 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: document.FieldUpdatedAt, + }) + } + if value, ok := duo.mutation.Title(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldTitle, + }) + } + if value, ok := duo.mutation.Path(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: document.FieldPath, + }) + } + if duo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: document.GroupTable, + Columns: []string{document.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: group.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if duo.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.RemovedDocumentTokensIDs(); len(nodes) > 0 && !duo.mutation.DocumentTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: 
document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.DocumentTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.DocumentTokensTable, + Columns: []string{document.DocumentTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if duo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !duo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: document.AttachmentsTable, + 
Columns: []string{document.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Document{config: duo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{document.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/backend/ent/documenttoken.go b/backend/ent/documenttoken.go new file mode 100644 index 0000000..c3b0a9e --- /dev/null +++ b/backend/ent/documenttoken.go @@ -0,0 +1,190 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" +) + +// DocumentToken is the model entity for the DocumentToken schema. +type DocumentToken struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Token holds the value of the "token" field. + Token []byte `json:"token,omitempty"` + // Uses holds the value of the "uses" field. + Uses int `json:"uses,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. 
+ // The values are being populated by the DocumentTokenQuery when eager-loading is set. + Edges DocumentTokenEdges `json:"edges"` + document_document_tokens *uuid.UUID +} + +// DocumentTokenEdges holds the relations/edges for other nodes in the graph. +type DocumentTokenEdges struct { + // Document holds the value of the document edge. + Document *Document `json:"document,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// DocumentOrErr returns the Document value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DocumentTokenEdges) DocumentOrErr() (*Document, error) { + if e.loadedTypes[0] { + if e.Document == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: document.Label} + } + return e.Document, nil + } + return nil, &NotLoadedError{edge: "document"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*DocumentToken) scanValues(columns []string) ([]interface{}, error) { + values := make([]interface{}, len(columns)) + for i := range columns { + switch columns[i] { + case documenttoken.FieldToken: + values[i] = new([]byte) + case documenttoken.FieldUses: + values[i] = new(sql.NullInt64) + case documenttoken.FieldCreatedAt, documenttoken.FieldUpdatedAt, documenttoken.FieldExpiresAt: + values[i] = new(sql.NullTime) + case documenttoken.FieldID: + values[i] = new(uuid.UUID) + case documenttoken.ForeignKeys[0]: // document_document_tokens + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + return nil, fmt.Errorf("unexpected column %q for type DocumentToken", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DocumentToken fields. 
+func (dt *DocumentToken) assignValues(columns []string, values []interface{}) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case documenttoken.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + dt.ID = *value + } + case documenttoken.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + dt.CreatedAt = value.Time + } + case documenttoken.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + dt.UpdatedAt = value.Time + } + case documenttoken.FieldToken: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value != nil { + dt.Token = *value + } + case documenttoken.FieldUses: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field uses", values[i]) + } else if value.Valid { + dt.Uses = int(value.Int64) + } + case documenttoken.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + dt.ExpiresAt = value.Time + } + case documenttoken.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field document_document_tokens", values[i]) + } else if value.Valid { + dt.document_document_tokens = new(uuid.UUID) + *dt.document_document_tokens = *value.S.(*uuid.UUID) + } + } + } + return nil +} + +// QueryDocument queries the "document" edge of the DocumentToken entity. 
+func (dt *DocumentToken) QueryDocument() *DocumentQuery { + return (&DocumentTokenClient{config: dt.config}).QueryDocument(dt) +} + +// Update returns a builder for updating this DocumentToken. +// Note that you need to call DocumentToken.Unwrap() before calling this method if this DocumentToken +// was returned from a transaction, and the transaction was committed or rolled back. +func (dt *DocumentToken) Update() *DocumentTokenUpdateOne { + return (&DocumentTokenClient{config: dt.config}).UpdateOne(dt) +} + +// Unwrap unwraps the DocumentToken entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (dt *DocumentToken) Unwrap() *DocumentToken { + _tx, ok := dt.config.driver.(*txDriver) + if !ok { + panic("ent: DocumentToken is not a transactional entity") + } + dt.config.driver = _tx.drv + return dt +} + +// String implements the fmt.Stringer. +func (dt *DocumentToken) String() string { + var builder strings.Builder + builder.WriteString("DocumentToken(") + builder.WriteString(fmt.Sprintf("id=%v, ", dt.ID)) + builder.WriteString("created_at=") + builder.WriteString(dt.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(dt.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteString(fmt.Sprintf("%v", dt.Token)) + builder.WriteString(", ") + builder.WriteString("uses=") + builder.WriteString(fmt.Sprintf("%v", dt.Uses)) + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(dt.ExpiresAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// DocumentTokens is a parsable slice of DocumentToken. 
+type DocumentTokens []*DocumentToken + +func (dt DocumentTokens) config(cfg config) { + for _i := range dt { + dt[_i].config = cfg + } +} diff --git a/backend/ent/documenttoken/documenttoken.go b/backend/ent/documenttoken/documenttoken.go new file mode 100644 index 0000000..ce05656 --- /dev/null +++ b/backend/ent/documenttoken/documenttoken.go @@ -0,0 +1,85 @@ +// Code generated by ent, DO NOT EDIT. + +package documenttoken + +import ( + "time" + + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the documenttoken type in the database. + Label = "document_token" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldUses holds the string denoting the uses field in the database. + FieldUses = "uses" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // EdgeDocument holds the string denoting the document edge name in mutations. + EdgeDocument = "document" + // Table holds the table name of the documenttoken in the database. + Table = "document_tokens" + // DocumentTable is the table that holds the document relation/edge. + DocumentTable = "document_tokens" + // DocumentInverseTable is the table name for the Document entity. + // It exists in this package in order to avoid circular dependency with the "document" package. + DocumentInverseTable = "documents" + // DocumentColumn is the table column denoting the document relation/edge. + DocumentColumn = "document_document_tokens" +) + +// Columns holds all SQL columns for documenttoken fields. 
+var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldToken, + FieldUses, + FieldExpiresAt, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "document_tokens" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "document_document_tokens", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // TokenValidator is a validator for the "token" field. It is called by the builders before save. + TokenValidator func([]byte) error + // DefaultUses holds the default value on creation for the "uses" field. + DefaultUses int + // DefaultExpiresAt holds the default value on creation for the "expires_at" field. + DefaultExpiresAt func() time.Time + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) diff --git a/backend/ent/documenttoken/where.go b/backend/ent/documenttoken/where.go new file mode 100644 index 0000000..918b975 --- /dev/null +++ b/backend/ent/documenttoken/where.go @@ -0,0 +1,498 @@ +// Code generated by ent, DO NOT EDIT. + +package documenttoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// ID filters vertices based on their ID field. 
+func ID(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id uuid.UUID) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldToken), v)) + }) +} + +// Uses applies equality check predicate on the "uses" field. It's identical to UsesEQ. +func Uses(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUses), v)) + }) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpiresAt), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. 
+func CreatedAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TokenEQ applies the EQ predicate on the "token" field. 
+func TokenEQ(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldToken), v)) + }) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldToken), v)) + }) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...[]byte) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldToken), v...)) + }) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...[]byte) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldToken), v...)) + }) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldToken), v)) + }) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldToken), v)) + }) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldToken), v)) + }) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v []byte) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldToken), v)) + }) +} + +// UsesEQ applies the EQ predicate on the "uses" field. 
+func UsesEQ(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUses), v)) + }) +} + +// UsesNEQ applies the NEQ predicate on the "uses" field. +func UsesNEQ(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUses), v)) + }) +} + +// UsesIn applies the In predicate on the "uses" field. +func UsesIn(vs ...int) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUses), v...)) + }) +} + +// UsesNotIn applies the NotIn predicate on the "uses" field. +func UsesNotIn(vs ...int) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUses), v...)) + }) +} + +// UsesGT applies the GT predicate on the "uses" field. +func UsesGT(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUses), v)) + }) +} + +// UsesGTE applies the GTE predicate on the "uses" field. +func UsesGTE(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUses), v)) + }) +} + +// UsesLT applies the LT predicate on the "uses" field. +func UsesLT(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUses), v)) + }) +} + +// UsesLTE applies the LTE predicate on the "uses" field. +func UsesLTE(v int) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUses), v)) + }) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. 
+func ExpiresAtEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldExpiresAt), v...)) + }) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.DocumentToken { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldExpiresAt), v...)) + }) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldExpiresAt), v)) + }) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. 
+func ExpiresAtLTE(v time.Time) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldExpiresAt), v)) + }) +} + +// HasDocument applies the HasEdge predicate on the "document" edge. +func HasDocument() predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates). +func HasDocumentWith(preds ...predicate.Document) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.DocumentToken) predicate.DocumentToken { + return predicate.DocumentToken(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/backend/ent/documenttoken_create.go b/backend/ent/documenttoken_create.go new file mode 100644 index 0000000..65908b6 --- /dev/null +++ b/backend/ent/documenttoken_create.go @@ -0,0 +1,418 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" +) + +// DocumentTokenCreate is the builder for creating a DocumentToken entity. +type DocumentTokenCreate struct { + config + mutation *DocumentTokenMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dtc *DocumentTokenCreate) SetCreatedAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetCreatedAt(t) + return dtc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableCreatedAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetCreatedAt(*t) + } + return dtc +} + +// SetUpdatedAt sets the "updated_at" field. +func (dtc *DocumentTokenCreate) SetUpdatedAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetUpdatedAt(t) + return dtc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableUpdatedAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetUpdatedAt(*t) + } + return dtc +} + +// SetToken sets the "token" field. +func (dtc *DocumentTokenCreate) SetToken(b []byte) *DocumentTokenCreate { + dtc.mutation.SetToken(b) + return dtc +} + +// SetUses sets the "uses" field. 
+func (dtc *DocumentTokenCreate) SetUses(i int) *DocumentTokenCreate { + dtc.mutation.SetUses(i) + return dtc +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableUses(i *int) *DocumentTokenCreate { + if i != nil { + dtc.SetUses(*i) + } + return dtc +} + +// SetExpiresAt sets the "expires_at" field. +func (dtc *DocumentTokenCreate) SetExpiresAt(t time.Time) *DocumentTokenCreate { + dtc.mutation.SetExpiresAt(t) + return dtc +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableExpiresAt(t *time.Time) *DocumentTokenCreate { + if t != nil { + dtc.SetExpiresAt(*t) + } + return dtc +} + +// SetID sets the "id" field. +func (dtc *DocumentTokenCreate) SetID(u uuid.UUID) *DocumentTokenCreate { + dtc.mutation.SetID(u) + return dtc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableID(u *uuid.UUID) *DocumentTokenCreate { + if u != nil { + dtc.SetID(*u) + } + return dtc +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtc *DocumentTokenCreate) SetDocumentID(id uuid.UUID) *DocumentTokenCreate { + dtc.mutation.SetDocumentID(id) + return dtc +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. +func (dtc *DocumentTokenCreate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenCreate { + if id != nil { + dtc = dtc.SetDocumentID(*id) + } + return dtc +} + +// SetDocument sets the "document" edge to the Document entity. +func (dtc *DocumentTokenCreate) SetDocument(d *Document) *DocumentTokenCreate { + return dtc.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtc *DocumentTokenCreate) Mutation() *DocumentTokenMutation { + return dtc.mutation +} + +// Save creates the DocumentToken in the database. 
+func (dtc *DocumentTokenCreate) Save(ctx context.Context) (*DocumentToken, error) { + var ( + err error + node *DocumentToken + ) + dtc.defaults() + if len(dtc.hooks) == 0 { + if err = dtc.check(); err != nil { + return nil, err + } + node, err = dtc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtc.check(); err != nil { + return nil, err + } + dtc.mutation = mutation + if node, err = dtc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(dtc.hooks) - 1; i >= 0; i-- { + if dtc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dtc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*DocumentToken) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (dtc *DocumentTokenCreate) SaveX(ctx context.Context) *DocumentToken { + v, err := dtc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dtc *DocumentTokenCreate) Exec(ctx context.Context) error { + _, err := dtc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtc *DocumentTokenCreate) ExecX(ctx context.Context) { + if err := dtc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (dtc *DocumentTokenCreate) defaults() { + if _, ok := dtc.mutation.CreatedAt(); !ok { + v := documenttoken.DefaultCreatedAt() + dtc.mutation.SetCreatedAt(v) + } + if _, ok := dtc.mutation.UpdatedAt(); !ok { + v := documenttoken.DefaultUpdatedAt() + dtc.mutation.SetUpdatedAt(v) + } + if _, ok := dtc.mutation.Uses(); !ok { + v := documenttoken.DefaultUses + dtc.mutation.SetUses(v) + } + if _, ok := dtc.mutation.ExpiresAt(); !ok { + v := documenttoken.DefaultExpiresAt() + dtc.mutation.SetExpiresAt(v) + } + if _, ok := dtc.mutation.ID(); !ok { + v := documenttoken.DefaultID() + dtc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtc *DocumentTokenCreate) check() error { + if _, ok := dtc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DocumentToken.created_at"`)} + } + if _, ok := dtc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DocumentToken.updated_at"`)} + } + if _, ok := dtc.mutation.Token(); !ok { + return &ValidationError{Name: "token", err: errors.New(`ent: missing required field "DocumentToken.token"`)} + } + if v, ok := dtc.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + if _, ok := dtc.mutation.Uses(); !ok { + return &ValidationError{Name: "uses", err: errors.New(`ent: missing required field "DocumentToken.uses"`)} + } + if _, ok := dtc.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "DocumentToken.expires_at"`)} + } + return nil +} + +func (dtc *DocumentTokenCreate) sqlSave(ctx context.Context) (*DocumentToken, error) { + _node, _spec := dtc.createSpec() + if err := sqlgraph.CreateNode(ctx, dtc.driver, _spec); err != 
nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + return _node, nil +} + +func (dtc *DocumentTokenCreate) createSpec() (*DocumentToken, *sqlgraph.CreateSpec) { + var ( + _node = &DocumentToken{config: dtc.config} + _spec = &sqlgraph.CreateSpec{ + Table: documenttoken.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + } + ) + if id, ok := dtc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := dtc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := dtc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := dtc.mutation.Token(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + _node.Token = value + } + if value, ok := dtc.mutation.Uses(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + _node.Uses = value + } + if value, ok := dtc.mutation.ExpiresAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + _node.ExpiresAt = value + } + if nodes := dtc.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: 
[]string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.document_document_tokens = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DocumentTokenCreateBulk is the builder for creating many DocumentToken entities in bulk. +type DocumentTokenCreateBulk struct { + config + builders []*DocumentTokenCreate +} + +// Save creates the DocumentToken entities in the database. +func (dtcb *DocumentTokenCreateBulk) Save(ctx context.Context) ([]*DocumentToken, error) { + specs := make([]*sqlgraph.CreateSpec, len(dtcb.builders)) + nodes := make([]*DocumentToken, len(dtcb.builders)) + mutators := make([]Mutator, len(dtcb.builders)) + for i := range dtcb.builders { + func(i int, root context.Context) { + builder := dtcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dtcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, dtcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dtcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dtcb *DocumentTokenCreateBulk) SaveX(ctx context.Context) []*DocumentToken { + v, err := dtcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dtcb *DocumentTokenCreateBulk) Exec(ctx context.Context) error { + _, err := dtcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtcb *DocumentTokenCreateBulk) ExecX(ctx context.Context) { + if err := dtcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/documenttoken_delete.go b/backend/ent/documenttoken_delete.go new file mode 100644 index 0000000..bc8f488 --- /dev/null +++ b/backend/ent/documenttoken_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenDelete is the builder for deleting a DocumentToken entity. +type DocumentTokenDelete struct { + config + hooks []Hook + mutation *DocumentTokenMutation +} + +// Where appends a list predicates to the DocumentTokenDelete builder. +func (dtd *DocumentTokenDelete) Where(ps ...predicate.DocumentToken) *DocumentTokenDelete { + dtd.mutation.Where(ps...) 
+ return dtd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dtd *DocumentTokenDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dtd.hooks) == 0 { + affected, err = dtd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dtd.mutation = mutation + affected, err = dtd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dtd.hooks) - 1; i >= 0; i-- { + if dtd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dtd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtd *DocumentTokenDelete) ExecX(ctx context.Context) int { + n, err := dtd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dtd *DocumentTokenDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + if ps := dtd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dtd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// DocumentTokenDeleteOne is the builder for deleting a single DocumentToken entity. +type DocumentTokenDeleteOne struct { + dtd *DocumentTokenDelete +} + +// Exec executes the deletion query. 
+func (dtdo *DocumentTokenDeleteOne) Exec(ctx context.Context) error { + n, err := dtdo.dtd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{documenttoken.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtdo *DocumentTokenDeleteOne) ExecX(ctx context.Context) { + dtdo.dtd.ExecX(ctx) +} diff --git a/backend/ent/documenttoken_query.go b/backend/ent/documenttoken_query.go new file mode 100644 index 0000000..bd48c10 --- /dev/null +++ b/backend/ent/documenttoken_query.go @@ -0,0 +1,611 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenQuery is the builder for querying DocumentToken entities. +type DocumentTokenQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.DocumentToken + withDocument *DocumentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DocumentTokenQuery builder. +func (dtq *DocumentTokenQuery) Where(ps ...predicate.DocumentToken) *DocumentTokenQuery { + dtq.predicates = append(dtq.predicates, ps...) + return dtq +} + +// Limit adds a limit step to the query. +func (dtq *DocumentTokenQuery) Limit(limit int) *DocumentTokenQuery { + dtq.limit = &limit + return dtq +} + +// Offset adds an offset step to the query. +func (dtq *DocumentTokenQuery) Offset(offset int) *DocumentTokenQuery { + dtq.offset = &offset + return dtq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (dtq *DocumentTokenQuery) Unique(unique bool) *DocumentTokenQuery { + dtq.unique = &unique + return dtq +} + +// Order adds an order step to the query. +func (dtq *DocumentTokenQuery) Order(o ...OrderFunc) *DocumentTokenQuery { + dtq.order = append(dtq.order, o...) + return dtq +} + +// QueryDocument chains the current query on the "document" edge. +func (dtq *DocumentTokenQuery) QueryDocument() *DocumentQuery { + query := &DocumentQuery{config: dtq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dtq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(documenttoken.Table, documenttoken.FieldID, selector), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, documenttoken.DocumentTable, documenttoken.DocumentColumn), + ) + fromU = sqlgraph.SetNeighbors(dtq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first DocumentToken entity from the query. +// Returns a *NotFoundError when no DocumentToken was found. +func (dtq *DocumentTokenQuery) First(ctx context.Context) (*DocumentToken, error) { + nodes, err := dtq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{documenttoken.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dtq *DocumentTokenQuery) FirstX(ctx context.Context) *DocumentToken { + node, err := dtq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DocumentToken ID from the query. +// Returns a *NotFoundError when no DocumentToken ID was found. 
+func (dtq *DocumentTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dtq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{documenttoken.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dtq *DocumentTokenQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := dtq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DocumentToken entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DocumentToken entity is found. +// Returns a *NotFoundError when no DocumentToken entities are found. +func (dtq *DocumentTokenQuery) Only(ctx context.Context) (*DocumentToken, error) { + nodes, err := dtq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{documenttoken.Label} + default: + return nil, &NotSingularError{documenttoken.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dtq *DocumentTokenQuery) OnlyX(ctx context.Context) *DocumentToken { + node, err := dtq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DocumentToken ID in the query. +// Returns a *NotSingularError when more than one DocumentToken ID is found. +// Returns a *NotFoundError when no entities are found. +func (dtq *DocumentTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = dtq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{documenttoken.Label} + default: + err = &NotSingularError{documenttoken.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (dtq *DocumentTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := dtq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DocumentTokens. +func (dtq *DocumentTokenQuery) All(ctx context.Context) ([]*DocumentToken, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + return dtq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dtq *DocumentTokenQuery) AllX(ctx context.Context) []*DocumentToken { + nodes, err := dtq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DocumentToken IDs. +func (dtq *DocumentTokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { + var ids []uuid.UUID + if err := dtq.Select(documenttoken.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dtq *DocumentTokenQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := dtq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dtq *DocumentTokenQuery) Count(ctx context.Context) (int, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return 0, err + } + return dtq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (dtq *DocumentTokenQuery) CountX(ctx context.Context) int { + count, err := dtq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dtq *DocumentTokenQuery) Exist(ctx context.Context) (bool, error) { + if err := dtq.prepareQuery(ctx); err != nil { + return false, err + } + return dtq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (dtq *DocumentTokenQuery) ExistX(ctx context.Context) bool { + exist, err := dtq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DocumentTokenQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dtq *DocumentTokenQuery) Clone() *DocumentTokenQuery { + if dtq == nil { + return nil + } + return &DocumentTokenQuery{ + config: dtq.config, + limit: dtq.limit, + offset: dtq.offset, + order: append([]OrderFunc{}, dtq.order...), + predicates: append([]predicate.DocumentToken{}, dtq.predicates...), + withDocument: dtq.withDocument.Clone(), + // clone intermediate query. + sql: dtq.sql.Clone(), + path: dtq.path, + unique: dtq.unique, + } +} + +// WithDocument tells the query-builder to eager-load the nodes that are connected to +// the "document" edge. The optional arguments are used to configure the query builder of the edge. +func (dtq *DocumentTokenQuery) WithDocument(opts ...func(*DocumentQuery)) *DocumentTokenQuery { + query := &DocumentQuery{config: dtq.config} + for _, opt := range opts { + opt(query) + } + dtq.withDocument = query + return dtq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.DocumentToken.Query(). +// GroupBy(documenttoken.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dtq *DocumentTokenQuery) GroupBy(field string, fields ...string) *DocumentTokenGroupBy { + grbuild := &DocumentTokenGroupBy{config: dtq.config} + grbuild.fields = append([]string{field}, fields...) 
+ grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + return dtq.sqlQuery(ctx), nil + } + grbuild.label = documenttoken.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.DocumentToken.Query(). +// Select(documenttoken.FieldCreatedAt). +// Scan(ctx, &v) +func (dtq *DocumentTokenQuery) Select(fields ...string) *DocumentTokenSelect { + dtq.fields = append(dtq.fields, fields...) + selbuild := &DocumentTokenSelect{DocumentTokenQuery: dtq} + selbuild.label = documenttoken.Label + selbuild.flds, selbuild.scan = &dtq.fields, selbuild.Scan + return selbuild +} + +func (dtq *DocumentTokenQuery) prepareQuery(ctx context.Context) error { + for _, f := range dtq.fields { + if !documenttoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dtq.path != nil { + prev, err := dtq.path(ctx) + if err != nil { + return err + } + dtq.sql = prev + } + return nil +} + +func (dtq *DocumentTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DocumentToken, error) { + var ( + nodes = []*DocumentToken{} + withFKs = dtq.withFKs + _spec = dtq.querySpec() + loadedTypes = [1]bool{ + dtq.withDocument != nil, + } + ) + if dtq.withDocument != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]interface{}, error) { + return (*DocumentToken).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []interface{}) error { + node := &DocumentToken{config: dtq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dtq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dtq.withDocument; query != nil { + if err := dtq.loadDocument(ctx, query, nodes, nil, + func(n *DocumentToken, e *Document) { n.Edges.Document = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dtq *DocumentTokenQuery) loadDocument(ctx context.Context, query *DocumentQuery, nodes []*DocumentToken, init func(*DocumentToken), assign func(*DocumentToken, *Document)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*DocumentToken) + for i := range nodes { + if nodes[i].document_document_tokens == nil { + continue + } + fk := *nodes[i].document_document_tokens + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(document.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "document_document_tokens" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dtq *DocumentTokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dtq.querySpec() + _spec.Node.Columns = dtq.fields + if len(dtq.fields) > 0 { + _spec.Unique = dtq.unique != nil && *dtq.unique + } + return sqlgraph.CountNodes(ctx, dtq.driver, _spec) +} + +func (dtq *DocumentTokenQuery) sqlExist(ctx context.Context) (bool, 
error) { + n, err := dtq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %w", err) + } + return n > 0, nil +} + +func (dtq *DocumentTokenQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + From: dtq.sql, + Unique: true, + } + if unique := dtq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := dtq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID) + for i := range fields { + if fields[i] != documenttoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dtq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dtq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dtq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dtq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dtq *DocumentTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dtq.driver.Dialect()) + t1 := builder.Table(documenttoken.Table) + columns := dtq.fields + if len(columns) == 0 { + columns = documenttoken.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dtq.sql != nil { + selector = dtq.sql + selector.Select(selector.Columns(columns...)...) + } + if dtq.unique != nil && *dtq.unique { + selector.Distinct() + } + for _, p := range dtq.predicates { + p(selector) + } + for _, p := range dtq.order { + p(selector) + } + if offset := dtq.offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dtq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DocumentTokenGroupBy is the group-by builder for DocumentToken entities. +type DocumentTokenGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dtgb *DocumentTokenGroupBy) Aggregate(fns ...AggregateFunc) *DocumentTokenGroupBy { + dtgb.fns = append(dtgb.fns, fns...) + return dtgb +} + +// Scan applies the group-by query and scans the result into the given value. +func (dtgb *DocumentTokenGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := dtgb.path(ctx) + if err != nil { + return err + } + dtgb.sql = query + return dtgb.sqlScan(ctx, v) +} + +func (dtgb *DocumentTokenGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range dtgb.fields { + if !documenttoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dtgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dtgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dtgb *DocumentTokenGroupBy) sqlQuery() *sql.Selector { + selector := dtgb.sql.Select() + aggregation := make([]string, 0, len(dtgb.fns)) + for _, fn := range dtgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. 
+ if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(dtgb.fields)+len(dtgb.fns)) + for _, f := range dtgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(dtgb.fields...)...) +} + +// DocumentTokenSelect is the builder for selecting fields of DocumentToken entities. +type DocumentTokenSelect struct { + *DocumentTokenQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (dts *DocumentTokenSelect) Scan(ctx context.Context, v interface{}) error { + if err := dts.prepareQuery(ctx); err != nil { + return err + } + dts.sql = dts.DocumentTokenQuery.sqlQuery(ctx) + return dts.sqlScan(ctx, v) +} + +func (dts *DocumentTokenSelect) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := dts.sql.Query() + if err := dts.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/documenttoken_update.go b/backend/ent/documenttoken_update.go new file mode 100644 index 0000000..e4586be --- /dev/null +++ b/backend/ent/documenttoken_update.go @@ -0,0 +1,582 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/ent/predicate" +) + +// DocumentTokenUpdate is the builder for updating DocumentToken entities. 
+type DocumentTokenUpdate struct { + config + hooks []Hook + mutation *DocumentTokenMutation +} + +// Where appends a list predicates to the DocumentTokenUpdate builder. +func (dtu *DocumentTokenUpdate) Where(ps ...predicate.DocumentToken) *DocumentTokenUpdate { + dtu.mutation.Where(ps...) + return dtu +} + +// SetUpdatedAt sets the "updated_at" field. +func (dtu *DocumentTokenUpdate) SetUpdatedAt(t time.Time) *DocumentTokenUpdate { + dtu.mutation.SetUpdatedAt(t) + return dtu +} + +// SetToken sets the "token" field. +func (dtu *DocumentTokenUpdate) SetToken(b []byte) *DocumentTokenUpdate { + dtu.mutation.SetToken(b) + return dtu +} + +// SetUses sets the "uses" field. +func (dtu *DocumentTokenUpdate) SetUses(i int) *DocumentTokenUpdate { + dtu.mutation.ResetUses() + dtu.mutation.SetUses(i) + return dtu +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtu *DocumentTokenUpdate) SetNillableUses(i *int) *DocumentTokenUpdate { + if i != nil { + dtu.SetUses(*i) + } + return dtu +} + +// AddUses adds i to the "uses" field. +func (dtu *DocumentTokenUpdate) AddUses(i int) *DocumentTokenUpdate { + dtu.mutation.AddUses(i) + return dtu +} + +// SetExpiresAt sets the "expires_at" field. +func (dtu *DocumentTokenUpdate) SetExpiresAt(t time.Time) *DocumentTokenUpdate { + dtu.mutation.SetExpiresAt(t) + return dtu +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtu *DocumentTokenUpdate) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdate { + if t != nil { + dtu.SetExpiresAt(*t) + } + return dtu +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtu *DocumentTokenUpdate) SetDocumentID(id uuid.UUID) *DocumentTokenUpdate { + dtu.mutation.SetDocumentID(id) + return dtu +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. 
+func (dtu *DocumentTokenUpdate) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdate { + if id != nil { + dtu = dtu.SetDocumentID(*id) + } + return dtu +} + +// SetDocument sets the "document" edge to the Document entity. +func (dtu *DocumentTokenUpdate) SetDocument(d *Document) *DocumentTokenUpdate { + return dtu.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtu *DocumentTokenUpdate) Mutation() *DocumentTokenMutation { + return dtu.mutation +} + +// ClearDocument clears the "document" edge to the Document entity. +func (dtu *DocumentTokenUpdate) ClearDocument() *DocumentTokenUpdate { + dtu.mutation.ClearDocument() + return dtu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dtu *DocumentTokenUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + dtu.defaults() + if len(dtu.hooks) == 0 { + if err = dtu.check(); err != nil { + return 0, err + } + affected, err = dtu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtu.check(); err != nil { + return 0, err + } + dtu.mutation = mutation + affected, err = dtu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(dtu.hooks) - 1; i >= 0; i-- { + if dtu.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dtu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (dtu *DocumentTokenUpdate) SaveX(ctx context.Context) int { + affected, err := dtu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (dtu *DocumentTokenUpdate) Exec(ctx context.Context) error { + _, err := dtu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtu *DocumentTokenUpdate) ExecX(ctx context.Context) { + if err := dtu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtu *DocumentTokenUpdate) defaults() { + if _, ok := dtu.mutation.UpdatedAt(); !ok { + v := documenttoken.UpdateDefaultUpdatedAt() + dtu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtu *DocumentTokenUpdate) check() error { + if v, ok := dtu.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + return nil +} + +func (dtu *DocumentTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + if ps := dtu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + } + if value, ok := dtu.mutation.Token(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + } + if value, ok := dtu.mutation.Uses(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := 
dtu.mutation.AddedUses(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtu.mutation.ExpiresAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + } + if dtu.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dtu.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, dtu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{documenttoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// DocumentTokenUpdateOne is the builder for updating a single DocumentToken entity. +type DocumentTokenUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DocumentTokenMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (dtuo *DocumentTokenUpdateOne) SetUpdatedAt(t time.Time) *DocumentTokenUpdateOne { + dtuo.mutation.SetUpdatedAt(t) + return dtuo +} + +// SetToken sets the "token" field. +func (dtuo *DocumentTokenUpdateOne) SetToken(b []byte) *DocumentTokenUpdateOne { + dtuo.mutation.SetToken(b) + return dtuo +} + +// SetUses sets the "uses" field. +func (dtuo *DocumentTokenUpdateOne) SetUses(i int) *DocumentTokenUpdateOne { + dtuo.mutation.ResetUses() + dtuo.mutation.SetUses(i) + return dtuo +} + +// SetNillableUses sets the "uses" field if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableUses(i *int) *DocumentTokenUpdateOne { + if i != nil { + dtuo.SetUses(*i) + } + return dtuo +} + +// AddUses adds i to the "uses" field. +func (dtuo *DocumentTokenUpdateOne) AddUses(i int) *DocumentTokenUpdateOne { + dtuo.mutation.AddUses(i) + return dtuo +} + +// SetExpiresAt sets the "expires_at" field. +func (dtuo *DocumentTokenUpdateOne) SetExpiresAt(t time.Time) *DocumentTokenUpdateOne { + dtuo.mutation.SetExpiresAt(t) + return dtuo +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableExpiresAt(t *time.Time) *DocumentTokenUpdateOne { + if t != nil { + dtuo.SetExpiresAt(*t) + } + return dtuo +} + +// SetDocumentID sets the "document" edge to the Document entity by ID. +func (dtuo *DocumentTokenUpdateOne) SetDocumentID(id uuid.UUID) *DocumentTokenUpdateOne { + dtuo.mutation.SetDocumentID(id) + return dtuo +} + +// SetNillableDocumentID sets the "document" edge to the Document entity by ID if the given value is not nil. +func (dtuo *DocumentTokenUpdateOne) SetNillableDocumentID(id *uuid.UUID) *DocumentTokenUpdateOne { + if id != nil { + dtuo = dtuo.SetDocumentID(*id) + } + return dtuo +} + +// SetDocument sets the "document" edge to the Document entity. 
+func (dtuo *DocumentTokenUpdateOne) SetDocument(d *Document) *DocumentTokenUpdateOne { + return dtuo.SetDocumentID(d.ID) +} + +// Mutation returns the DocumentTokenMutation object of the builder. +func (dtuo *DocumentTokenUpdateOne) Mutation() *DocumentTokenMutation { + return dtuo.mutation +} + +// ClearDocument clears the "document" edge to the Document entity. +func (dtuo *DocumentTokenUpdateOne) ClearDocument() *DocumentTokenUpdateOne { + dtuo.mutation.ClearDocument() + return dtuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (dtuo *DocumentTokenUpdateOne) Select(field string, fields ...string) *DocumentTokenUpdateOne { + dtuo.fields = append([]string{field}, fields...) + return dtuo +} + +// Save executes the query and returns the updated DocumentToken entity. +func (dtuo *DocumentTokenUpdateOne) Save(ctx context.Context) (*DocumentToken, error) { + var ( + err error + node *DocumentToken + ) + dtuo.defaults() + if len(dtuo.hooks) == 0 { + if err = dtuo.check(); err != nil { + return nil, err + } + node, err = dtuo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dtuo.check(); err != nil { + return nil, err + } + dtuo.mutation = mutation + node, err = dtuo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(dtuo.hooks) - 1; i >= 0; i-- { + if dtuo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dtuo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dtuo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*DocumentToken) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DocumentTokenMutation", v) + } + node = nv + } + return node, err +} 
+ +// SaveX is like Save, but panics if an error occurs. +func (dtuo *DocumentTokenUpdateOne) SaveX(ctx context.Context) *DocumentToken { + node, err := dtuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (dtuo *DocumentTokenUpdateOne) Exec(ctx context.Context) error { + _, err := dtuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtuo *DocumentTokenUpdateOne) ExecX(ctx context.Context) { + if err := dtuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtuo *DocumentTokenUpdateOne) defaults() { + if _, ok := dtuo.mutation.UpdatedAt(); !ok { + v := documenttoken.UpdateDefaultUpdatedAt() + dtuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtuo *DocumentTokenUpdateOne) check() error { + if v, ok := dtuo.mutation.Token(); ok { + if err := documenttoken.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`ent: validator failed for field "DocumentToken.token": %w`, err)} + } + } + return nil +} + +func (dtuo *DocumentTokenUpdateOne) sqlSave(ctx context.Context) (_node *DocumentToken, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: documenttoken.Table, + Columns: documenttoken.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: documenttoken.FieldID, + }, + }, + } + id, ok := dtuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DocumentToken.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dtuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, documenttoken.FieldID) + for _, f := range fields { + if !documenttoken.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid 
field %q for query", f)} + } + if f != documenttoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dtuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtuo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldUpdatedAt, + }) + } + if value, ok := dtuo.mutation.Token(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBytes, + Value: value, + Column: documenttoken.FieldToken, + }) + } + if value, ok := dtuo.mutation.Uses(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtuo.mutation.AddedUses(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: documenttoken.FieldUses, + }) + } + if value, ok := dtuo.mutation.ExpiresAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: documenttoken.FieldExpiresAt, + }) + } + if dtuo.mutation.DocumentCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dtuo.mutation.DocumentIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: documenttoken.DocumentTable, + Columns: []string{documenttoken.DocumentColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: 
field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &DocumentToken{config: dtuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dtuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{documenttoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/backend/ent/ent.go b/backend/ent/ent.go index f976756..997c25e 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -10,7 +10,10 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -37,13 +40,16 @@ type OrderFunc func(*sql.Selector) // columnChecker returns a function indicates if the column exists in the given column. 
func columnChecker(table string) func(string) error { checks := map[string]func(string) bool{ - authtokens.Table: authtokens.ValidColumn, - group.Table: group.ValidColumn, - item.Table: item.ValidColumn, - itemfield.Table: itemfield.ValidColumn, - label.Table: label.ValidColumn, - location.Table: location.ValidColumn, - user.Table: user.ValidColumn, + attachment.Table: attachment.ValidColumn, + authtokens.Table: authtokens.ValidColumn, + document.Table: document.ValidColumn, + documenttoken.Table: documenttoken.ValidColumn, + group.Table: group.ValidColumn, + item.Table: item.ValidColumn, + itemfield.Table: itemfield.ValidColumn, + label.Table: label.ValidColumn, + location.Table: location.ValidColumn, + user.Table: user.ValidColumn, } check, ok := checks[table] if !ok { diff --git a/backend/ent/group.go b/backend/ent/group.go index 6724ec5..39f38ca 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -40,9 +40,11 @@ type GroupEdges struct { Items []*Item `json:"items,omitempty"` // Labels holds the value of the labels edge. Labels []*Label `json:"labels,omitempty"` + // Documents holds the value of the documents edge. + Documents []*Document `json:"documents,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [4]bool + loadedTypes [5]bool } // UsersOrErr returns the Users value or an error if the edge @@ -81,6 +83,15 @@ func (e GroupEdges) LabelsOrErr() ([]*Label, error) { return nil, &NotLoadedError{edge: "labels"} } +// DocumentsOrErr returns the Documents value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) DocumentsOrErr() ([]*Document, error) { + if e.loadedTypes[4] { + return e.Documents, nil + } + return nil, &NotLoadedError{edge: "documents"} +} + // scanValues returns the types for scanning values from sql.Rows. 
func (*Group) scanValues(columns []string) ([]interface{}, error) { values := make([]interface{}, len(columns)) @@ -162,6 +173,11 @@ func (gr *Group) QueryLabels() *LabelQuery { return (&GroupClient{config: gr.config}).QueryLabels(gr) } +// QueryDocuments queries the "documents" edge of the Group entity. +func (gr *Group) QueryDocuments() *DocumentQuery { + return (&GroupClient{config: gr.config}).QueryDocuments(gr) +} + // Update returns a builder for updating this Group. // Note that you need to call Group.Unwrap() before calling this method if this Group // was returned from a transaction, and the transaction was committed or rolled back. diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index c7ccf72..d30b781 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -30,6 +30,8 @@ const ( EdgeItems = "items" // EdgeLabels holds the string denoting the labels edge name in mutations. EdgeLabels = "labels" + // EdgeDocuments holds the string denoting the documents edge name in mutations. + EdgeDocuments = "documents" // Table holds the table name of the group in the database. Table = "groups" // UsersTable is the table that holds the users relation/edge. @@ -60,6 +62,13 @@ const ( LabelsInverseTable = "labels" // LabelsColumn is the table column denoting the labels relation/edge. LabelsColumn = "group_labels" + // DocumentsTable is the table that holds the documents relation/edge. + DocumentsTable = "documents" + // DocumentsInverseTable is the table name for the Document entity. + // It exists in this package in order to avoid circular dependency with the "document" package. + DocumentsInverseTable = "documents" + // DocumentsColumn is the table column denoting the documents relation/edge. + DocumentsColumn = "group_documents" ) // Columns holds all SQL columns for group fields. 
diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index 35e3c7d..f6c759d 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -478,6 +478,34 @@ func HasLabelsWith(preds ...predicate.Label) predicate.Group { }) } +// HasDocuments applies the HasEdge predicate on the "documents" edge. +func HasDocuments() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates). +func HasDocumentsWith(preds ...predicate.Document) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Group) predicate.Group { return predicate.Group(func(s *sql.Selector) { diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index eda86d6..a72eefe 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -147,6 +148,21 @@ func (gc *GroupCreate) AddLabels(l ...*Label) *GroupCreate { return gc.AddLabelIDs(ids...) 
} +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (gc *GroupCreate) AddDocumentIDs(ids ...uuid.UUID) *GroupCreate { + gc.mutation.AddDocumentIDs(ids...) + return gc +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (gc *GroupCreate) AddDocuments(d ...*Document) *GroupCreate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gc.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (gc *GroupCreate) Mutation() *GroupMutation { return gc.mutation @@ -410,6 +426,25 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := gc.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/backend/ent/group_query.go b/backend/ent/group_query.go index 8206f68..cb3a1c6 100644 --- a/backend/ent/group_query.go +++ b/backend/ent/group_query.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -33,6 +34,7 @@ type GroupQuery struct { withLocations *LocationQuery withItems *ItemQuery withLabels *LabelQuery + withDocuments *DocumentQuery // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -157,6 +159,28 @@ func (gq *GroupQuery) QueryLabels() *LabelQuery { return query } +// QueryDocuments chains the current query on the "documents" edge. +func (gq *GroupQuery) QueryDocuments() *DocumentQuery { + query := &DocumentQuery{config: gq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := gq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := gq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(document.Table, document.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.DocumentsTable, group.DocumentsColumn), + ) + fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first Group entity from the query. // Returns a *NotFoundError when no Group was found. func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { @@ -342,6 +366,7 @@ func (gq *GroupQuery) Clone() *GroupQuery { withLocations: gq.withLocations.Clone(), withItems: gq.withItems.Clone(), withLabels: gq.withLabels.Clone(), + withDocuments: gq.withDocuments.Clone(), // clone intermediate query. sql: gq.sql.Clone(), path: gq.path, @@ -393,6 +418,17 @@ func (gq *GroupQuery) WithLabels(opts ...func(*LabelQuery)) *GroupQuery { return gq } +// WithDocuments tells the query-builder to eager-load the nodes that are connected to +// the "documents" edge. The optional arguments are used to configure the query builder of the edge. +func (gq *GroupQuery) WithDocuments(opts ...func(*DocumentQuery)) *GroupQuery { + query := &DocumentQuery{config: gq.config} + for _, opt := range opts { + opt(query) + } + gq.withDocuments = query + return gq +} + // GroupBy is used to group vertices by one or more fields/columns. 
// It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -461,11 +497,12 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, var ( nodes = []*Group{} _spec = gq.querySpec() - loadedTypes = [4]bool{ + loadedTypes = [5]bool{ gq.withUsers != nil, gq.withLocations != nil, gq.withItems != nil, gq.withLabels != nil, + gq.withDocuments != nil, } ) _spec.ScanValues = func(columns []string) ([]interface{}, error) { @@ -514,6 +551,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, return nil, err } } + if query := gq.withDocuments; query != nil { + if err := gq.loadDocuments(ctx, query, nodes, + func(n *Group) { n.Edges.Documents = []*Document{} }, + func(n *Group, e *Document) { n.Edges.Documents = append(n.Edges.Documents, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -641,6 +685,37 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [ } return nil } +func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, nodes []*Group, init func(*Group), assign func(*Group, *Document)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Document(func(s *sql.Selector) { + s.Where(sql.InValues(group.DocumentsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.group_documents + if fk == nil { + return fmt.Errorf(`foreign-key "group_documents" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { 
_spec := gq.querySpec() diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index 4fa63a8..4ebf709 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/document" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/label" @@ -119,6 +120,21 @@ func (gu *GroupUpdate) AddLabels(l ...*Label) *GroupUpdate { return gu.AddLabelIDs(ids...) } +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (gu *GroupUpdate) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.AddDocumentIDs(ids...) + return gu +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (gu *GroupUpdate) AddDocuments(d ...*Document) *GroupUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (gu *GroupUpdate) Mutation() *GroupMutation { return gu.mutation @@ -208,6 +224,27 @@ func (gu *GroupUpdate) RemoveLabels(l ...*Label) *GroupUpdate { return gu.RemoveLabelIDs(ids...) } +// ClearDocuments clears all "documents" edges to the Document entity. +func (gu *GroupUpdate) ClearDocuments() *GroupUpdate { + gu.mutation.ClearDocuments() + return gu +} + +// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs. +func (gu *GroupUpdate) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.RemoveDocumentIDs(ids...) + return gu +} + +// RemoveDocuments removes "documents" edges to Document entities. +func (gu *GroupUpdate) RemoveDocuments(d ...*Document) *GroupUpdate { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return gu.RemoveDocumentIDs(ids...) 
+} + // Save executes the query and returns the number of nodes affected by the update operation. func (gu *GroupUpdate) Save(ctx context.Context) (int, error) { var ( @@ -547,6 +584,60 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if gu.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !gu.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{group.Label} @@ -652,6 +743,21 @@ func (guo *GroupUpdateOne) AddLabels(l ...*Label) *GroupUpdateOne { return guo.AddLabelIDs(ids...) 
} +// AddDocumentIDs adds the "documents" edge to the Document entity by IDs. +func (guo *GroupUpdateOne) AddDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.AddDocumentIDs(ids...) + return guo +} + +// AddDocuments adds the "documents" edges to the Document entity. +func (guo *GroupUpdateOne) AddDocuments(d ...*Document) *GroupUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.AddDocumentIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (guo *GroupUpdateOne) Mutation() *GroupMutation { return guo.mutation @@ -741,6 +847,27 @@ func (guo *GroupUpdateOne) RemoveLabels(l ...*Label) *GroupUpdateOne { return guo.RemoveLabelIDs(ids...) } +// ClearDocuments clears all "documents" edges to the Document entity. +func (guo *GroupUpdateOne) ClearDocuments() *GroupUpdateOne { + guo.mutation.ClearDocuments() + return guo +} + +// RemoveDocumentIDs removes the "documents" edge to Document entities by IDs. +func (guo *GroupUpdateOne) RemoveDocumentIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.RemoveDocumentIDs(ids...) + return guo +} + +// RemoveDocuments removes "documents" edges to Document entities. +func (guo *GroupUpdateOne) RemoveDocuments(d ...*Document) *GroupUpdateOne { + ids := make([]uuid.UUID, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return guo.RemoveDocumentIDs(ids...) +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { @@ -1110,6 +1237,60 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if guo.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.RemovedDocumentsIDs(); len(nodes) > 0 && !guo.mutation.DocumentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.DocumentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.DocumentsTable, + Columns: []string{group.DocumentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: document.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &Group{config: guo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/backend/ent/has_id.go b/backend/ent/has_id.go new file mode 100644 index 0000000..a6afc6a --- /dev/null +++ b/backend/ent/has_id.go @@ -0,0 +1,45 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import "github.com/google/uuid" + +func (a *Attachment) GetID() uuid.UUID { + return a.ID +} + +func (at *AuthTokens) GetID() uuid.UUID { + return at.ID +} + +func (d *Document) GetID() uuid.UUID { + return d.ID +} + +func (dt *DocumentToken) GetID() uuid.UUID { + return dt.ID +} + +func (gr *Group) GetID() uuid.UUID { + return gr.ID +} + +func (i *Item) GetID() uuid.UUID { + return i.ID +} + +func (_if *ItemField) GetID() uuid.UUID { + return _if.ID +} + +func (l *Label) GetID() uuid.UUID { + return l.ID +} + +func (l *Location) GetID() uuid.UUID { + return l.ID +} + +func (u *User) GetID() uuid.UUID { + return u.ID +} diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 6bffd21..095e929 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -9,6 +9,19 @@ import ( "github.com/hay-kot/content/backend/ent" ) +// The AttachmentFunc type is an adapter to allow the use of ordinary +// function as Attachment mutator. +type AttachmentFunc func(context.Context, *ent.AttachmentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AttachmentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AttachmentMutation", m) + } + return f(ctx, mv) +} + // The AuthTokensFunc type is an adapter to allow the use of ordinary // function as AuthTokens mutator. type AuthTokensFunc func(context.Context, *ent.AuthTokensMutation) (ent.Value, error) @@ -22,6 +35,32 @@ func (f AuthTokensFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, return f(ctx, mv) } +// The DocumentFunc type is an adapter to allow the use of ordinary +// function as Document mutator. +type DocumentFunc func(context.Context, *ent.DocumentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f DocumentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DocumentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentMutation", m) + } + return f(ctx, mv) +} + +// The DocumentTokenFunc type is an adapter to allow the use of ordinary +// function as DocumentToken mutator. +type DocumentTokenFunc func(context.Context, *ent.DocumentTokenMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DocumentTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DocumentTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DocumentTokenMutation", m) + } + return f(ctx, mv) +} + // The GroupFunc type is an adapter to allow the use of ordinary // function as Group mutator. type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error) diff --git a/backend/ent/item.go b/backend/ent/item.go index 7e427d5..b9f47b7 100644 --- a/backend/ent/item.go +++ b/backend/ent/item.go @@ -29,6 +29,10 @@ type Item struct { Description string `json:"description,omitempty"` // Notes holds the value of the "notes" field. Notes string `json:"notes,omitempty"` + // Quantity holds the value of the "quantity" field. + Quantity int `json:"quantity,omitempty"` + // Insured holds the value of the "insured" field. + Insured bool `json:"insured,omitempty"` // SerialNumber holds the value of the "serial_number" field. SerialNumber string `json:"serial_number,omitempty"` // ModelNumber holds the value of the "model_number" field. @@ -72,9 +76,11 @@ type ItemEdges struct { Fields []*ItemField `json:"fields,omitempty"` // Label holds the value of the label edge. Label []*Label `json:"label,omitempty"` + // Attachments holds the value of the attachments edge. 
+ Attachments []*Attachment `json:"attachments,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [4]bool + loadedTypes [5]bool } // GroupOrErr returns the Group value or an error if the edge @@ -121,15 +127,26 @@ func (e ItemEdges) LabelOrErr() ([]*Label, error) { return nil, &NotLoadedError{edge: "label"} } +// AttachmentsOrErr returns the Attachments value or an error if the edge +// was not loaded in eager-loading. +func (e ItemEdges) AttachmentsOrErr() ([]*Attachment, error) { + if e.loadedTypes[4] { + return e.Attachments, nil + } + return nil, &NotLoadedError{edge: "attachments"} +} + // scanValues returns the types for scanning values from sql.Rows. func (*Item) scanValues(columns []string) ([]interface{}, error) { values := make([]interface{}, len(columns)) for i := range columns { switch columns[i] { - case item.FieldLifetimeWarranty: + case item.FieldInsured, item.FieldLifetimeWarranty: values[i] = new(sql.NullBool) case item.FieldPurchasePrice, item.FieldSoldPrice: values[i] = new(sql.NullFloat64) + case item.FieldQuantity: + values[i] = new(sql.NullInt64) case item.FieldName, item.FieldDescription, item.FieldNotes, item.FieldSerialNumber, item.FieldModelNumber, item.FieldManufacturer, item.FieldWarrantyDetails, item.FieldPurchaseFrom, item.FieldSoldTo, item.FieldSoldNotes: values[i] = new(sql.NullString) case item.FieldCreatedAt, item.FieldUpdatedAt, item.FieldWarrantyExpires, item.FieldPurchaseTime, item.FieldSoldTime: @@ -191,6 +208,18 @@ func (i *Item) assignValues(columns []string, values []interface{}) error { } else if value.Valid { i.Notes = value.String } + case item.FieldQuantity: + if value, ok := values[j].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field quantity", values[j]) + } else if value.Valid { + i.Quantity = int(value.Int64) + } + case item.FieldInsured: + if value, ok := values[j].(*sql.NullBool); !ok { + return 
fmt.Errorf("unexpected type %T for field insured", values[j]) + } else if value.Valid { + i.Insured = value.Bool + } case item.FieldSerialNumber: if value, ok := values[j].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field serial_number", values[j]) @@ -308,6 +337,11 @@ func (i *Item) QueryLabel() *LabelQuery { return (&ItemClient{config: i.config}).QueryLabel(i) } +// QueryAttachments queries the "attachments" edge of the Item entity. +func (i *Item) QueryAttachments() *AttachmentQuery { + return (&ItemClient{config: i.config}).QueryAttachments(i) +} + // Update returns a builder for updating this Item. // Note that you need to call Item.Unwrap() before calling this method if this Item // was returned from a transaction, and the transaction was committed or rolled back. @@ -346,6 +380,12 @@ func (i *Item) String() string { builder.WriteString("notes=") builder.WriteString(i.Notes) builder.WriteString(", ") + builder.WriteString("quantity=") + builder.WriteString(fmt.Sprintf("%v", i.Quantity)) + builder.WriteString(", ") + builder.WriteString("insured=") + builder.WriteString(fmt.Sprintf("%v", i.Insured)) + builder.WriteString(", ") builder.WriteString("serial_number=") builder.WriteString(i.SerialNumber) builder.WriteString(", ") diff --git a/backend/ent/item/item.go b/backend/ent/item/item.go index 481d8e7..bdc7593 100644 --- a/backend/ent/item/item.go +++ b/backend/ent/item/item.go @@ -23,6 +23,10 @@ const ( FieldDescription = "description" // FieldNotes holds the string denoting the notes field in the database. FieldNotes = "notes" + // FieldQuantity holds the string denoting the quantity field in the database. + FieldQuantity = "quantity" + // FieldInsured holds the string denoting the insured field in the database. + FieldInsured = "insured" // FieldSerialNumber holds the string denoting the serial_number field in the database. 
FieldSerialNumber = "serial_number" // FieldModelNumber holds the string denoting the model_number field in the database. @@ -57,6 +61,8 @@ const ( EdgeFields = "fields" // EdgeLabel holds the string denoting the label edge name in mutations. EdgeLabel = "label" + // EdgeAttachments holds the string denoting the attachments edge name in mutations. + EdgeAttachments = "attachments" // Table holds the table name of the item in the database. Table = "items" // GroupTable is the table that holds the group relation/edge. @@ -85,6 +91,13 @@ const ( // LabelInverseTable is the table name for the Label entity. // It exists in this package in order to avoid circular dependency with the "label" package. LabelInverseTable = "labels" + // AttachmentsTable is the table that holds the attachments relation/edge. + AttachmentsTable = "attachments" + // AttachmentsInverseTable is the table name for the Attachment entity. + // It exists in this package in order to avoid circular dependency with the "attachment" package. + AttachmentsInverseTable = "attachments" + // AttachmentsColumn is the table column denoting the attachments relation/edge. + AttachmentsColumn = "item_attachments" ) // Columns holds all SQL columns for item fields. @@ -95,6 +108,8 @@ var Columns = []string{ FieldName, FieldDescription, FieldNotes, + FieldQuantity, + FieldInsured, FieldSerialNumber, FieldModelNumber, FieldManufacturer, @@ -151,6 +166,10 @@ var ( DescriptionValidator func(string) error // NotesValidator is a validator for the "notes" field. It is called by the builders before save. NotesValidator func(string) error + // DefaultQuantity holds the default value on creation for the "quantity" field. + DefaultQuantity int + // DefaultInsured holds the default value on creation for the "insured" field. + DefaultInsured bool // SerialNumberValidator is a validator for the "serial_number" field. It is called by the builders before save. 
SerialNumberValidator func(string) error // ModelNumberValidator is a validator for the "model_number" field. It is called by the builders before save. diff --git a/backend/ent/item/where.go b/backend/ent/item/where.go index 58d9f68..7f31665 100644 --- a/backend/ent/item/where.go +++ b/backend/ent/item/where.go @@ -117,6 +117,20 @@ func Notes(v string) predicate.Item { }) } +// Quantity applies equality check predicate on the "quantity" field. It's identical to QuantityEQ. +func Quantity(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldQuantity), v)) + }) +} + +// Insured applies equality check predicate on the "insured" field. It's identical to InsuredEQ. +func Insured(v bool) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldInsured), v)) + }) +} + // SerialNumber applies equality check predicate on the "serial_number" field. It's identical to SerialNumberEQ. func SerialNumber(v string) predicate.Item { return predicate.Item(func(s *sql.Selector) { @@ -661,6 +675,84 @@ func NotesContainsFold(v string) predicate.Item { }) } +// QuantityEQ applies the EQ predicate on the "quantity" field. +func QuantityEQ(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldQuantity), v)) + }) +} + +// QuantityNEQ applies the NEQ predicate on the "quantity" field. +func QuantityNEQ(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldQuantity), v)) + }) +} + +// QuantityIn applies the In predicate on the "quantity" field. +func QuantityIn(vs ...int) predicate.Item { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldQuantity), v...)) + }) +} + +// QuantityNotIn applies the NotIn predicate on the "quantity" field. 
+func QuantityNotIn(vs ...int) predicate.Item { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldQuantity), v...)) + }) +} + +// QuantityGT applies the GT predicate on the "quantity" field. +func QuantityGT(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldQuantity), v)) + }) +} + +// QuantityGTE applies the GTE predicate on the "quantity" field. +func QuantityGTE(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldQuantity), v)) + }) +} + +// QuantityLT applies the LT predicate on the "quantity" field. +func QuantityLT(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldQuantity), v)) + }) +} + +// QuantityLTE applies the LTE predicate on the "quantity" field. +func QuantityLTE(v int) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldQuantity), v)) + }) +} + +// InsuredEQ applies the EQ predicate on the "insured" field. +func InsuredEQ(v bool) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldInsured), v)) + }) +} + +// InsuredNEQ applies the NEQ predicate on the "insured" field. +func InsuredNEQ(v bool) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldInsured), v)) + }) +} + // SerialNumberEQ applies the EQ predicate on the "serial_number" field. func SerialNumberEQ(v string) predicate.Item { return predicate.Item(func(s *sql.Selector) { @@ -1940,6 +2032,34 @@ func HasLabelWith(preds ...predicate.Label) predicate.Item { }) } +// HasAttachments applies the HasEdge predicate on the "attachments" edge. 
+func HasAttachments() predicate.Item { + return predicate.Item(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates). +func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Item) predicate.Item { return predicate.Item(func(s *sql.Selector) { diff --git a/backend/ent/item_create.go b/backend/ent/item_create.go index 2715420..cfe3975 100644 --- a/backend/ent/item_create.go +++ b/backend/ent/item_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -87,6 +88,34 @@ func (ic *ItemCreate) SetNillableNotes(s *string) *ItemCreate { return ic } +// SetQuantity sets the "quantity" field. +func (ic *ItemCreate) SetQuantity(i int) *ItemCreate { + ic.mutation.SetQuantity(i) + return ic +} + +// SetNillableQuantity sets the "quantity" field if the given value is not nil. +func (ic *ItemCreate) SetNillableQuantity(i *int) *ItemCreate { + if i != nil { + ic.SetQuantity(*i) + } + return ic +} + +// SetInsured sets the "insured" field. 
+func (ic *ItemCreate) SetInsured(b bool) *ItemCreate { + ic.mutation.SetInsured(b) + return ic +} + +// SetNillableInsured sets the "insured" field if the given value is not nil. +func (ic *ItemCreate) SetNillableInsured(b *bool) *ItemCreate { + if b != nil { + ic.SetInsured(*b) + } + return ic +} + // SetSerialNumber sets the "serial_number" field. func (ic *ItemCreate) SetSerialNumber(s string) *ItemCreate { ic.mutation.SetSerialNumber(s) @@ -343,6 +372,21 @@ func (ic *ItemCreate) AddLabel(l ...*Label) *ItemCreate { return ic.AddLabelIDs(ids...) } +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (ic *ItemCreate) AddAttachmentIDs(ids ...uuid.UUID) *ItemCreate { + ic.mutation.AddAttachmentIDs(ids...) + return ic +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (ic *ItemCreate) AddAttachments(a ...*Attachment) *ItemCreate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return ic.AddAttachmentIDs(ids...) +} + // Mutation returns the ItemMutation object of the builder. 
func (ic *ItemCreate) Mutation() *ItemMutation { return ic.mutation @@ -428,6 +472,14 @@ func (ic *ItemCreate) defaults() { v := item.DefaultUpdatedAt() ic.mutation.SetUpdatedAt(v) } + if _, ok := ic.mutation.Quantity(); !ok { + v := item.DefaultQuantity + ic.mutation.SetQuantity(v) + } + if _, ok := ic.mutation.Insured(); !ok { + v := item.DefaultInsured + ic.mutation.SetInsured(v) + } if _, ok := ic.mutation.LifetimeWarranty(); !ok { v := item.DefaultLifetimeWarranty ic.mutation.SetLifetimeWarranty(v) @@ -472,6 +524,12 @@ func (ic *ItemCreate) check() error { return &ValidationError{Name: "notes", err: fmt.Errorf(`ent: validator failed for field "Item.notes": %w`, err)} } } + if _, ok := ic.mutation.Quantity(); !ok { + return &ValidationError{Name: "quantity", err: errors.New(`ent: missing required field "Item.quantity"`)} + } + if _, ok := ic.mutation.Insured(); !ok { + return &ValidationError{Name: "insured", err: errors.New(`ent: missing required field "Item.insured"`)} + } if v, ok := ic.mutation.SerialNumber(); ok { if err := item.SerialNumberValidator(v); err != nil { return &ValidationError{Name: "serial_number", err: fmt.Errorf(`ent: validator failed for field "Item.serial_number": %w`, err)} @@ -585,6 +643,22 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { }) _node.Notes = value } + if value, ok := ic.mutation.Quantity(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: item.FieldQuantity, + }) + _node.Quantity = value + } + if value, ok := ic.mutation.Insured(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: item.FieldInsured, + }) + _node.Insured = value + } if value, ok := ic.mutation.SerialNumber(); ok { _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ Type: field.TypeString, @@ -767,6 +841,25 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { } _spec.Edges = 
append(_spec.Edges, edge) } + if nodes := ic.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/backend/ent/item_query.go b/backend/ent/item_query.go index b164239..a922b5b 100644 --- a/backend/ent/item_query.go +++ b/backend/ent/item_query.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -23,17 +24,18 @@ import ( // ItemQuery is the builder for querying Item entities. type ItemQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Item - withGroup *GroupQuery - withLocation *LocationQuery - withFields *ItemFieldQuery - withLabel *LabelQuery - withFKs bool + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Item + withGroup *GroupQuery + withLocation *LocationQuery + withFields *ItemFieldQuery + withLabel *LabelQuery + withAttachments *AttachmentQuery + withFKs bool // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -158,6 +160,28 @@ func (iq *ItemQuery) QueryLabel() *LabelQuery { return query } +// QueryAttachments chains the current query on the "attachments" edge. 
+func (iq *ItemQuery) QueryAttachments() *AttachmentQuery { + query := &AttachmentQuery{config: iq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := iq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := iq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(item.Table, item.FieldID, selector), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, item.AttachmentsTable, item.AttachmentsColumn), + ) + fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first Item entity from the query. // Returns a *NotFoundError when no Item was found. func (iq *ItemQuery) First(ctx context.Context) (*Item, error) { @@ -334,15 +358,16 @@ func (iq *ItemQuery) Clone() *ItemQuery { return nil } return &ItemQuery{ - config: iq.config, - limit: iq.limit, - offset: iq.offset, - order: append([]OrderFunc{}, iq.order...), - predicates: append([]predicate.Item{}, iq.predicates...), - withGroup: iq.withGroup.Clone(), - withLocation: iq.withLocation.Clone(), - withFields: iq.withFields.Clone(), - withLabel: iq.withLabel.Clone(), + config: iq.config, + limit: iq.limit, + offset: iq.offset, + order: append([]OrderFunc{}, iq.order...), + predicates: append([]predicate.Item{}, iq.predicates...), + withGroup: iq.withGroup.Clone(), + withLocation: iq.withLocation.Clone(), + withFields: iq.withFields.Clone(), + withLabel: iq.withLabel.Clone(), + withAttachments: iq.withAttachments.Clone(), // clone intermediate query. sql: iq.sql.Clone(), path: iq.path, @@ -394,6 +419,17 @@ func (iq *ItemQuery) WithLabel(opts ...func(*LabelQuery)) *ItemQuery { return iq } +// WithAttachments tells the query-builder to eager-load the nodes that are connected to +// the "attachments" edge. The optional arguments are used to configure the query builder of the edge. 
+func (iq *ItemQuery) WithAttachments(opts ...func(*AttachmentQuery)) *ItemQuery { + query := &AttachmentQuery{config: iq.config} + for _, opt := range opts { + opt(query) + } + iq.withAttachments = query + return iq +} + // GroupBy is used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -463,11 +499,12 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e nodes = []*Item{} withFKs = iq.withFKs _spec = iq.querySpec() - loadedTypes = [4]bool{ + loadedTypes = [5]bool{ iq.withGroup != nil, iq.withLocation != nil, iq.withFields != nil, iq.withLabel != nil, + iq.withAttachments != nil, } ) if iq.withGroup != nil || iq.withLocation != nil { @@ -520,6 +557,13 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e return nil, err } } + if query := iq.withAttachments; query != nil { + if err := iq.loadAttachments(ctx, query, nodes, + func(n *Item) { n.Edges.Attachments = []*Attachment{} }, + func(n *Item, e *Attachment) { n.Edges.Attachments = append(n.Edges.Attachments, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -670,6 +714,37 @@ func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []* } return nil } +func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery, nodes []*Item, init func(*Item), assign func(*Item, *Attachment)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Item) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.InValues(item.AttachmentsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.item_attachments + if fk == nil { + return 
fmt.Errorf(`foreign-key "item_attachments" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} func (iq *ItemQuery) sqlCount(ctx context.Context) (int, error) { _spec := iq.querySpec() diff --git a/backend/ent/item_update.go b/backend/ent/item_update.go index f2aa155..0bfe356 100644 --- a/backend/ent/item_update.go +++ b/backend/ent/item_update.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -85,6 +86,41 @@ func (iu *ItemUpdate) ClearNotes() *ItemUpdate { return iu } +// SetQuantity sets the "quantity" field. +func (iu *ItemUpdate) SetQuantity(i int) *ItemUpdate { + iu.mutation.ResetQuantity() + iu.mutation.SetQuantity(i) + return iu +} + +// SetNillableQuantity sets the "quantity" field if the given value is not nil. +func (iu *ItemUpdate) SetNillableQuantity(i *int) *ItemUpdate { + if i != nil { + iu.SetQuantity(*i) + } + return iu +} + +// AddQuantity adds i to the "quantity" field. +func (iu *ItemUpdate) AddQuantity(i int) *ItemUpdate { + iu.mutation.AddQuantity(i) + return iu +} + +// SetInsured sets the "insured" field. +func (iu *ItemUpdate) SetInsured(b bool) *ItemUpdate { + iu.mutation.SetInsured(b) + return iu +} + +// SetNillableInsured sets the "insured" field if the given value is not nil. +func (iu *ItemUpdate) SetNillableInsured(b *bool) *ItemUpdate { + if b != nil { + iu.SetInsured(*b) + } + return iu +} + // SetSerialNumber sets the "serial_number" field. 
func (iu *ItemUpdate) SetSerialNumber(s string) *ItemUpdate { iu.mutation.SetSerialNumber(s) @@ -401,6 +437,21 @@ func (iu *ItemUpdate) AddLabel(l ...*Label) *ItemUpdate { return iu.AddLabelIDs(ids...) } +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (iu *ItemUpdate) AddAttachmentIDs(ids ...uuid.UUID) *ItemUpdate { + iu.mutation.AddAttachmentIDs(ids...) + return iu +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (iu *ItemUpdate) AddAttachments(a ...*Attachment) *ItemUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return iu.AddAttachmentIDs(ids...) +} + // Mutation returns the ItemMutation object of the builder. func (iu *ItemUpdate) Mutation() *ItemMutation { return iu.mutation @@ -460,6 +511,27 @@ func (iu *ItemUpdate) RemoveLabel(l ...*Label) *ItemUpdate { return iu.RemoveLabelIDs(ids...) } +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (iu *ItemUpdate) ClearAttachments() *ItemUpdate { + iu.mutation.ClearAttachments() + return iu +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (iu *ItemUpdate) RemoveAttachmentIDs(ids ...uuid.UUID) *ItemUpdate { + iu.mutation.RemoveAttachmentIDs(ids...) + return iu +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (iu *ItemUpdate) RemoveAttachments(a ...*Attachment) *ItemUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return iu.RemoveAttachmentIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. 
func (iu *ItemUpdate) Save(ctx context.Context) (int, error) { var ( @@ -635,6 +707,27 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Column: item.FieldNotes, }) } + if value, ok := iu.mutation.Quantity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: item.FieldQuantity, + }) + } + if value, ok := iu.mutation.AddedQuantity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: item.FieldQuantity, + }) + } + if value, ok := iu.mutation.Insured(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: item.FieldInsured, + }) + } if value, ok := iu.mutation.SerialNumber(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, @@ -978,6 +1071,60 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if iu.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := iu.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !iu.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
iu.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if n, err = sqlgraph.UpdateNodes(ctx, iu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{item.Label} @@ -1049,6 +1196,41 @@ func (iuo *ItemUpdateOne) ClearNotes() *ItemUpdateOne { return iuo } +// SetQuantity sets the "quantity" field. +func (iuo *ItemUpdateOne) SetQuantity(i int) *ItemUpdateOne { + iuo.mutation.ResetQuantity() + iuo.mutation.SetQuantity(i) + return iuo +} + +// SetNillableQuantity sets the "quantity" field if the given value is not nil. +func (iuo *ItemUpdateOne) SetNillableQuantity(i *int) *ItemUpdateOne { + if i != nil { + iuo.SetQuantity(*i) + } + return iuo +} + +// AddQuantity adds i to the "quantity" field. +func (iuo *ItemUpdateOne) AddQuantity(i int) *ItemUpdateOne { + iuo.mutation.AddQuantity(i) + return iuo +} + +// SetInsured sets the "insured" field. +func (iuo *ItemUpdateOne) SetInsured(b bool) *ItemUpdateOne { + iuo.mutation.SetInsured(b) + return iuo +} + +// SetNillableInsured sets the "insured" field if the given value is not nil. +func (iuo *ItemUpdateOne) SetNillableInsured(b *bool) *ItemUpdateOne { + if b != nil { + iuo.SetInsured(*b) + } + return iuo +} + // SetSerialNumber sets the "serial_number" field. func (iuo *ItemUpdateOne) SetSerialNumber(s string) *ItemUpdateOne { iuo.mutation.SetSerialNumber(s) @@ -1365,6 +1547,21 @@ func (iuo *ItemUpdateOne) AddLabel(l ...*Label) *ItemUpdateOne { return iuo.AddLabelIDs(ids...) } +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. 
+func (iuo *ItemUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *ItemUpdateOne { + iuo.mutation.AddAttachmentIDs(ids...) + return iuo +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (iuo *ItemUpdateOne) AddAttachments(a ...*Attachment) *ItemUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return iuo.AddAttachmentIDs(ids...) +} + // Mutation returns the ItemMutation object of the builder. func (iuo *ItemUpdateOne) Mutation() *ItemMutation { return iuo.mutation @@ -1424,6 +1621,27 @@ func (iuo *ItemUpdateOne) RemoveLabel(l ...*Label) *ItemUpdateOne { return iuo.RemoveLabelIDs(ids...) } +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (iuo *ItemUpdateOne) ClearAttachments() *ItemUpdateOne { + iuo.mutation.ClearAttachments() + return iuo +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (iuo *ItemUpdateOne) RemoveAttachmentIDs(ids ...uuid.UUID) *ItemUpdateOne { + iuo.mutation.RemoveAttachmentIDs(ids...) + return iuo +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (iuo *ItemUpdateOne) RemoveAttachments(a ...*Attachment) *ItemUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return iuo.RemoveAttachmentIDs(ids...) +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (iuo *ItemUpdateOne) Select(field string, fields ...string) *ItemUpdateOne { @@ -1629,6 +1847,27 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Column: item.FieldNotes, }) } + if value, ok := iuo.mutation.Quantity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: item.FieldQuantity, + }) + } + if value, ok := iuo.mutation.AddedQuantity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Value: value, + Column: item.FieldQuantity, + }) + } + if value, ok := iuo.mutation.Insured(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: item.FieldInsured, + }) + } if value, ok := iuo.mutation.SerialNumber(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, @@ -1972,6 +2211,60 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if iuo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := iuo.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !iuo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) + } + if nodes := iuo.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: item.AttachmentsTable, + Columns: []string{item.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeUUID, + Column: attachment.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &Item{config: iuo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index a705906..ee923cd 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -8,6 +8,35 @@ import ( ) var ( + // AttachmentsColumns holds the columns for the "attachments" table. + AttachmentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "type", Type: field.TypeEnum, Enums: []string{"photo", "manual", "warranty", "attachment"}, Default: "attachment"}, + {Name: "document_attachments", Type: field.TypeUUID}, + {Name: "item_attachments", Type: field.TypeUUID}, + } + // AttachmentsTable holds the schema information for the "attachments" table. 
+ AttachmentsTable = &schema.Table{ + Name: "attachments", + Columns: AttachmentsColumns, + PrimaryKey: []*schema.Column{AttachmentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "attachments_documents_attachments", + Columns: []*schema.Column{AttachmentsColumns[4]}, + RefColumns: []*schema.Column{DocumentsColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "attachments_items_attachments", + Columns: []*schema.Column{AttachmentsColumns[5]}, + RefColumns: []*schema.Column{ItemsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } // AuthTokensColumns holds the columns for the "auth_tokens" table. AuthTokensColumns = []*schema.Column{ {Name: "id", Type: field.TypeUUID}, @@ -38,6 +67,60 @@ var ( }, }, } + // DocumentsColumns holds the columns for the "documents" table. + DocumentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "title", Type: field.TypeString, Size: 255}, + {Name: "path", Type: field.TypeString, Size: 500}, + {Name: "group_documents", Type: field.TypeUUID}, + } + // DocumentsTable holds the schema information for the "documents" table. + DocumentsTable = &schema.Table{ + Name: "documents", + Columns: DocumentsColumns, + PrimaryKey: []*schema.Column{DocumentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "documents_groups_documents", + Columns: []*schema.Column{DocumentsColumns[5]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // DocumentTokensColumns holds the columns for the "document_tokens" table. 
+ DocumentTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "token", Type: field.TypeBytes, Unique: true}, + {Name: "uses", Type: field.TypeInt, Default: 1}, + {Name: "expires_at", Type: field.TypeTime}, + {Name: "document_document_tokens", Type: field.TypeUUID, Nullable: true}, + } + // DocumentTokensTable holds the schema information for the "document_tokens" table. + DocumentTokensTable = &schema.Table{ + Name: "document_tokens", + Columns: DocumentTokensColumns, + PrimaryKey: []*schema.Column{DocumentTokensColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "document_tokens_documents_document_tokens", + Columns: []*schema.Column{DocumentTokensColumns[6]}, + RefColumns: []*schema.Column{DocumentsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "documenttoken_token", + Unique: false, + Columns: []*schema.Column{DocumentTokensColumns[3]}, + }, + }, + } // GroupsColumns holds the columns for the "groups" table. 
GroupsColumns = []*schema.Column{ {Name: "id", Type: field.TypeUUID}, @@ -60,6 +143,8 @@ var ( {Name: "name", Type: field.TypeString, Size: 255}, {Name: "description", Type: field.TypeString, Nullable: true, Size: 1000}, {Name: "notes", Type: field.TypeString, Nullable: true, Size: 1000}, + {Name: "quantity", Type: field.TypeInt, Default: 1}, + {Name: "insured", Type: field.TypeBool, Default: false}, {Name: "serial_number", Type: field.TypeString, Nullable: true, Size: 255}, {Name: "model_number", Type: field.TypeString, Nullable: true, Size: 255}, {Name: "manufacturer", Type: field.TypeString, Nullable: true, Size: 255}, @@ -84,13 +169,13 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "items_groups_items", - Columns: []*schema.Column{ItemsColumns[19]}, + Columns: []*schema.Column{ItemsColumns[21]}, RefColumns: []*schema.Column{GroupsColumns[0]}, OnDelete: schema.Cascade, }, { Symbol: "items_locations_items", - Columns: []*schema.Column{ItemsColumns[20]}, + Columns: []*schema.Column{ItemsColumns[22]}, RefColumns: []*schema.Column{LocationsColumns[0]}, OnDelete: schema.SetNull, }, @@ -104,17 +189,17 @@ var ( { Name: "item_manufacturer", Unique: false, - Columns: []*schema.Column{ItemsColumns[8]}, + Columns: []*schema.Column{ItemsColumns[10]}, }, { Name: "item_model_number", Unique: false, - Columns: []*schema.Column{ItemsColumns[7]}, + Columns: []*schema.Column{ItemsColumns[9]}, }, { Name: "item_serial_number", Unique: false, - Columns: []*schema.Column{ItemsColumns[6]}, + Columns: []*schema.Column{ItemsColumns[8]}, }, }, } @@ -245,7 +330,10 @@ var ( } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ + AttachmentsTable, AuthTokensTable, + DocumentsTable, + DocumentTokensTable, GroupsTable, ItemsTable, ItemFieldsTable, @@ -257,7 +345,11 @@ var ( ) func init() { + AttachmentsTable.ForeignKeys[0].RefTable = DocumentsTable + AttachmentsTable.ForeignKeys[1].RefTable = ItemsTable AuthTokensTable.ForeignKeys[0].RefTable = UsersTable + DocumentsTable.ForeignKeys[0].RefTable = GroupsTable + DocumentTokensTable.ForeignKeys[0].RefTable = DocumentsTable ItemsTable.ForeignKeys[0].RefTable = GroupsTable ItemsTable.ForeignKeys[1].RefTable = LocationsTable ItemFieldsTable.ForeignKeys[0].RefTable = ItemsTable diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 1f5157b..ae3c606 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -10,7 +10,10 @@ import ( "time" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -31,15 +34,571 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. - TypeAuthTokens = "AuthTokens" - TypeGroup = "Group" - TypeItem = "Item" - TypeItemField = "ItemField" - TypeLabel = "Label" - TypeLocation = "Location" - TypeUser = "User" + TypeAttachment = "Attachment" + TypeAuthTokens = "AuthTokens" + TypeDocument = "Document" + TypeDocumentToken = "DocumentToken" + TypeGroup = "Group" + TypeItem = "Item" + TypeItemField = "ItemField" + TypeLabel = "Label" + TypeLocation = "Location" + TypeUser = "User" ) +// AttachmentMutation represents an operation that mutates the Attachment nodes in the graph. 
+type AttachmentMutation struct { + config + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + _type *attachment.Type + clearedFields map[string]struct{} + item *uuid.UUID + cleareditem bool + document *uuid.UUID + cleareddocument bool + done bool + oldValue func(context.Context) (*Attachment, error) + predicates []predicate.Attachment +} + +var _ ent.Mutation = (*AttachmentMutation)(nil) + +// attachmentOption allows management of the mutation configuration using functional options. +type attachmentOption func(*AttachmentMutation) + +// newAttachmentMutation creates new mutation for the Attachment entity. +func newAttachmentMutation(c config, op Op, opts ...attachmentOption) *AttachmentMutation { + m := &AttachmentMutation{ + config: c, + op: op, + typ: TypeAttachment, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAttachmentID sets the ID field of the mutation. +func withAttachmentID(id uuid.UUID) attachmentOption { + return func(m *AttachmentMutation) { + var ( + err error + once sync.Once + value *Attachment + ) + m.oldValue = func(ctx context.Context) (*Attachment, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Attachment.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAttachment sets the old Attachment of the mutation. +func withAttachment(node *Attachment) attachmentOption { + return func(m *AttachmentMutation) { + m.oldValue = func(context.Context) (*Attachment, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m AttachmentMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AttachmentMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Attachment entities. +func (m *AttachmentMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AttachmentMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AttachmentMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Attachment.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *AttachmentMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *AttachmentMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AttachmentMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AttachmentMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AttachmentMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AttachmentMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AttachmentMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetType sets the "type" field. +func (m *AttachmentMutation) SetType(a attachment.Type) { + m._type = &a +} + +// GetType returns the value of the "type" field in the mutation. +func (m *AttachmentMutation) GetType() (r attachment.Type, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldType(ctx context.Context) (v attachment.Type, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *AttachmentMutation) ResetType() { + m._type = nil +} + +// SetItemID sets the "item" edge to the Item entity by id. 
+func (m *AttachmentMutation) SetItemID(id uuid.UUID) { + m.item = &id +} + +// ClearItem clears the "item" edge to the Item entity. +func (m *AttachmentMutation) ClearItem() { + m.cleareditem = true +} + +// ItemCleared reports if the "item" edge to the Item entity was cleared. +func (m *AttachmentMutation) ItemCleared() bool { + return m.cleareditem +} + +// ItemID returns the "item" edge ID in the mutation. +func (m *AttachmentMutation) ItemID() (id uuid.UUID, exists bool) { + if m.item != nil { + return *m.item, true + } + return +} + +// ItemIDs returns the "item" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// ItemID instead. It exists only for internal usage by the builders. +func (m *AttachmentMutation) ItemIDs() (ids []uuid.UUID) { + if id := m.item; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetItem resets all changes to the "item" edge. +func (m *AttachmentMutation) ResetItem() { + m.item = nil + m.cleareditem = false +} + +// SetDocumentID sets the "document" edge to the Document entity by id. +func (m *AttachmentMutation) SetDocumentID(id uuid.UUID) { + m.document = &id +} + +// ClearDocument clears the "document" edge to the Document entity. +func (m *AttachmentMutation) ClearDocument() { + m.cleareddocument = true +} + +// DocumentCleared reports if the "document" edge to the Document entity was cleared. +func (m *AttachmentMutation) DocumentCleared() bool { + return m.cleareddocument +} + +// DocumentID returns the "document" edge ID in the mutation. +func (m *AttachmentMutation) DocumentID() (id uuid.UUID, exists bool) { + if m.document != nil { + return *m.document, true + } + return +} + +// DocumentIDs returns the "document" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// DocumentID instead. It exists only for internal usage by the builders. 
+func (m *AttachmentMutation) DocumentIDs() (ids []uuid.UUID) { + if id := m.document; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetDocument resets all changes to the "document" edge. +func (m *AttachmentMutation) ResetDocument() { + m.document = nil + m.cleareddocument = false +} + +// Where appends a list predicates to the AttachmentMutation builder. +func (m *AttachmentMutation) Where(ps ...predicate.Attachment) { + m.predicates = append(m.predicates, ps...) +} + +// Op returns the operation name. +func (m *AttachmentMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Attachment). +func (m *AttachmentMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AttachmentMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.created_at != nil { + fields = append(fields, attachment.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, attachment.FieldUpdatedAt) + } + if m._type != nil { + fields = append(fields, attachment.FieldType) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AttachmentMutation) Field(name string) (ent.Value, bool) { + switch name { + case attachment.FieldCreatedAt: + return m.CreatedAt() + case attachment.FieldUpdatedAt: + return m.UpdatedAt() + case attachment.FieldType: + return m.GetType() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *AttachmentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case attachment.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case attachment.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case attachment.FieldType: + return m.OldType(ctx) + } + return nil, fmt.Errorf("unknown Attachment field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AttachmentMutation) SetField(name string, value ent.Value) error { + switch name { + case attachment.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case attachment.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case attachment.FieldType: + v, ok := value.(attachment.Type) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + } + return fmt.Errorf("unknown Attachment field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AttachmentMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AttachmentMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AttachmentMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Attachment numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AttachmentMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AttachmentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AttachmentMutation) ClearField(name string) error { + return fmt.Errorf("unknown Attachment nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AttachmentMutation) ResetField(name string) error { + switch name { + case attachment.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case attachment.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case attachment.FieldType: + m.ResetType() + return nil + } + return fmt.Errorf("unknown Attachment field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AttachmentMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.item != nil { + edges = append(edges, attachment.EdgeItem) + } + if m.document != nil { + edges = append(edges, attachment.EdgeDocument) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *AttachmentMutation) AddedIDs(name string) []ent.Value { + switch name { + case attachment.EdgeItem: + if id := m.item; id != nil { + return []ent.Value{*id} + } + case attachment.EdgeDocument: + if id := m.document; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AttachmentMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AttachmentMutation) RemovedIDs(name string) []ent.Value { + switch name { + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AttachmentMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareditem { + edges = append(edges, attachment.EdgeItem) + } + if m.cleareddocument { + edges = append(edges, attachment.EdgeDocument) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AttachmentMutation) EdgeCleared(name string) bool { + switch name { + case attachment.EdgeItem: + return m.cleareditem + case attachment.EdgeDocument: + return m.cleareddocument + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AttachmentMutation) ClearEdge(name string) error { + switch name { + case attachment.EdgeItem: + m.ClearItem() + return nil + case attachment.EdgeDocument: + m.ClearDocument() + return nil + } + return fmt.Errorf("unknown Attachment unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *AttachmentMutation) ResetEdge(name string) error { + switch name { + case attachment.EdgeItem: + m.ResetItem() + return nil + case attachment.EdgeDocument: + m.ResetDocument() + return nil + } + return fmt.Errorf("unknown Attachment edge %s", name) +} + // AuthTokensMutation represents an operation that mutates the AuthTokens nodes in the graph. type AuthTokensMutation struct { config @@ -588,6 +1147,1358 @@ func (m *AuthTokensMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AuthTokens edge %s", name) } +// DocumentMutation represents an operation that mutates the Document nodes in the graph. +type DocumentMutation struct { + config + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + title *string + _path *string + clearedFields map[string]struct{} + group *uuid.UUID + clearedgroup bool + document_tokens map[uuid.UUID]struct{} + removeddocument_tokens map[uuid.UUID]struct{} + cleareddocument_tokens bool + attachments map[uuid.UUID]struct{} + removedattachments map[uuid.UUID]struct{} + clearedattachments bool + done bool + oldValue func(context.Context) (*Document, error) + predicates []predicate.Document +} + +var _ ent.Mutation = (*DocumentMutation)(nil) + +// documentOption allows management of the mutation configuration using functional options. +type documentOption func(*DocumentMutation) + +// newDocumentMutation creates new mutation for the Document entity. +func newDocumentMutation(c config, op Op, opts ...documentOption) *DocumentMutation { + m := &DocumentMutation{ + config: c, + op: op, + typ: TypeDocument, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDocumentID sets the ID field of the mutation. 
+func withDocumentID(id uuid.UUID) documentOption { + return func(m *DocumentMutation) { + var ( + err error + once sync.Once + value *Document + ) + m.oldValue = func(ctx context.Context) (*Document, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Document.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDocument sets the old Document of the mutation. +func withDocument(node *Document) documentOption { + return func(m *DocumentMutation) { + m.oldValue = func(context.Context) (*Document, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DocumentMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DocumentMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Document entities. +func (m *DocumentMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DocumentMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *DocumentMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Document.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *DocumentMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *DocumentMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Document entity. +// If the Document object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *DocumentMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (m *DocumentMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *DocumentMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Document entity. +// If the Document object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *DocumentMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetTitle sets the "title" field. +func (m *DocumentMutation) SetTitle(s string) { + m.title = &s +} + +// Title returns the value of the "title" field in the mutation. +func (m *DocumentMutation) Title() (r string, exists bool) { + v := m.title + if v == nil { + return + } + return *v, true +} + +// OldTitle returns the old "title" field's value of the Document entity. +// If the Document object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DocumentMutation) OldTitle(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTitle is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTitle requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTitle: %w", err) + } + return oldValue.Title, nil +} + +// ResetTitle resets all changes to the "title" field. +func (m *DocumentMutation) ResetTitle() { + m.title = nil +} + +// SetPath sets the "path" field. +func (m *DocumentMutation) SetPath(s string) { + m._path = &s +} + +// Path returns the value of the "path" field in the mutation. +func (m *DocumentMutation) Path() (r string, exists bool) { + v := m._path + if v == nil { + return + } + return *v, true +} + +// OldPath returns the old "path" field's value of the Document entity. +// If the Document object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentMutation) OldPath(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPath is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPath requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPath: %w", err) + } + return oldValue.Path, nil +} + +// ResetPath resets all changes to the "path" field. +func (m *DocumentMutation) ResetPath() { + m._path = nil +} + +// SetGroupID sets the "group" edge to the Group entity by id. +func (m *DocumentMutation) SetGroupID(id uuid.UUID) { + m.group = &id +} + +// ClearGroup clears the "group" edge to the Group entity. 
+func (m *DocumentMutation) ClearGroup() { + m.clearedgroup = true +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *DocumentMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupID returns the "group" edge ID in the mutation. +func (m *DocumentMutation) GroupID() (id uuid.UUID, exists bool) { + if m.group != nil { + return *m.group, true + } + return +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *DocumentMutation) GroupIDs() (ids []uuid.UUID) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *DocumentMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// AddDocumentTokenIDs adds the "document_tokens" edge to the DocumentToken entity by ids. +func (m *DocumentMutation) AddDocumentTokenIDs(ids ...uuid.UUID) { + if m.document_tokens == nil { + m.document_tokens = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.document_tokens[ids[i]] = struct{}{} + } +} + +// ClearDocumentTokens clears the "document_tokens" edge to the DocumentToken entity. +func (m *DocumentMutation) ClearDocumentTokens() { + m.cleareddocument_tokens = true +} + +// DocumentTokensCleared reports if the "document_tokens" edge to the DocumentToken entity was cleared. +func (m *DocumentMutation) DocumentTokensCleared() bool { + return m.cleareddocument_tokens +} + +// RemoveDocumentTokenIDs removes the "document_tokens" edge to the DocumentToken entity by IDs. 
+func (m *DocumentMutation) RemoveDocumentTokenIDs(ids ...uuid.UUID) { + if m.removeddocument_tokens == nil { + m.removeddocument_tokens = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.document_tokens, ids[i]) + m.removeddocument_tokens[ids[i]] = struct{}{} + } +} + +// RemovedDocumentTokens returns the removed IDs of the "document_tokens" edge to the DocumentToken entity. +func (m *DocumentMutation) RemovedDocumentTokensIDs() (ids []uuid.UUID) { + for id := range m.removeddocument_tokens { + ids = append(ids, id) + } + return +} + +// DocumentTokensIDs returns the "document_tokens" edge IDs in the mutation. +func (m *DocumentMutation) DocumentTokensIDs() (ids []uuid.UUID) { + for id := range m.document_tokens { + ids = append(ids, id) + } + return +} + +// ResetDocumentTokens resets all changes to the "document_tokens" edge. +func (m *DocumentMutation) ResetDocumentTokens() { + m.document_tokens = nil + m.cleareddocument_tokens = false + m.removeddocument_tokens = nil +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by ids. +func (m *DocumentMutation) AddAttachmentIDs(ids ...uuid.UUID) { + if m.attachments == nil { + m.attachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.attachments[ids[i]] = struct{}{} + } +} + +// ClearAttachments clears the "attachments" edge to the Attachment entity. +func (m *DocumentMutation) ClearAttachments() { + m.clearedattachments = true +} + +// AttachmentsCleared reports if the "attachments" edge to the Attachment entity was cleared. +func (m *DocumentMutation) AttachmentsCleared() bool { + return m.clearedattachments +} + +// RemoveAttachmentIDs removes the "attachments" edge to the Attachment entity by IDs. 
+func (m *DocumentMutation) RemoveAttachmentIDs(ids ...uuid.UUID) { + if m.removedattachments == nil { + m.removedattachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.attachments, ids[i]) + m.removedattachments[ids[i]] = struct{}{} + } +} + +// RemovedAttachments returns the removed IDs of the "attachments" edge to the Attachment entity. +func (m *DocumentMutation) RemovedAttachmentsIDs() (ids []uuid.UUID) { + for id := range m.removedattachments { + ids = append(ids, id) + } + return +} + +// AttachmentsIDs returns the "attachments" edge IDs in the mutation. +func (m *DocumentMutation) AttachmentsIDs() (ids []uuid.UUID) { + for id := range m.attachments { + ids = append(ids, id) + } + return +} + +// ResetAttachments resets all changes to the "attachments" edge. +func (m *DocumentMutation) ResetAttachments() { + m.attachments = nil + m.clearedattachments = false + m.removedattachments = nil +} + +// Where appends a list predicates to the DocumentMutation builder. +func (m *DocumentMutation) Where(ps ...predicate.Document) { + m.predicates = append(m.predicates, ps...) +} + +// Op returns the operation name. +func (m *DocumentMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Document). +func (m *DocumentMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DocumentMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.created_at != nil { + fields = append(fields, document.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, document.FieldUpdatedAt) + } + if m.title != nil { + fields = append(fields, document.FieldTitle) + } + if m._path != nil { + fields = append(fields, document.FieldPath) + } + return fields +} + +// Field returns the value of a field with the given name. 
The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DocumentMutation) Field(name string) (ent.Value, bool) { + switch name { + case document.FieldCreatedAt: + return m.CreatedAt() + case document.FieldUpdatedAt: + return m.UpdatedAt() + case document.FieldTitle: + return m.Title() + case document.FieldPath: + return m.Path() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DocumentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case document.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case document.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case document.FieldTitle: + return m.OldTitle(ctx) + case document.FieldPath: + return m.OldPath(ctx) + } + return nil, fmt.Errorf("unknown Document field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *DocumentMutation) SetField(name string, value ent.Value) error { + switch name { + case document.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case document.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case document.FieldTitle: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTitle(v) + return nil + case document.FieldPath: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPath(v) + return nil + } + return fmt.Errorf("unknown Document field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DocumentMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DocumentMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DocumentMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Document numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DocumentMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. 
+func (m *DocumentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DocumentMutation) ClearField(name string) error { + return fmt.Errorf("unknown Document nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DocumentMutation) ResetField(name string) error { + switch name { + case document.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case document.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case document.FieldTitle: + m.ResetTitle() + return nil + case document.FieldPath: + m.ResetPath() + return nil + } + return fmt.Errorf("unknown Document field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DocumentMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.group != nil { + edges = append(edges, document.EdgeGroup) + } + if m.document_tokens != nil { + edges = append(edges, document.EdgeDocumentTokens) + } + if m.attachments != nil { + edges = append(edges, document.EdgeAttachments) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *DocumentMutation) AddedIDs(name string) []ent.Value { + switch name { + case document.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + case document.EdgeDocumentTokens: + ids := make([]ent.Value, 0, len(m.document_tokens)) + for id := range m.document_tokens { + ids = append(ids, id) + } + return ids + case document.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.attachments)) + for id := range m.attachments { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DocumentMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removeddocument_tokens != nil { + edges = append(edges, document.EdgeDocumentTokens) + } + if m.removedattachments != nil { + edges = append(edges, document.EdgeAttachments) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DocumentMutation) RemovedIDs(name string) []ent.Value { + switch name { + case document.EdgeDocumentTokens: + ids := make([]ent.Value, 0, len(m.removeddocument_tokens)) + for id := range m.removeddocument_tokens { + ids = append(ids, id) + } + return ids + case document.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.removedattachments)) + for id := range m.removedattachments { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *DocumentMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedgroup { + edges = append(edges, document.EdgeGroup) + } + if m.cleareddocument_tokens { + edges = append(edges, document.EdgeDocumentTokens) + } + if m.clearedattachments { + edges = append(edges, document.EdgeAttachments) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DocumentMutation) EdgeCleared(name string) bool { + switch name { + case document.EdgeGroup: + return m.clearedgroup + case document.EdgeDocumentTokens: + return m.cleareddocument_tokens + case document.EdgeAttachments: + return m.clearedattachments + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DocumentMutation) ClearEdge(name string) error { + switch name { + case document.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown Document unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DocumentMutation) ResetEdge(name string) error { + switch name { + case document.EdgeGroup: + m.ResetGroup() + return nil + case document.EdgeDocumentTokens: + m.ResetDocumentTokens() + return nil + case document.EdgeAttachments: + m.ResetAttachments() + return nil + } + return fmt.Errorf("unknown Document edge %s", name) +} + +// DocumentTokenMutation represents an operation that mutates the DocumentToken nodes in the graph. 
+type DocumentTokenMutation struct { + config + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + token *[]byte + uses *int + adduses *int + expires_at *time.Time + clearedFields map[string]struct{} + document *uuid.UUID + cleareddocument bool + done bool + oldValue func(context.Context) (*DocumentToken, error) + predicates []predicate.DocumentToken +} + +var _ ent.Mutation = (*DocumentTokenMutation)(nil) + +// documenttokenOption allows management of the mutation configuration using functional options. +type documenttokenOption func(*DocumentTokenMutation) + +// newDocumentTokenMutation creates new mutation for the DocumentToken entity. +func newDocumentTokenMutation(c config, op Op, opts ...documenttokenOption) *DocumentTokenMutation { + m := &DocumentTokenMutation{ + config: c, + op: op, + typ: TypeDocumentToken, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDocumentTokenID sets the ID field of the mutation. +func withDocumentTokenID(id uuid.UUID) documenttokenOption { + return func(m *DocumentTokenMutation) { + var ( + err error + once sync.Once + value *DocumentToken + ) + m.oldValue = func(ctx context.Context) (*DocumentToken, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().DocumentToken.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDocumentToken sets the old DocumentToken of the mutation. +func withDocumentToken(node *DocumentToken) documenttokenOption { + return func(m *DocumentTokenMutation) { + m.oldValue = func(context.Context) (*DocumentToken, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m DocumentTokenMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DocumentTokenMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of DocumentToken entities. +func (m *DocumentTokenMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DocumentTokenMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *DocumentTokenMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().DocumentToken.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *DocumentTokenMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *DocumentTokenMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the DocumentToken entity. +// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *DocumentTokenMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *DocumentTokenMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *DocumentTokenMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the DocumentToken entity. +// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DocumentTokenMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *DocumentTokenMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetToken sets the "token" field. +func (m *DocumentTokenMutation) SetToken(b []byte) { + m.token = &b +} + +// Token returns the value of the "token" field in the mutation. +func (m *DocumentTokenMutation) Token() (r []byte, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the DocumentToken entity. +// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentTokenMutation) OldToken(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil +} + +// ResetToken resets all changes to the "token" field. +func (m *DocumentTokenMutation) ResetToken() { + m.token = nil +} + +// SetUses sets the "uses" field. 
+func (m *DocumentTokenMutation) SetUses(i int) { + m.uses = &i + m.adduses = nil +} + +// Uses returns the value of the "uses" field in the mutation. +func (m *DocumentTokenMutation) Uses() (r int, exists bool) { + v := m.uses + if v == nil { + return + } + return *v, true +} + +// OldUses returns the old "uses" field's value of the DocumentToken entity. +// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentTokenMutation) OldUses(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUses is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUses requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUses: %w", err) + } + return oldValue.Uses, nil +} + +// AddUses adds i to the "uses" field. +func (m *DocumentTokenMutation) AddUses(i int) { + if m.adduses != nil { + *m.adduses += i + } else { + m.adduses = &i + } +} + +// AddedUses returns the value that was added to the "uses" field in this mutation. +func (m *DocumentTokenMutation) AddedUses() (r int, exists bool) { + v := m.adduses + if v == nil { + return + } + return *v, true +} + +// ResetUses resets all changes to the "uses" field. +func (m *DocumentTokenMutation) ResetUses() { + m.uses = nil + m.adduses = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *DocumentTokenMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *DocumentTokenMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the DocumentToken entity. 
+// If the DocumentToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DocumentTokenMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *DocumentTokenMutation) ResetExpiresAt() { + m.expires_at = nil +} + +// SetDocumentID sets the "document" edge to the Document entity by id. +func (m *DocumentTokenMutation) SetDocumentID(id uuid.UUID) { + m.document = &id +} + +// ClearDocument clears the "document" edge to the Document entity. +func (m *DocumentTokenMutation) ClearDocument() { + m.cleareddocument = true +} + +// DocumentCleared reports if the "document" edge to the Document entity was cleared. +func (m *DocumentTokenMutation) DocumentCleared() bool { + return m.cleareddocument +} + +// DocumentID returns the "document" edge ID in the mutation. +func (m *DocumentTokenMutation) DocumentID() (id uuid.UUID, exists bool) { + if m.document != nil { + return *m.document, true + } + return +} + +// DocumentIDs returns the "document" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// DocumentID instead. It exists only for internal usage by the builders. +func (m *DocumentTokenMutation) DocumentIDs() (ids []uuid.UUID) { + if id := m.document; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetDocument resets all changes to the "document" edge. 
+func (m *DocumentTokenMutation) ResetDocument() { + m.document = nil + m.cleareddocument = false +} + +// Where appends a list predicates to the DocumentTokenMutation builder. +func (m *DocumentTokenMutation) Where(ps ...predicate.DocumentToken) { + m.predicates = append(m.predicates, ps...) +} + +// Op returns the operation name. +func (m *DocumentTokenMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (DocumentToken). +func (m *DocumentTokenMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DocumentTokenMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.created_at != nil { + fields = append(fields, documenttoken.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, documenttoken.FieldUpdatedAt) + } + if m.token != nil { + fields = append(fields, documenttoken.FieldToken) + } + if m.uses != nil { + fields = append(fields, documenttoken.FieldUses) + } + if m.expires_at != nil { + fields = append(fields, documenttoken.FieldExpiresAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DocumentTokenMutation) Field(name string) (ent.Value, bool) { + switch name { + case documenttoken.FieldCreatedAt: + return m.CreatedAt() + case documenttoken.FieldUpdatedAt: + return m.UpdatedAt() + case documenttoken.FieldToken: + return m.Token() + case documenttoken.FieldUses: + return m.Uses() + case documenttoken.FieldExpiresAt: + return m.ExpiresAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *DocumentTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case documenttoken.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case documenttoken.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case documenttoken.FieldToken: + return m.OldToken(ctx) + case documenttoken.FieldUses: + return m.OldUses(ctx) + case documenttoken.FieldExpiresAt: + return m.OldExpiresAt(ctx) + } + return nil, fmt.Errorf("unknown DocumentToken field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DocumentTokenMutation) SetField(name string, value ent.Value) error { + switch name { + case documenttoken.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case documenttoken.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case documenttoken.FieldToken: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case documenttoken.FieldUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUses(v) + return nil + case documenttoken.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + } + return fmt.Errorf("unknown DocumentToken field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *DocumentTokenMutation) AddedFields() []string { + var fields []string + if m.adduses != nil { + fields = append(fields, documenttoken.FieldUses) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DocumentTokenMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case documenttoken.FieldUses: + return m.AddedUses() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DocumentTokenMutation) AddField(name string, value ent.Value) error { + switch name { + case documenttoken.FieldUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddUses(v) + return nil + } + return fmt.Errorf("unknown DocumentToken numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DocumentTokenMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DocumentTokenMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DocumentTokenMutation) ClearField(name string) error { + return fmt.Errorf("unknown DocumentToken nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *DocumentTokenMutation) ResetField(name string) error { + switch name { + case documenttoken.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case documenttoken.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case documenttoken.FieldToken: + m.ResetToken() + return nil + case documenttoken.FieldUses: + m.ResetUses() + return nil + case documenttoken.FieldExpiresAt: + m.ResetExpiresAt() + return nil + } + return fmt.Errorf("unknown DocumentToken field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DocumentTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.document != nil { + edges = append(edges, documenttoken.EdgeDocument) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DocumentTokenMutation) AddedIDs(name string) []ent.Value { + switch name { + case documenttoken.EdgeDocument: + if id := m.document; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DocumentTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DocumentTokenMutation) RemovedIDs(name string) []ent.Value { + switch name { + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DocumentTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.cleareddocument { + edges = append(edges, documenttoken.EdgeDocument) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
+func (m *DocumentTokenMutation) EdgeCleared(name string) bool { + switch name { + case documenttoken.EdgeDocument: + return m.cleareddocument + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DocumentTokenMutation) ClearEdge(name string) error { + switch name { + case documenttoken.EdgeDocument: + m.ClearDocument() + return nil + } + return fmt.Errorf("unknown DocumentToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DocumentTokenMutation) ResetEdge(name string) error { + switch name { + case documenttoken.EdgeDocument: + m.ResetDocument() + return nil + } + return fmt.Errorf("unknown DocumentToken edge %s", name) +} + // GroupMutation represents an operation that mutates the Group nodes in the graph. type GroupMutation struct { config @@ -611,6 +2522,9 @@ type GroupMutation struct { labels map[uuid.UUID]struct{} removedlabels map[uuid.UUID]struct{} clearedlabels bool + documents map[uuid.UUID]struct{} + removeddocuments map[uuid.UUID]struct{} + cleareddocuments bool done bool oldValue func(context.Context) (*Group, error) predicates []predicate.Group @@ -1080,6 +2994,60 @@ func (m *GroupMutation) ResetLabels() { m.removedlabels = nil } +// AddDocumentIDs adds the "documents" edge to the Document entity by ids. +func (m *GroupMutation) AddDocumentIDs(ids ...uuid.UUID) { + if m.documents == nil { + m.documents = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.documents[ids[i]] = struct{}{} + } +} + +// ClearDocuments clears the "documents" edge to the Document entity. +func (m *GroupMutation) ClearDocuments() { + m.cleareddocuments = true +} + +// DocumentsCleared reports if the "documents" edge to the Document entity was cleared. 
+func (m *GroupMutation) DocumentsCleared() bool { + return m.cleareddocuments +} + +// RemoveDocumentIDs removes the "documents" edge to the Document entity by IDs. +func (m *GroupMutation) RemoveDocumentIDs(ids ...uuid.UUID) { + if m.removeddocuments == nil { + m.removeddocuments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.documents, ids[i]) + m.removeddocuments[ids[i]] = struct{}{} + } +} + +// RemovedDocuments returns the removed IDs of the "documents" edge to the Document entity. +func (m *GroupMutation) RemovedDocumentsIDs() (ids []uuid.UUID) { + for id := range m.removeddocuments { + ids = append(ids, id) + } + return +} + +// DocumentsIDs returns the "documents" edge IDs in the mutation. +func (m *GroupMutation) DocumentsIDs() (ids []uuid.UUID) { + for id := range m.documents { + ids = append(ids, id) + } + return +} + +// ResetDocuments resets all changes to the "documents" edge. +func (m *GroupMutation) ResetDocuments() { + m.documents = nil + m.cleareddocuments = false + m.removeddocuments = nil +} + // Where appends a list predicates to the GroupMutation builder. func (m *GroupMutation) Where(ps ...predicate.Group) { m.predicates = append(m.predicates, ps...) @@ -1249,7 +3217,7 @@ func (m *GroupMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. 
func (m *GroupMutation) AddedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.users != nil { edges = append(edges, group.EdgeUsers) } @@ -1262,6 +3230,9 @@ func (m *GroupMutation) AddedEdges() []string { if m.labels != nil { edges = append(edges, group.EdgeLabels) } + if m.documents != nil { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1293,13 +3264,19 @@ func (m *GroupMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeDocuments: + ids := make([]ent.Value, 0, len(m.documents)) + for id := range m.documents { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. func (m *GroupMutation) RemovedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.removedusers != nil { edges = append(edges, group.EdgeUsers) } @@ -1312,6 +3289,9 @@ func (m *GroupMutation) RemovedEdges() []string { if m.removedlabels != nil { edges = append(edges, group.EdgeLabels) } + if m.removeddocuments != nil { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1343,13 +3323,19 @@ func (m *GroupMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeDocuments: + ids := make([]ent.Value, 0, len(m.removeddocuments)) + for id := range m.removeddocuments { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *GroupMutation) ClearedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.clearedusers { edges = append(edges, group.EdgeUsers) } @@ -1362,6 +3348,9 @@ func (m *GroupMutation) ClearedEdges() []string { if m.clearedlabels { edges = append(edges, group.EdgeLabels) } + if m.cleareddocuments { + edges = append(edges, group.EdgeDocuments) + } return edges } @@ -1377,6 +3366,8 @@ func (m *GroupMutation) EdgeCleared(name string) bool { return m.cleareditems case group.EdgeLabels: return m.clearedlabels + case group.EdgeDocuments: + return m.cleareddocuments } return false } @@ -1405,6 +3396,9 @@ func (m *GroupMutation) ResetEdge(name string) error { case group.EdgeLabels: m.ResetLabels() return nil + case group.EdgeDocuments: + m.ResetDocuments() + return nil } return fmt.Errorf("unknown Group edge %s", name) } @@ -1412,43 +3406,49 @@ func (m *GroupMutation) ResetEdge(name string) error { // ItemMutation represents an operation that mutates the Item nodes in the graph. 
type ItemMutation struct { config - op Op - typ string - id *uuid.UUID - created_at *time.Time - updated_at *time.Time - name *string - description *string - notes *string - serial_number *string - model_number *string - manufacturer *string - lifetime_warranty *bool - warranty_expires *time.Time - warranty_details *string - purchase_time *time.Time - purchase_from *string - purchase_price *float64 - addpurchase_price *float64 - sold_time *time.Time - sold_to *string - sold_price *float64 - addsold_price *float64 - sold_notes *string - clearedFields map[string]struct{} - group *uuid.UUID - clearedgroup bool - location *uuid.UUID - clearedlocation bool - fields map[uuid.UUID]struct{} - removedfields map[uuid.UUID]struct{} - clearedfields bool - label map[uuid.UUID]struct{} - removedlabel map[uuid.UUID]struct{} - clearedlabel bool - done bool - oldValue func(context.Context) (*Item, error) - predicates []predicate.Item + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + name *string + description *string + notes *string + quantity *int + addquantity *int + insured *bool + serial_number *string + model_number *string + manufacturer *string + lifetime_warranty *bool + warranty_expires *time.Time + warranty_details *string + purchase_time *time.Time + purchase_from *string + purchase_price *float64 + addpurchase_price *float64 + sold_time *time.Time + sold_to *string + sold_price *float64 + addsold_price *float64 + sold_notes *string + clearedFields map[string]struct{} + group *uuid.UUID + clearedgroup bool + location *uuid.UUID + clearedlocation bool + fields map[uuid.UUID]struct{} + removedfields map[uuid.UUID]struct{} + clearedfields bool + label map[uuid.UUID]struct{} + removedlabel map[uuid.UUID]struct{} + clearedlabel bool + attachments map[uuid.UUID]struct{} + removedattachments map[uuid.UUID]struct{} + clearedattachments bool + done bool + oldValue func(context.Context) (*Item, error) + predicates []predicate.Item } var _ 
ent.Mutation = (*ItemMutation)(nil) @@ -1761,6 +3761,98 @@ func (m *ItemMutation) ResetNotes() { delete(m.clearedFields, item.FieldNotes) } +// SetQuantity sets the "quantity" field. +func (m *ItemMutation) SetQuantity(i int) { + m.quantity = &i + m.addquantity = nil +} + +// Quantity returns the value of the "quantity" field in the mutation. +func (m *ItemMutation) Quantity() (r int, exists bool) { + v := m.quantity + if v == nil { + return + } + return *v, true +} + +// OldQuantity returns the old "quantity" field's value of the Item entity. +// If the Item object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ItemMutation) OldQuantity(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldQuantity is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldQuantity requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldQuantity: %w", err) + } + return oldValue.Quantity, nil +} + +// AddQuantity adds i to the "quantity" field. +func (m *ItemMutation) AddQuantity(i int) { + if m.addquantity != nil { + *m.addquantity += i + } else { + m.addquantity = &i + } +} + +// AddedQuantity returns the value that was added to the "quantity" field in this mutation. +func (m *ItemMutation) AddedQuantity() (r int, exists bool) { + v := m.addquantity + if v == nil { + return + } + return *v, true +} + +// ResetQuantity resets all changes to the "quantity" field. +func (m *ItemMutation) ResetQuantity() { + m.quantity = nil + m.addquantity = nil +} + +// SetInsured sets the "insured" field. +func (m *ItemMutation) SetInsured(b bool) { + m.insured = &b +} + +// Insured returns the value of the "insured" field in the mutation. 
+func (m *ItemMutation) Insured() (r bool, exists bool) { + v := m.insured + if v == nil { + return + } + return *v, true +} + +// OldInsured returns the old "insured" field's value of the Item entity. +// If the Item object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ItemMutation) OldInsured(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldInsured is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldInsured requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldInsured: %w", err) + } + return oldValue.Insured, nil +} + +// ResetInsured resets all changes to the "insured" field. +func (m *ItemMutation) ResetInsured() { + m.insured = nil +} + // SetSerialNumber sets the "serial_number" field. func (m *ItemMutation) SetSerialNumber(s string) { m.serial_number = &s @@ -2585,6 +4677,60 @@ func (m *ItemMutation) ResetLabel() { m.removedlabel = nil } +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by ids. +func (m *ItemMutation) AddAttachmentIDs(ids ...uuid.UUID) { + if m.attachments == nil { + m.attachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.attachments[ids[i]] = struct{}{} + } +} + +// ClearAttachments clears the "attachments" edge to the Attachment entity. +func (m *ItemMutation) ClearAttachments() { + m.clearedattachments = true +} + +// AttachmentsCleared reports if the "attachments" edge to the Attachment entity was cleared. +func (m *ItemMutation) AttachmentsCleared() bool { + return m.clearedattachments +} + +// RemoveAttachmentIDs removes the "attachments" edge to the Attachment entity by IDs. 
+func (m *ItemMutation) RemoveAttachmentIDs(ids ...uuid.UUID) { + if m.removedattachments == nil { + m.removedattachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.attachments, ids[i]) + m.removedattachments[ids[i]] = struct{}{} + } +} + +// RemovedAttachments returns the removed IDs of the "attachments" edge to the Attachment entity. +func (m *ItemMutation) RemovedAttachmentsIDs() (ids []uuid.UUID) { + for id := range m.removedattachments { + ids = append(ids, id) + } + return +} + +// AttachmentsIDs returns the "attachments" edge IDs in the mutation. +func (m *ItemMutation) AttachmentsIDs() (ids []uuid.UUID) { + for id := range m.attachments { + ids = append(ids, id) + } + return +} + +// ResetAttachments resets all changes to the "attachments" edge. +func (m *ItemMutation) ResetAttachments() { + m.attachments = nil + m.clearedattachments = false + m.removedattachments = nil +} + // Where appends a list predicates to the ItemMutation builder. func (m *ItemMutation) Where(ps ...predicate.Item) { m.predicates = append(m.predicates, ps...) @@ -2604,7 +4750,7 @@ func (m *ItemMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *ItemMutation) Fields() []string { - fields := make([]string, 0, 18) + fields := make([]string, 0, 20) if m.created_at != nil { fields = append(fields, item.FieldCreatedAt) } @@ -2620,6 +4766,12 @@ func (m *ItemMutation) Fields() []string { if m.notes != nil { fields = append(fields, item.FieldNotes) } + if m.quantity != nil { + fields = append(fields, item.FieldQuantity) + } + if m.insured != nil { + fields = append(fields, item.FieldInsured) + } if m.serial_number != nil { fields = append(fields, item.FieldSerialNumber) } @@ -2677,6 +4829,10 @@ func (m *ItemMutation) Field(name string) (ent.Value, bool) { return m.Description() case item.FieldNotes: return m.Notes() + case item.FieldQuantity: + return m.Quantity() + case item.FieldInsured: + return m.Insured() case item.FieldSerialNumber: return m.SerialNumber() case item.FieldModelNumber: @@ -2722,6 +4878,10 @@ func (m *ItemMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldDescription(ctx) case item.FieldNotes: return m.OldNotes(ctx) + case item.FieldQuantity: + return m.OldQuantity(ctx) + case item.FieldInsured: + return m.OldInsured(ctx) case item.FieldSerialNumber: return m.OldSerialNumber(ctx) case item.FieldModelNumber: @@ -2792,6 +4952,20 @@ func (m *ItemMutation) SetField(name string, value ent.Value) error { } m.SetNotes(v) return nil + case item.FieldQuantity: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetQuantity(v) + return nil + case item.FieldInsured: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetInsured(v) + return nil case item.FieldSerialNumber: v, ok := value.(string) if !ok { @@ -2891,6 +5065,9 @@ func (m *ItemMutation) SetField(name string, value ent.Value) error { // this mutation. 
func (m *ItemMutation) AddedFields() []string { var fields []string + if m.addquantity != nil { + fields = append(fields, item.FieldQuantity) + } if m.addpurchase_price != nil { fields = append(fields, item.FieldPurchasePrice) } @@ -2905,6 +5082,8 @@ func (m *ItemMutation) AddedFields() []string { // was not set, or was not defined in the schema. func (m *ItemMutation) AddedField(name string) (ent.Value, bool) { switch name { + case item.FieldQuantity: + return m.AddedQuantity() case item.FieldPurchasePrice: return m.AddedPurchasePrice() case item.FieldSoldPrice: @@ -2918,6 +5097,13 @@ func (m *ItemMutation) AddedField(name string) (ent.Value, bool) { // type. func (m *ItemMutation) AddField(name string, value ent.Value) error { switch name { + case item.FieldQuantity: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddQuantity(v) + return nil case item.FieldPurchasePrice: v, ok := value.(float64) if !ok { @@ -3049,6 +5235,12 @@ func (m *ItemMutation) ResetField(name string) error { case item.FieldNotes: m.ResetNotes() return nil + case item.FieldQuantity: + m.ResetQuantity() + return nil + case item.FieldInsured: + m.ResetInsured() + return nil case item.FieldSerialNumber: m.ResetSerialNumber() return nil @@ -3094,7 +5286,7 @@ func (m *ItemMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. 
func (m *ItemMutation) AddedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.group != nil { edges = append(edges, item.EdgeGroup) } @@ -3107,6 +5299,9 @@ func (m *ItemMutation) AddedEdges() []string { if m.label != nil { edges = append(edges, item.EdgeLabel) } + if m.attachments != nil { + edges = append(edges, item.EdgeAttachments) + } return edges } @@ -3134,19 +5329,28 @@ func (m *ItemMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case item.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.attachments)) + for id := range m.attachments { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. func (m *ItemMutation) RemovedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.removedfields != nil { edges = append(edges, item.EdgeFields) } if m.removedlabel != nil { edges = append(edges, item.EdgeLabel) } + if m.removedattachments != nil { + edges = append(edges, item.EdgeAttachments) + } return edges } @@ -3166,13 +5370,19 @@ func (m *ItemMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case item.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.removedattachments)) + for id := range m.removedattachments { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *ItemMutation) ClearedEdges() []string { - edges := make([]string, 0, 4) + edges := make([]string, 0, 5) if m.clearedgroup { edges = append(edges, item.EdgeGroup) } @@ -3185,6 +5395,9 @@ func (m *ItemMutation) ClearedEdges() []string { if m.clearedlabel { edges = append(edges, item.EdgeLabel) } + if m.clearedattachments { + edges = append(edges, item.EdgeAttachments) + } return edges } @@ -3200,6 +5413,8 @@ func (m *ItemMutation) EdgeCleared(name string) bool { return m.clearedfields case item.EdgeLabel: return m.clearedlabel + case item.EdgeAttachments: + return m.clearedattachments } return false } @@ -3234,6 +5449,9 @@ func (m *ItemMutation) ResetEdge(name string) error { case item.EdgeLabel: m.ResetLabel() return nil + case item.EdgeAttachments: + m.ResetAttachments() + return nil } return fmt.Errorf("unknown Item edge %s", name) } diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 6053082..4287b9d 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -6,9 +6,18 @@ import ( "entgo.io/ent/dialect/sql" ) +// Attachment is the predicate function for attachment builders. +type Attachment func(*sql.Selector) + // AuthTokens is the predicate function for authtokens builders. type AuthTokens func(*sql.Selector) +// Document is the predicate function for document builders. +type Document func(*sql.Selector) + +// DocumentToken is the predicate function for documenttoken builders. +type DocumentToken func(*sql.Selector) + // Group is the predicate function for group builders. 
type Group func(*sql.Selector) diff --git a/backend/ent/runtime.go b/backend/ent/runtime.go index 6a5b8df..58fd270 100644 --- a/backend/ent/runtime.go +++ b/backend/ent/runtime.go @@ -6,7 +6,10 @@ import ( "time" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/ent/authtokens" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/documenttoken" "github.com/hay-kot/content/backend/ent/group" "github.com/hay-kot/content/backend/ent/item" "github.com/hay-kot/content/backend/ent/itemfield" @@ -20,6 +23,25 @@ import ( // (default values, validators, hooks and policies) and stitches it // to their package variables. func init() { + attachmentMixin := schema.Attachment{}.Mixin() + attachmentMixinFields0 := attachmentMixin[0].Fields() + _ = attachmentMixinFields0 + attachmentFields := schema.Attachment{}.Fields() + _ = attachmentFields + // attachmentDescCreatedAt is the schema descriptor for created_at field. + attachmentDescCreatedAt := attachmentMixinFields0[1].Descriptor() + // attachment.DefaultCreatedAt holds the default value on creation for the created_at field. + attachment.DefaultCreatedAt = attachmentDescCreatedAt.Default.(func() time.Time) + // attachmentDescUpdatedAt is the schema descriptor for updated_at field. + attachmentDescUpdatedAt := attachmentMixinFields0[2].Descriptor() + // attachment.DefaultUpdatedAt holds the default value on creation for the updated_at field. + attachment.DefaultUpdatedAt = attachmentDescUpdatedAt.Default.(func() time.Time) + // attachment.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + attachment.UpdateDefaultUpdatedAt = attachmentDescUpdatedAt.UpdateDefault.(func() time.Time) + // attachmentDescID is the schema descriptor for id field. + attachmentDescID := attachmentMixinFields0[0].Descriptor() + // attachment.DefaultID holds the default value on creation for the id field. 
+ attachment.DefaultID = attachmentDescID.Default.(func() uuid.UUID) authtokensMixin := schema.AuthTokens{}.Mixin() authtokensMixinFields0 := authtokensMixin[0].Fields() _ = authtokensMixinFields0 @@ -43,6 +65,92 @@ func init() { authtokensDescID := authtokensMixinFields0[0].Descriptor() // authtokens.DefaultID holds the default value on creation for the id field. authtokens.DefaultID = authtokensDescID.Default.(func() uuid.UUID) + documentMixin := schema.Document{}.Mixin() + documentMixinFields0 := documentMixin[0].Fields() + _ = documentMixinFields0 + documentFields := schema.Document{}.Fields() + _ = documentFields + // documentDescCreatedAt is the schema descriptor for created_at field. + documentDescCreatedAt := documentMixinFields0[1].Descriptor() + // document.DefaultCreatedAt holds the default value on creation for the created_at field. + document.DefaultCreatedAt = documentDescCreatedAt.Default.(func() time.Time) + // documentDescUpdatedAt is the schema descriptor for updated_at field. + documentDescUpdatedAt := documentMixinFields0[2].Descriptor() + // document.DefaultUpdatedAt holds the default value on creation for the updated_at field. + document.DefaultUpdatedAt = documentDescUpdatedAt.Default.(func() time.Time) + // document.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + document.UpdateDefaultUpdatedAt = documentDescUpdatedAt.UpdateDefault.(func() time.Time) + // documentDescTitle is the schema descriptor for title field. + documentDescTitle := documentFields[0].Descriptor() + // document.TitleValidator is a validator for the "title" field. It is called by the builders before save. 
+ document.TitleValidator = func() func(string) error { + validators := documentDescTitle.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(title string) error { + for _, fn := range fns { + if err := fn(title); err != nil { + return err + } + } + return nil + } + }() + // documentDescPath is the schema descriptor for path field. + documentDescPath := documentFields[1].Descriptor() + // document.PathValidator is a validator for the "path" field. It is called by the builders before save. + document.PathValidator = func() func(string) error { + validators := documentDescPath.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_path string) error { + for _, fn := range fns { + if err := fn(_path); err != nil { + return err + } + } + return nil + } + }() + // documentDescID is the schema descriptor for id field. + documentDescID := documentMixinFields0[0].Descriptor() + // document.DefaultID holds the default value on creation for the id field. + document.DefaultID = documentDescID.Default.(func() uuid.UUID) + documenttokenMixin := schema.DocumentToken{}.Mixin() + documenttokenMixinFields0 := documenttokenMixin[0].Fields() + _ = documenttokenMixinFields0 + documenttokenFields := schema.DocumentToken{}.Fields() + _ = documenttokenFields + // documenttokenDescCreatedAt is the schema descriptor for created_at field. + documenttokenDescCreatedAt := documenttokenMixinFields0[1].Descriptor() + // documenttoken.DefaultCreatedAt holds the default value on creation for the created_at field. + documenttoken.DefaultCreatedAt = documenttokenDescCreatedAt.Default.(func() time.Time) + // documenttokenDescUpdatedAt is the schema descriptor for updated_at field. 
+ documenttokenDescUpdatedAt := documenttokenMixinFields0[2].Descriptor() + // documenttoken.DefaultUpdatedAt holds the default value on creation for the updated_at field. + documenttoken.DefaultUpdatedAt = documenttokenDescUpdatedAt.Default.(func() time.Time) + // documenttoken.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + documenttoken.UpdateDefaultUpdatedAt = documenttokenDescUpdatedAt.UpdateDefault.(func() time.Time) + // documenttokenDescToken is the schema descriptor for token field. + documenttokenDescToken := documenttokenFields[0].Descriptor() + // documenttoken.TokenValidator is a validator for the "token" field. It is called by the builders before save. + documenttoken.TokenValidator = documenttokenDescToken.Validators[0].(func([]byte) error) + // documenttokenDescUses is the schema descriptor for uses field. + documenttokenDescUses := documenttokenFields[1].Descriptor() + // documenttoken.DefaultUses holds the default value on creation for the uses field. + documenttoken.DefaultUses = documenttokenDescUses.Default.(int) + // documenttokenDescExpiresAt is the schema descriptor for expires_at field. + documenttokenDescExpiresAt := documenttokenFields[2].Descriptor() + // documenttoken.DefaultExpiresAt holds the default value on creation for the expires_at field. + documenttoken.DefaultExpiresAt = documenttokenDescExpiresAt.Default.(func() time.Time) + // documenttokenDescID is the schema descriptor for id field. + documenttokenDescID := documenttokenMixinFields0[0].Descriptor() + // documenttoken.DefaultID holds the default value on creation for the id field. + documenttoken.DefaultID = documenttokenDescID.Default.(func() uuid.UUID) groupMixin := schema.Group{}.Mixin() groupMixinFields0 := groupMixin[0].Fields() _ = groupMixinFields0 @@ -123,36 +231,44 @@ func init() { itemDescNotes := itemFields[0].Descriptor() // item.NotesValidator is a validator for the "notes" field. It is called by the builders before save. 
item.NotesValidator = itemDescNotes.Validators[0].(func(string) error) + // itemDescQuantity is the schema descriptor for quantity field. + itemDescQuantity := itemFields[1].Descriptor() + // item.DefaultQuantity holds the default value on creation for the quantity field. + item.DefaultQuantity = itemDescQuantity.Default.(int) + // itemDescInsured is the schema descriptor for insured field. + itemDescInsured := itemFields[2].Descriptor() + // item.DefaultInsured holds the default value on creation for the insured field. + item.DefaultInsured = itemDescInsured.Default.(bool) // itemDescSerialNumber is the schema descriptor for serial_number field. - itemDescSerialNumber := itemFields[1].Descriptor() + itemDescSerialNumber := itemFields[3].Descriptor() // item.SerialNumberValidator is a validator for the "serial_number" field. It is called by the builders before save. item.SerialNumberValidator = itemDescSerialNumber.Validators[0].(func(string) error) // itemDescModelNumber is the schema descriptor for model_number field. - itemDescModelNumber := itemFields[2].Descriptor() + itemDescModelNumber := itemFields[4].Descriptor() // item.ModelNumberValidator is a validator for the "model_number" field. It is called by the builders before save. item.ModelNumberValidator = itemDescModelNumber.Validators[0].(func(string) error) // itemDescManufacturer is the schema descriptor for manufacturer field. - itemDescManufacturer := itemFields[3].Descriptor() + itemDescManufacturer := itemFields[5].Descriptor() // item.ManufacturerValidator is a validator for the "manufacturer" field. It is called by the builders before save. item.ManufacturerValidator = itemDescManufacturer.Validators[0].(func(string) error) // itemDescLifetimeWarranty is the schema descriptor for lifetime_warranty field. 
- itemDescLifetimeWarranty := itemFields[4].Descriptor() + itemDescLifetimeWarranty := itemFields[6].Descriptor() // item.DefaultLifetimeWarranty holds the default value on creation for the lifetime_warranty field. item.DefaultLifetimeWarranty = itemDescLifetimeWarranty.Default.(bool) // itemDescWarrantyDetails is the schema descriptor for warranty_details field. - itemDescWarrantyDetails := itemFields[6].Descriptor() + itemDescWarrantyDetails := itemFields[8].Descriptor() // item.WarrantyDetailsValidator is a validator for the "warranty_details" field. It is called by the builders before save. item.WarrantyDetailsValidator = itemDescWarrantyDetails.Validators[0].(func(string) error) // itemDescPurchasePrice is the schema descriptor for purchase_price field. - itemDescPurchasePrice := itemFields[9].Descriptor() + itemDescPurchasePrice := itemFields[11].Descriptor() // item.DefaultPurchasePrice holds the default value on creation for the purchase_price field. item.DefaultPurchasePrice = itemDescPurchasePrice.Default.(float64) // itemDescSoldPrice is the schema descriptor for sold_price field. - itemDescSoldPrice := itemFields[12].Descriptor() + itemDescSoldPrice := itemFields[14].Descriptor() // item.DefaultSoldPrice holds the default value on creation for the sold_price field. item.DefaultSoldPrice = itemDescSoldPrice.Default.(float64) // itemDescSoldNotes is the schema descriptor for sold_notes field. - itemDescSoldNotes := itemFields[13].Descriptor() + itemDescSoldNotes := itemFields[15].Descriptor() // item.SoldNotesValidator is a validator for the "sold_notes" field. It is called by the builders before save. item.SoldNotesValidator = itemDescSoldNotes.Validators[0].(func(string) error) // itemDescID is the schema descriptor for id field. 
diff --git a/backend/ent/schema/attachment.go b/backend/ent/schema/attachment.go new file mode 100644 index 0000000..b0adb8c --- /dev/null +++ b/backend/ent/schema/attachment.go @@ -0,0 +1,42 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/schema/mixins" +) + +// Attachment holds the schema definition for the Attachment entity. +type Attachment struct { + ent.Schema +} + +func (Attachment) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + } +} + +// Fields of the Attachment. +func (Attachment) Fields() []ent.Field { + return []ent.Field{ + field.Enum("type"). + Values("photo", "manual", "warranty", "attachment"). + Default("attachment"), + } +} + +// Edges of the Attachment. +func (Attachment) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("item", Item.Type). + Ref("attachments"). + Required(). + Unique(), + edge.From("document", Document.Type). + Ref("attachments"). + Required(). + Unique(), + } +} diff --git a/backend/ent/schema/auth_tokens.go b/backend/ent/schema/auth_tokens.go index 09297d0..9063581 100644 --- a/backend/ent/schema/auth_tokens.go +++ b/backend/ent/schema/auth_tokens.go @@ -42,7 +42,6 @@ func (AuthTokens) Edges() []ent.Edge { func (AuthTokens) Indexes() []ent.Index { return []ent.Index{ - // non-unique index. index.Fields("token"), } } diff --git a/backend/ent/schema/document.go b/backend/ent/schema/document.go new file mode 100644 index 0000000..9797b0f --- /dev/null +++ b/backend/ent/schema/document.go @@ -0,0 +1,50 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "github.com/hay-kot/content/backend/ent/schema/mixins" +) + +// Document holds the schema definition for the Document entity. 
+type Document struct { + ent.Schema +} + +func (Document) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + } +} + +// Fields of the Document. +func (Document) Fields() []ent.Field { + return []ent.Field{ + field.String("title"). + MaxLen(255). + NotEmpty(), + field.String("path"). + MaxLen(500). + NotEmpty(), + } +} + +// Edges of the Document. +func (Document) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("group", Group.Type). + Ref("documents"). + Required(). + Unique(), + edge.To("document_tokens", DocumentToken.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("attachments", Attachment.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + } +} diff --git a/backend/ent/schema/document_token.go b/backend/ent/schema/document_token.go new file mode 100644 index 0000000..8a4d10a --- /dev/null +++ b/backend/ent/schema/document_token.go @@ -0,0 +1,50 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "github.com/hay-kot/content/backend/ent/schema/mixins" +) + +// DocumentToken holds the schema definition for the DocumentToken entity. +type DocumentToken struct { + ent.Schema +} + +func (DocumentToken) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + } +} + +// Fields of the DocumentToken. +func (DocumentToken) Fields() []ent.Field { + return []ent.Field{ + field.Bytes("token"). + NotEmpty(). + Unique(), + field.Int("uses"). + Default(1), + field.Time("expires_at"). + Default(func() time.Time { return time.Now().Add(time.Minute * 10) }), + } +} + +// Edges of the DocumentToken. +func (DocumentToken) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("document", Document.Type). + Ref("document_tokens"). 
+ Unique(), + } +} + +func (DocumentToken) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("token"), + } +} diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 68c3b99..a5c863e 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -34,17 +34,25 @@ func (Group) Fields() []ent.Field { // Edges of the Home. func (Group) Edges() []ent.Edge { return []ent.Edge{ - edge.To("users", User.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("locations", Location.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("items", Item.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("labels", Label.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), + edge.To("users", User.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("locations", Location.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("items", Item.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("labels", Label.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), + edge.To("documents", Document.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), } } diff --git a/backend/ent/schema/item.go b/backend/ent/schema/item.go index 8c2f4cc..7d2190d 100644 --- a/backend/ent/schema/item.go +++ b/backend/ent/schema/item.go @@ -37,6 +37,10 @@ func (Item) Fields() []ent.Field { field.String("notes"). MaxLen(1000). Optional(), + field.Int("quantity"). + Default(1), + field.Bool("insured"). + Default(false), // ------------------------------------ // item identification @@ -93,10 +97,15 @@ func (Item) Edges() []ent.Edge { edge.From("location", Location.Type). Ref("items"). Unique(), - edge.To("fields", ItemField.Type).Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), + edge.To("fields", ItemField.Type). 
+ Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), edge.From("label", Label.Type). Ref("items"), + edge.To("attachments", Attachment.Type). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), } } diff --git a/backend/ent/schema/templates/has_id.tmpl b/backend/ent/schema/templates/has_id.tmpl new file mode 100644 index 0000000..42b0cd8 --- /dev/null +++ b/backend/ent/schema/templates/has_id.tmpl @@ -0,0 +1,18 @@ +{{/* The line below tells Intellij/GoLand to enable the autocompletion based on the *gen.Graph type. */}} +{{/* gotype: entgo.io/ent/entc/gen.Graph */}} + +{{ define "has_id" }} + +{{/* Add the base header for the generated file */}} +{{ $pkg := base $.Config.Package }} +{{ template "header" $ }} +import "github.com/google/uuid" +{{/* Loop over all nodes and implement the "HasID" interface */}} +{{ range $n := $.Nodes }} + {{ $receiver := $n.Receiver }} + func ({{ $receiver }} *{{ $n.Name }}) GetID() uuid.UUID { + return {{ $receiver }}.ID + } +{{ end }} + +{{ end }} \ No newline at end of file diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 69d1007..b062e19 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -12,8 +12,14 @@ import ( // Tx is a transactional client that is created by calling Client.Tx(). type Tx struct { config + // Attachment is the client for interacting with the Attachment builders. + Attachment *AttachmentClient // AuthTokens is the client for interacting with the AuthTokens builders. AuthTokens *AuthTokensClient + // Document is the client for interacting with the Document builders. + Document *DocumentClient + // DocumentToken is the client for interacting with the DocumentToken builders. + DocumentToken *DocumentTokenClient // Group is the client for interacting with the Group builders. Group *GroupClient // Item is the client for interacting with the Item builders. 
@@ -161,7 +167,10 @@ func (tx *Tx) Client() *Client { } func (tx *Tx) init() { + tx.Attachment = NewAttachmentClient(tx.config) tx.AuthTokens = NewAuthTokensClient(tx.config) + tx.Document = NewDocumentClient(tx.config) + tx.DocumentToken = NewDocumentTokenClient(tx.config) tx.Group = NewGroupClient(tx.config) tx.Item = NewItemClient(tx.config) tx.ItemField = NewItemFieldClient(tx.config) @@ -177,7 +186,7 @@ func (tx *Tx) init() { // of them in order to commit or rollback the transaction. // // If a closed transaction is embedded in one of the generated entities, and the entity -// applies a query, for example: AuthTokens.QueryXXX(), the query will be executed +// applies a query, for example: Attachment.QueryXXX(), the query will be executed // through the driver which created this transaction. // // Note that txDriver is not goroutine safe. diff --git a/backend/internal/mocks/factories/users.go b/backend/internal/mocks/factories/users.go index 438b2d8..1265768 100644 --- a/backend/internal/mocks/factories/users.go +++ b/backend/internal/mocks/factories/users.go @@ -8,9 +8,9 @@ import ( func UserFactory() types.UserCreate { f := faker.NewFaker() return types.UserCreate{ - Name: f.RandomString(10), - Email: f.RandomEmail(), - Password: f.RandomString(10), - IsSuperuser: f.RandomBool(), + Name: f.Str(10), + Email: f.Email(), + Password: f.Str(10), + IsSuperuser: f.Bool(), } } diff --git a/backend/internal/repo/id_set.go b/backend/internal/repo/id_set.go new file mode 100644 index 0000000..0041d93 --- /dev/null +++ b/backend/internal/repo/id_set.go @@ -0,0 +1,62 @@ +package repo + +import "github.com/google/uuid" + +// HasID is an interface to entities that have an ID uuid.UUID field and a GetID() method. +// This interface is fulfilled by all entities generated by entgo.io/ent via a custom template +type HasID interface { + GetID() uuid.UUID +} + +// IDSet is a utility set-like type for working with sets of uuid.UUIDs within a repository +// instance. 
Most useful for comparing lists of UUIDs for processing relationship +// IDs and remove/adding relationships as required. +// +// # See how ItemRepo uses it to manage the Labels-To-Items relationship +// +// NOTE: may be worth moving this to a more generic package/set implementation +// or use a 3rd party set library, but this is good enough for now +type IDSet struct { + mp map[uuid.UUID]struct{} +} + +func NewIDSet(l int) *IDSet { + return &IDSet{ + mp: make(map[uuid.UUID]struct{}, l), + } +} + +func EntitiesToIDSet[T HasID](entities []T) *IDSet { + s := NewIDSet(len(entities)) + for _, e := range entities { + s.Add(e.GetID()) + } + return s +} + +func (t *IDSet) Slice() []uuid.UUID { + s := make([]uuid.UUID, 0, len(t.mp)) + for k := range t.mp { + s = append(s, k) + } + return s +} + +func (t *IDSet) Add(ids ...uuid.UUID) { + for _, id := range ids { + t.mp[id] = struct{}{} + } +} + +func (t *IDSet) Has(id uuid.UUID) bool { + _, ok := t.mp[id] + return ok +} + +func (t *IDSet) Len() int { + return len(t.mp) +} + +func (t *IDSet) Remove(id uuid.UUID) { + delete(t.mp, id) +} diff --git a/backend/internal/repo/repo_documents.go b/backend/internal/repo/repo_documents.go new file mode 100644 index 0000000..9cb4c0f --- /dev/null +++ b/backend/internal/repo/repo_documents.go @@ -0,0 +1,47 @@ +package repo + +import ( + "context" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/document" + "github.com/hay-kot/content/backend/ent/group" + "github.com/hay-kot/content/backend/internal/types" +) + +// DocumentRepository is a repository for Document entity +type DocumentRepository struct { + db *ent.Client +} + +func (r *DocumentRepository) Create(ctx context.Context, gid uuid.UUID, doc types.DocumentCreate) (*ent.Document, error) { + return r.db.Document.Create(). + SetGroupID(gid). + SetTitle(doc.Title). + SetPath(doc.Path). 
+ Save(ctx) +} + +func (r *DocumentRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]*ent.Document, error) { + return r.db.Document.Query(). + Where(document.HasGroupWith(group.ID(gid))). + All(ctx) +} + +func (r *DocumentRepository) Get(ctx context.Context, id uuid.UUID) (*ent.Document, error) { + return r.db.Document.Query(). + Where(document.ID(id)). + Only(ctx) +} + +func (r *DocumentRepository) Update(ctx context.Context, id uuid.UUID, doc types.DocumentUpdate) (*ent.Document, error) { + return r.db.Document.UpdateOneID(id). + SetTitle(doc.Title). + SetPath(doc.Path). + Save(ctx) +} + +func (r *DocumentRepository) Delete(ctx context.Context, id uuid.UUID) error { + return r.db.Document.DeleteOneID(id).Exec(ctx) +} diff --git a/backend/internal/repo/repo_documents_test.go b/backend/internal/repo/repo_documents_test.go new file mode 100644 index 0000000..187ca0f --- /dev/null +++ b/backend/internal/repo/repo_documents_test.go @@ -0,0 +1,202 @@ +package repo + +import ( + "context" + "testing" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/internal/types" + "github.com/stretchr/testify/assert" +) + +func TestDocumentRepository_Create(t *testing.T) { + type args struct { + ctx context.Context + gid uuid.UUID + doc types.DocumentCreate + } + tests := []struct { + name string + args args + want *ent.Document + wantErr bool + }{ + { + name: "create document", + args: args{ + ctx: context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "test document", + Path: "/test/document", + }, + }, + want: &ent.Document{ + Title: "test document", + Path: "/test/document", + }, + wantErr: false, + }, + { + name: "create document with empty title", + args: args{ + ctx: context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "", + Path: "/test/document", + }, + }, + want: nil, + wantErr: true, + }, + { + name: "create document with empty path", + args: args{ + ctx: 
context.Background(), + gid: tGroup.ID, + doc: types.DocumentCreate{ + Title: "test document", + Path: "", + }, + }, + want: nil, + wantErr: true, + }, + } + ids := make([]uuid.UUID, 0, len(tests)) + + t.Cleanup(func() { + for _, id := range ids { + err := tRepos.Docs.Delete(context.Background(), id) + assert.NoError(t, err) + } + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tRepos.Docs.Create(tt.args.ctx, tt.args.gid, tt.args.doc) + if (err != nil) != tt.wantErr { + t.Errorf("DocumentRepository.Create() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + return + } + + assert.Equal(t, tt.want.Title, got.Title) + assert.Equal(t, tt.want.Path, got.Path) + ids = append(ids, got.ID) + }) + } +} + +func useDocs(t *testing.T, num int) []*ent.Document { + t.Helper() + + results := make([]*ent.Document, 0, num) + ids := make([]uuid.UUID, 0, num) + + for i := 0; i < num; i++ { + doc, err := tRepos.Docs.Create(context.Background(), tGroup.ID, types.DocumentCreate{ + Title: fk.Str(10), + Path: fk.Path(), + }) + + assert.NoError(t, err) + assert.NotNil(t, doc) + results = append(results, doc) + ids = append(ids, doc.ID) + } + + t.Cleanup(func() { + for _, id := range ids { + err := tRepos.Docs.Delete(context.Background(), id) + + if err != nil { + assert.True(t, ent.IsNotFound(err)) + } + } + }) + + return results +} + +func TestDocumentRepository_GetAll(t *testing.T) { + entities := useDocs(t, 10) + + for _, entity := range entities { + assert.NotNil(t, entity) + } + + all, err := tRepos.Docs.GetAll(context.Background(), tGroup.ID) + assert.NoError(t, err) + + assert.Len(t, all, 10) + for _, entity := range all { + assert.NotNil(t, entity) + + for _, e := range entities { + if e.ID == entity.ID { + assert.Equal(t, e.Title, entity.Title) + assert.Equal(t, e.Path, entity.Path) + } + } + } +} + +func TestDocumentRepository_Get(t *testing.T) { + entities := useDocs(t, 
10) + + for _, entity := range entities { + got, err := tRepos.Docs.Get(context.Background(), entity.ID) + + assert.NoError(t, err) + assert.Equal(t, entity.ID, got.ID) + assert.Equal(t, entity.Title, got.Title) + assert.Equal(t, entity.Path, got.Path) + } +} + +func TestDocumentRepository_Update(t *testing.T) { + entities := useDocs(t, 10) + + for _, entity := range entities { + got, err := tRepos.Docs.Get(context.Background(), entity.ID) + + assert.NoError(t, err) + assert.Equal(t, entity.ID, got.ID) + assert.Equal(t, entity.Title, got.Title) + assert.Equal(t, entity.Path, got.Path) + } + + for _, entity := range entities { + updateData := types.DocumentUpdate{ + Title: fk.Str(10), + Path: fk.Path(), + } + + updated, err := tRepos.Docs.Update(context.Background(), entity.ID, updateData) + + assert.NoError(t, err) + assert.Equal(t, entity.ID, updated.ID) + assert.Equal(t, updateData.Title, updated.Title) + assert.Equal(t, updateData.Path, updated.Path) + } +} + +func TestDocumentRepository_Delete(t *testing.T) { + entities := useDocs(t, 10) + + for _, entity := range entities { + err := tRepos.Docs.Delete(context.Background(), entity.ID) + assert.NoError(t, err) + + _, err = tRepos.Docs.Get(context.Background(), entity.ID) + assert.Error(t, err) + } +} diff --git a/backend/internal/repo/repo_documents_tokens.go b/backend/internal/repo/repo_documents_tokens.go new file mode 100644 index 0000000..7c260c4 --- /dev/null +++ b/backend/internal/repo/repo_documents_tokens.go @@ -0,0 +1,41 @@ +package repo + +import ( + "context" + "time" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/internal/types" +) + +// DocumentTokensRepository is a repository for Document entity +type DocumentTokensRepository struct { + db *ent.Client +} + +func (r *DocumentTokensRepository) Create(ctx context.Context, data types.DocumentTokenCreate) (*ent.DocumentToken, error) { 
+ result, err := r.db.DocumentToken.Create(). + SetDocumentID(data.DocumentID). + SetToken(data.TokenHash). + SetExpiresAt(data.ExpiresAt). + Save(ctx) + + if err != nil { + return nil, err + } + + return r.db.DocumentToken.Query(). + Where(documenttoken.ID(result.ID)). + WithDocument(). + Only(ctx) +} + +func (r *DocumentTokensRepository) PurgeExpiredTokens(ctx context.Context) (int, error) { + return r.db.DocumentToken.Delete().Where(documenttoken.ExpiresAtLT(time.Now())).Exec(ctx) +} + +func (r *DocumentTokensRepository) Delete(ctx context.Context, id uuid.UUID) error { + return r.db.DocumentToken.DeleteOneID(id).Exec(ctx) +} diff --git a/backend/internal/repo/repo_documents_tokens_test.go b/backend/internal/repo/repo_documents_tokens_test.go new file mode 100644 index 0000000..7106253 --- /dev/null +++ b/backend/internal/repo/repo_documents_tokens_test.go @@ -0,0 +1,149 @@ +package repo + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/documenttoken" + "github.com/hay-kot/content/backend/internal/types" + "github.com/stretchr/testify/assert" +) + +func TestDocumentTokensRepository_Create(t *testing.T) { + entities := useDocs(t, 1) + doc := entities[0] + expires := fk.Time() + + type args struct { + ctx context.Context + data types.DocumentTokenCreate + } + tests := []struct { + name string + args args + want *ent.DocumentToken + wantErr bool + }{ + { + name: "create document token", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: doc.ID, + TokenHash: []byte("token"), + ExpiresAt: expires, + }, + }, + want: &ent.DocumentToken{ + Edges: ent.DocumentTokenEdges{ + Document: doc, + }, + Token: []byte("token"), + ExpiresAt: expires, + }, + wantErr: false, + }, + { + name: "create document token with empty token", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: doc.ID, + 
TokenHash: []byte(""), + ExpiresAt: expires, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "create document token with empty document id", + args: args{ + ctx: context.Background(), + data: types.DocumentTokenCreate{ + DocumentID: uuid.Nil, + TokenHash: []byte("token"), + ExpiresAt: expires, + }, + }, + want: nil, + wantErr: true, + }, + } + + ids := make([]uuid.UUID, 0, len(tests)) + + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.DocTokens.Delete(context.Background(), id) + } + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, err := tRepos.DocTokens.Create(tt.args.ctx, tt.args.data) + if (err != nil) != tt.wantErr { + t.Errorf("DocumentTokensRepository.Create() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + + assert.Equal(t, tt.want.Token, got.Token) + assert.WithinDuration(t, tt.want.ExpiresAt, got.ExpiresAt, time.Duration(1)*time.Second) + assert.Equal(t, tt.want.Edges.Document.ID, got.Edges.Document.ID) + }) + + } +} + +func useDocTokens(t *testing.T, num int) []*ent.DocumentToken { + entity := useDocs(t, 1)[0] + + results := make([]*ent.DocumentToken, 0, num) + + ids := make([]uuid.UUID, 0, num) + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.DocTokens.Delete(context.Background(), id) + } + }) + + for i := 0; i < num; i++ { + e, err := tRepos.DocTokens.Create(context.Background(), types.DocumentTokenCreate{ + DocumentID: entity.ID, + TokenHash: []byte(fk.Str(10)), + ExpiresAt: fk.Time(), + }) + + assert.NoError(t, err) + results = append(results, e) + ids = append(ids, e.ID) + } + + return results +} + +func TestDocumentTokensRepository_PurgeExpiredTokens(t *testing.T) { + entities := useDocTokens(t, 2) + + // set expired token + tRepos.DocTokens.db.DocumentToken.Update(). + Where(documenttoken.ID(entities[0].ID)). + SetExpiresAt(time.Now().Add(-time.Hour)). 
+ ExecX(context.Background()) + + count, err := tRepos.DocTokens.PurgeExpiredTokens(context.Background()) + assert.NoError(t, err) + assert.Equal(t, 1, count) + + all, err := tRepos.DocTokens.db.DocumentToken.Query().All(context.Background()) + assert.NoError(t, err) + assert.Len(t, all, 1) + assert.Equal(t, entities[1].ID, all[0].ID) +} diff --git a/backend/internal/repo/repo_item_attachments.go b/backend/internal/repo/repo_item_attachments.go new file mode 100644 index 0000000..f21bb7e --- /dev/null +++ b/backend/internal/repo/repo_item_attachments.go @@ -0,0 +1,44 @@ +package repo + +import ( + "context" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/attachment" +) + +// AttachmentRepo is a repository for Attachments table that links Items to Documents +// While also specifying the type of the attachment. This _ONLY_ provides basic Create Update +// And Delete operations. For accessing the actual documents, use the Items repository since it +// provides the attachments with the documents. +type AttachmentRepo struct { + db *ent.Client +} + +func (r *AttachmentRepo) Create(ctx context.Context, itemId, docId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) { + return r.db.Attachment.Create(). + SetType(typ). + SetDocumentID(docId). + SetItemID(itemId). + Save(ctx) +} + +func (r *AttachmentRepo) Get(ctx context.Context, id uuid.UUID) (*ent.Attachment, error) { + return r.db.Attachment. + Query(). + Where(attachment.ID(id)). + WithItem(). + WithDocument(). + Only(ctx) +} + +func (r *AttachmentRepo) Update(ctx context.Context, itemId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) { + return r.db.Attachment.UpdateOneID(itemId). + SetType(typ). 
+ Save(ctx) +} + +func (r *AttachmentRepo) Delete(ctx context.Context, id uuid.UUID) error { + return r.db.Attachment.DeleteOneID(id).Exec(ctx) +} diff --git a/backend/internal/repo/repo_item_attachments_test.go b/backend/internal/repo/repo_item_attachments_test.go new file mode 100644 index 0000000..0165b8e --- /dev/null +++ b/backend/internal/repo/repo_item_attachments_test.go @@ -0,0 +1,133 @@ +package repo + +import ( + "context" + "testing" + + "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent" + "github.com/hay-kot/content/backend/ent/attachment" + "github.com/stretchr/testify/assert" +) + +func TestAttachmentRepo_Create(t *testing.T) { + doc := useDocs(t, 1)[0] + item := useItems(t, 1)[0] + + ids := []uuid.UUID{doc.ID, item.ID} + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.Attachments.Delete(context.Background(), id) + } + }) + + type args struct { + ctx context.Context + itemId uuid.UUID + docId uuid.UUID + typ attachment.Type + } + tests := []struct { + name string + args args + want *ent.Attachment + wantErr bool + }{ + { + name: "create attachment", + args: args{ + ctx: context.Background(), + itemId: item.ID, + docId: doc.ID, + typ: attachment.TypePhoto, + }, + want: &ent.Attachment{ + Type: attachment.TypePhoto, + }, + }, + { + name: "create attachment with invalid item id", + args: args{ + ctx: context.Background(), + itemId: uuid.New(), + docId: doc.ID, + typ: "blarg", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, err := tRepos.Attachments.Create(tt.args.ctx, tt.args.itemId, tt.args.docId, tt.args.typ) + if (err != nil) != tt.wantErr { + t.Errorf("AttachmentRepo.Create() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + assert.Equal(t, tt.want.Type, got.Type) + + withItems, err := tRepos.Attachments.Get(tt.args.ctx, got.ID) + assert.NoError(t, err) + assert.Equal(t, tt.args.itemId, withItems.Edges.Item.ID) + 
assert.Equal(t, tt.args.docId, withItems.Edges.Document.ID) + + ids = append(ids, got.ID) + }) + } +} + +func useAttachments(t *testing.T, n int) []*ent.Attachment { + t.Helper() + + doc := useDocs(t, 1)[0] + item := useItems(t, 1)[0] + + ids := make([]uuid.UUID, 0, n) + t.Cleanup(func() { + for _, id := range ids { + _ = tRepos.Attachments.Delete(context.Background(), id) + } + }) + + attachments := make([]*ent.Attachment, n) + for i := 0; i < n; i++ { + attachment, err := tRepos.Attachments.Create(context.Background(), item.ID, doc.ID, attachment.TypePhoto) + assert.NoError(t, err) + attachments[i] = attachment + + ids = append(ids, attachment.ID) + } + + return attachments +} + +func TestAttachmentRepo_Update(t *testing.T) { + entity := useAttachments(t, 1)[0] + + for _, typ := range []attachment.Type{"photo", "manual", "warranty", "attachment"} { + t.Run(string(typ), func(t *testing.T) { + _, err := tRepos.Attachments.Update(context.Background(), entity.ID, typ) + assert.NoError(t, err) + + updated, err := tRepos.Attachments.Get(context.Background(), entity.ID) + assert.NoError(t, err) + assert.Equal(t, typ, updated.Type) + }) + } + +} + +func TestAttachmentRepo_Delete(t *testing.T) { + entity := useAttachments(t, 1)[0] + + err := tRepos.Attachments.Delete(context.Background(), entity.ID) + assert.NoError(t, err) + + _, err = tRepos.Attachments.Get(context.Background(), entity.ID) + assert.Error(t, err) +} diff --git a/backend/internal/repo/repo_items.go b/backend/internal/repo/repo_items.go index 90fc911..8a95a22 100644 --- a/backend/internal/repo/repo_items.go +++ b/backend/internal/repo/repo_items.go @@ -21,9 +21,13 @@ func (e *ItemsRepository) GetOne(ctx context.Context, id uuid.UUID) (*ent.Item, WithLabel(). WithLocation(). WithGroup(). + WithAttachments(func(aq *ent.AttachmentQuery) { + aq.WithDocument() + }). Only(ctx) } +// GetAll returns all the items in the database with the Labels and Locations eager loaded. 
func (e *ItemsRepository) GetAll(ctx context.Context, gid uuid.UUID) ([]*ent.Item, error) { return e.db.Item.Query(). Where(item.HasGroupWith(group.ID(gid))). @@ -72,11 +76,31 @@ func (e *ItemsRepository) Update(ctx context.Context, data types.ItemUpdate) (*e SetSoldNotes(data.SoldNotes). SetNotes(data.Notes). SetLifetimeWarranty(data.LifetimeWarranty). + SetInsured(data.Insured). SetWarrantyExpires(data.WarrantyExpires). - SetWarrantyDetails(data.WarrantyDetails) + SetWarrantyDetails(data.WarrantyDetails). + SetQuantity(data.Quantity) - err := q.Exec(ctx) + currentLabels, err := e.db.Item.Query().Where(item.ID(data.ID)).QueryLabel().All(ctx) + if err != nil { + return nil, err + } + set := EntitiesToIDSet(currentLabels) + + for _, l := range data.LabelIDs { + if set.Has(l) { + set.Remove(l) + continue + } + q.AddLabelIDs(l) + } + + if set.Len() > 0 { + q.RemoveLabelIDs(set.Slice()...) + } + + err = q.Exec(ctx) if err != nil { return nil, err } diff --git a/backend/internal/repo/repo_items_test.go b/backend/internal/repo/repo_items_test.go index 2768dc4..893bcb3 100644 --- a/backend/internal/repo/repo_items_test.go +++ b/backend/internal/repo/repo_items_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/google/uuid" "github.com/hay-kot/content/backend/ent" "github.com/hay-kot/content/backend/internal/types" "github.com/stretchr/testify/assert" @@ -12,12 +13,12 @@ import ( func itemFactory() types.ItemCreate { return types.ItemCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } -func useItems(t *testing.T, len int) ([]*ent.Item, func()) { +func useItems(t *testing.T, len int) []*ent.Item { t.Helper() location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) @@ -33,17 +34,17 @@ func useItems(t *testing.T, len int) ([]*ent.Item, func()) { items[i] = item } - return items, func() { + t.Cleanup(func() { for _, item := range items { - err := 
tRepos.Items.Delete(context.Background(), item.ID) - assert.NoError(t, err) + _ = tRepos.Items.Delete(context.Background(), item.ID) } - } + }) + + return items } func TestItemsRepository_GetOne(t *testing.T) { - entity, cleanup := useItems(t, 3) - defer cleanup() + entity := useItems(t, 3) for _, item := range entity { result, err := tRepos.Items.GetOne(context.Background(), item.ID) @@ -54,8 +55,7 @@ func TestItemsRepository_GetOne(t *testing.T) { func TestItemsRepository_GetAll(t *testing.T) { length := 10 - expected, cleanup := useItems(t, length) - defer cleanup() + expected := useItems(t, length) results, err := tRepos.Items.GetAll(context.Background(), tGroup.ID) assert.NoError(t, err) @@ -119,7 +119,7 @@ func TestItemsRepository_Create_Location(t *testing.T) { } func TestItemsRepository_Delete(t *testing.T) { - entities, _ := useItems(t, 3) + entities := useItems(t, 3) for _, item := range entities { err := tRepos.Items.Delete(context.Background(), item.ID) @@ -131,9 +131,68 @@ func TestItemsRepository_Delete(t *testing.T) { assert.Empty(t, results) } +func TestItemsRepository_Update_Labels(t *testing.T) { + entity := useItems(t, 1)[0] + labels := useLabels(t, 3) + + labelsIDs := []uuid.UUID{labels[0].ID, labels[1].ID, labels[2].ID} + + type args struct { + labelIds []uuid.UUID + } + + tests := []struct { + name string + args args + want []uuid.UUID + }{ + { + name: "add all labels", + args: args{ + labelIds: labelsIDs, + }, + want: labelsIDs, + }, + { + name: "update with one label", + args: args{ + labelIds: labelsIDs[:1], + }, + want: labelsIDs[:1], + }, + { + name: "add one new label to existing single label", + args: args{ + labelIds: labelsIDs[1:], + }, + want: labelsIDs[1:], + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Apply all labels to entity + updateData := types.ItemUpdate{ + ID: entity.ID, + Name: entity.Name, + LocationID: entity.Edges.Location.ID, + LabelIDs: tt.args.labelIds, + } + + updated, err := 
tRepos.Items.Update(context.Background(), updateData) + assert.NoError(t, err) + assert.Len(t, tt.want, len(updated.Edges.Label)) + + for _, label := range updated.Edges.Label { + assert.Contains(t, tt.want, label.ID) + } + }) + } + +} + func TestItemsRepository_Update(t *testing.T) { - entities, cleanup := useItems(t, 3) - defer cleanup() + entities := useItems(t, 3) entity := entities[0] @@ -141,20 +200,20 @@ func TestItemsRepository_Update(t *testing.T) { ID: entity.ID, Name: entity.Name, LocationID: entity.Edges.Location.ID, - SerialNumber: fk.RandomString(10), + SerialNumber: fk.Str(10), LabelIDs: nil, - ModelNumber: fk.RandomString(10), - Manufacturer: fk.RandomString(10), + ModelNumber: fk.Str(10), + Manufacturer: fk.Str(10), PurchaseTime: time.Now(), - PurchaseFrom: fk.RandomString(10), + PurchaseFrom: fk.Str(10), PurchasePrice: 300.99, SoldTime: time.Now(), - SoldTo: fk.RandomString(10), + SoldTo: fk.Str(10), SoldPrice: 300.99, - SoldNotes: fk.RandomString(10), - Notes: fk.RandomString(10), + SoldNotes: fk.Str(10), + Notes: fk.Str(10), WarrantyExpires: time.Now(), - WarrantyDetails: fk.RandomString(10), + WarrantyDetails: fk.Str(10), LifetimeWarranty: true, } diff --git a/backend/internal/repo/repo_labels_test.go b/backend/internal/repo/repo_labels_test.go index f647753..137376a 100644 --- a/backend/internal/repo/repo_labels_test.go +++ b/backend/internal/repo/repo_labels_test.go @@ -11,12 +11,12 @@ import ( func labelFactory() types.LabelCreate { return types.LabelCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } -func useLabels(t *testing.T, len int) ([]*ent.Label, func()) { +func useLabels(t *testing.T, len int) []*ent.Label { t.Helper() labels := make([]*ent.Label, len) @@ -28,17 +28,17 @@ func useLabels(t *testing.T, len int) ([]*ent.Label, func()) { labels[i] = item } - return labels, func() { + t.Cleanup(func() { for _, item := range labels { - err := 
tRepos.Labels.Delete(context.Background(), item.ID) - assert.NoError(t, err) + _ = tRepos.Labels.Delete(context.Background(), item.ID) } - } + }) + + return labels } func TestLabelRepository_Get(t *testing.T) { - labels, cleanup := useLabels(t, 1) - defer cleanup() + labels := useLabels(t, 1) label := labels[0] // Get by ID @@ -48,8 +48,7 @@ func TestLabelRepository_Get(t *testing.T) { } func TestLabelRepositoryGetAll(t *testing.T) { - _, cleanup := useLabels(t, 10) - defer cleanup() + useLabels(t, 10) all, err := tRepos.Labels.GetAll(context.Background(), tGroup.ID) assert.NoError(t, err) @@ -75,8 +74,8 @@ func TestLabelRepository_Update(t *testing.T) { updateData := types.LabelUpdate{ ID: loc.ID, - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } update, err := tRepos.Labels.Update(context.Background(), updateData) diff --git a/backend/internal/repo/repo_locations_test.go b/backend/internal/repo/repo_locations_test.go index e8d2f54..9370305 100644 --- a/backend/internal/repo/repo_locations_test.go +++ b/backend/internal/repo/repo_locations_test.go @@ -10,8 +10,8 @@ import ( func locationFactory() types.LocationCreate { return types.LocationCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } } @@ -31,14 +31,14 @@ func TestLocationRepository_Get(t *testing.T) { func TestLocationRepositoryGetAllWithCount(t *testing.T) { ctx := context.Background() result, err := tRepos.Locations.Create(ctx, tGroup.ID, types.LocationCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), }) assert.NoError(t, err) _, err = tRepos.Items.Create(ctx, tGroup.ID, types.ItemCreate{ - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), LocationID: result.ID, }) @@ -74,8 +74,8 @@ func TestLocationRepository_Update(t *testing.T) { 
updateData := types.LocationUpdate{ ID: loc.ID, - Name: fk.RandomString(10), - Description: fk.RandomString(100), + Name: fk.Str(10), + Description: fk.Str(100), } update, err := tRepos.Locations.Update(context.Background(), updateData) diff --git a/backend/internal/repo/repo_users_test.go b/backend/internal/repo/repo_users_test.go index 98f115d..f08acb4 100644 --- a/backend/internal/repo/repo_users_test.go +++ b/backend/internal/repo/repo_users_test.go @@ -13,10 +13,10 @@ import ( func userFactory() types.UserCreate { return types.UserCreate{ - Name: fk.RandomString(10), - Email: fk.RandomEmail(), - Password: fk.RandomString(10), - IsSuperuser: fk.RandomBool(), + Name: fk.Str(10), + Email: fk.Email(), + Password: fk.Str(10), + IsSuperuser: fk.Bool(), GroupID: tGroup.ID, } } @@ -109,8 +109,8 @@ func TestUserRepo_Update(t *testing.T) { assert.NoError(t, err) updateData := types.UserUpdate{ - Name: fk.RandomString(10), - Email: fk.RandomEmail(), + Name: fk.Str(10), + Email: fk.Email(), } // Update diff --git a/backend/internal/repo/repos_all.go b/backend/internal/repo/repos_all.go index 3542728..e89fe33 100644 --- a/backend/internal/repo/repos_all.go +++ b/backend/internal/repo/repos_all.go @@ -4,21 +4,27 @@ import "github.com/hay-kot/content/backend/ent" // AllRepos is a container for all the repository interfaces type AllRepos struct { - Users *UserRepository - AuthTokens *TokenRepository - Groups *GroupRepository - Locations *LocationRepository - Labels *LabelRepository - Items *ItemsRepository + Users *UserRepository + AuthTokens *TokenRepository + Groups *GroupRepository + Locations *LocationRepository + Labels *LabelRepository + Items *ItemsRepository + Docs *DocumentRepository + DocTokens *DocumentTokensRepository + Attachments *AttachmentRepo } func EntAllRepos(db *ent.Client) *AllRepos { return &AllRepos{ - Users: &UserRepository{db}, - AuthTokens: &TokenRepository{db}, - Groups: &GroupRepository{db}, - Locations: &LocationRepository{db}, - Labels: 
&LabelRepository{db}, - Items: &ItemsRepository{db}, + Users: &UserRepository{db}, + AuthTokens: &TokenRepository{db}, + Groups: &GroupRepository{db}, + Locations: &LocationRepository{db}, + Labels: &LabelRepository{db}, + Items: &ItemsRepository{db}, + Docs: &DocumentRepository{db}, + DocTokens: &DocumentTokensRepository{db}, + Attachments: &AttachmentRepo{db}, } } diff --git a/backend/internal/services/all.go b/backend/internal/services/all.go index 8978983..bba6e43 100644 --- a/backend/internal/services/all.go +++ b/backend/internal/services/all.go @@ -16,6 +16,9 @@ func NewServices(repos *repo.AllRepos) *AllServices { Admin: &AdminService{repos}, Location: &LocationService{repos}, Labels: &LabelService{repos}, - Items: &ItemService{repos}, + Items: &ItemService{ + repo: repos, + filepath: "/tmp/content", + }, } } diff --git a/backend/internal/services/main_test.go b/backend/internal/services/main_test.go index 9f65278..c5f3575 100644 --- a/backend/internal/services/main_test.go +++ b/backend/internal/services/main_test.go @@ -22,6 +22,7 @@ var ( tRepos *repo.AllRepos tUser *ent.User tGroup *ent.Group + tSvc *AllServices ) func bootstrap() { @@ -36,10 +37,10 @@ func bootstrap() { } tUser, err = tRepos.Users.Create(ctx, types.UserCreate{ - Name: fk.RandomString(10), - Email: fk.RandomEmail(), - Password: fk.RandomString(10), - IsSuperuser: fk.RandomBool(), + Name: fk.Str(10), + Email: fk.Email(), + Password: fk.Str(10), + IsSuperuser: fk.Bool(), GroupID: tGroup.ID, }) if err != nil { @@ -62,6 +63,7 @@ func TestMain(m *testing.M) { tClient = client tRepos = repo.EntAllRepos(tClient) + tSvc = NewServices(tRepos) defer client.Close() bootstrap() diff --git a/backend/internal/services/mappers/items.go b/backend/internal/services/mappers/items.go index 11ec73b..7698c86 100644 --- a/backend/internal/services/mappers/items.go +++ b/backend/internal/services/mappers/items.go @@ -5,6 +5,19 @@ import ( "github.com/hay-kot/content/backend/internal/types" ) +func 
ToItemAttachment(attachment *ent.Attachment) *types.ItemAttachment { + return &types.ItemAttachment{ + ID: attachment.ID, + CreatedAt: attachment.CreatedAt, + UpdatedAt: attachment.UpdatedAt, + Document: types.DocumentOut{ + ID: attachment.Edges.Document.ID, + Title: attachment.Edges.Document.Title, + Path: attachment.Edges.Document.Path, + }, + } +} + func ToItemSummary(item *ent.Item) *types.ItemSummary { var location *types.LocationSummary if item.Edges.Location != nil { @@ -23,6 +36,14 @@ func ToItemSummary(item *ent.Item) *types.ItemSummary { CreatedAt: item.CreatedAt, UpdatedAt: item.UpdatedAt, + Quantity: item.Quantity, + Insured: item.Insured, + + // Warranty + LifetimeWarranty: item.LifetimeWarranty, + WarrantyExpires: item.WarrantyExpires, + WarrantyDetails: item.WarrantyDetails, + // Edges Location: location, Labels: labels, @@ -53,8 +74,14 @@ func ToItemSummaryErr(item *ent.Item, err error) (*types.ItemSummary, error) { } func ToItemOut(item *ent.Item) *types.ItemOut { + var attachments []*types.ItemAttachment + if item.Edges.Attachments != nil { + attachments = MapEach(item.Edges.Attachments, ToItemAttachment) + } + return &types.ItemOut{ ItemSummary: *ToItemSummary(item), + Attachments: attachments, } } diff --git a/backend/internal/services/service_items.go b/backend/internal/services/service_items.go index 40ac207..bb83845 100644 --- a/backend/internal/services/service_items.go +++ b/backend/internal/services/service_items.go @@ -3,8 +3,12 @@ package services import ( "context" "fmt" + "io" + "os" + "path/filepath" "github.com/google/uuid" + "github.com/hay-kot/content/backend/ent/attachment" "github.com/hay-kot/content/backend/internal/repo" "github.com/hay-kot/content/backend/internal/services/mappers" "github.com/hay-kot/content/backend/internal/types" @@ -13,6 +17,9 @@ import ( type ItemService struct { repo *repo.AllRepos + + // filepath is the root of the storage location that will be used to store all files from. 
+ filepath string } func (svc *ItemService) GetOne(ctx context.Context, gid uuid.UUID, id uuid.UUID) (*types.ItemOut, error) { @@ -41,6 +48,7 @@ func (svc *ItemService) GetAll(ctx context.Context, gid uuid.UUID) ([]*types.Ite return itemsOut, nil } + func (svc *ItemService) Create(ctx context.Context, gid uuid.UUID, data types.ItemCreate) (*types.ItemOut, error) { item, err := svc.repo.Items.Create(ctx, gid, data) if err != nil { @@ -49,6 +57,7 @@ func (svc *ItemService) Create(ctx context.Context, gid uuid.UUID, data types.It return mappers.ToItemOut(item), nil } + func (svc *ItemService) Delete(ctx context.Context, gid uuid.UUID, id uuid.UUID) error { item, err := svc.repo.Items.GetOne(ctx, id) if err != nil { @@ -66,8 +75,76 @@ func (svc *ItemService) Delete(ctx context.Context, gid uuid.UUID, id uuid.UUID) return nil } + func (svc *ItemService) Update(ctx context.Context, gid uuid.UUID, data types.ItemUpdate) (*types.ItemOut, error) { - panic("implement me") + item, err := svc.repo.Items.GetOne(ctx, data.ID) + if err != nil { + return nil, err + } + + if item.Edges.Group.ID != gid { + return nil, ErrNotOwner + } + + item, err = svc.repo.Items.Update(ctx, data) + if err != nil { + return nil, err + } + + return mappers.ToItemOut(item), nil +} + +func (svc *ItemService) attachmentPath(gid, itemId uuid.UUID, filename string) string { + return filepath.Join(svc.filepath, gid.String(), itemId.String(), filename) +} + +// AddAttachment adds an attachment to an item by creating an entry in the Documents table and linking it to the Attachment +// Table and Items table. The file provided via the reader is stored on the file system based on the provided +// relative path during construction of the service. 
+func (svc *ItemService) AddAttachment(ctx context.Context, gid, itemId uuid.UUID, filename string, file io.Reader) (*types.ItemOut, error) { + // Get the Item + item, err := svc.repo.Items.GetOne(ctx, itemId) + if err != nil { + return nil, err + } + + if item.Edges.Group.ID != gid { + return nil, ErrNotOwner + } + + // Create the document + doc, err := svc.repo.Docs.Create(ctx, gid, types.DocumentCreate{ + Title: filename, + Path: svc.attachmentPath(gid, itemId, filename), + }) + if err != nil { + return nil, err + } + + // Create the attachment + _, err = svc.repo.Attachments.Create(ctx, itemId, doc.ID, attachment.TypeAttachment) + if err != nil { + return nil, err + } + + // Read the contents and write them to a file on the file system + err = os.MkdirAll(filepath.Dir(doc.Path), os.ModePerm) + if err != nil { + return nil, err + } + + f, err := os.Create(doc.Path) + if err != nil { + log.Err(err).Msg("failed to create file") + return nil, err + } + + _, err = io.Copy(f, file) + if err != nil { + return nil, err + } + + return svc.GetOne(ctx, gid, itemId) } func (svc *ItemService) CsvImport(ctx context.Context, gid uuid.UUID, data [][]string) error { diff --git a/backend/internal/services/service_items_test.go b/backend/internal/services/service_items_test.go index d5c18c0..ceeb764 100644 --- a/backend/internal/services/service_items_test.go +++ b/backend/internal/services/service_items_test.go @@ -2,13 +2,16 @@ package services import ( "context" + "os" + "path" + "strings" "testing" + "github.com/google/uuid" + "github.com/hay-kot/content/backend/internal/types" "github.com/stretchr/testify/assert" ) - - func TestItemService_CsvImport(t *testing.T) { data := loadcsv() svc := &ItemService{ @@ -55,6 +58,14 @@ func TestItemService_CsvImport(t *testing.T) { labelNames = append(labelNames, label.Name) } + ids := []uuid.UUID{} + t.Cleanup((func() { + for _, id := range ids { + err := svc.repo.Items.Delete(context.Background(), id) + assert.NoError(t, err) + } + })) 
+ for _, item := range items { assert.Contains(t, locNames, item.Location.Name) for _, label := range item.Labels { @@ -79,6 +90,55 @@ func TestItemService_CsvImport(t *testing.T) { assert.Equal(t, csvRow.parsedSoldPrice(), item.SoldPrice) } } - } } + +func TestItemService_AddAttachment(t *testing.T) { + temp := os.TempDir() + + svc := &ItemService{ + repo: tRepos, + filepath: temp, + } + + loc, err := tSvc.Location.Create(context.Background(), tGroup.ID, types.LocationCreate{ + Description: "test", + Name: "test", + }) + assert.NoError(t, err) + assert.NotNil(t, loc) + + itmC := types.ItemCreate{ + Name: fk.Str(10), + Description: fk.Str(10), + LocationID: loc.ID, + } + + itm, err := svc.Create(context.Background(), tGroup.ID, itmC) + assert.NoError(t, err) + assert.NotNil(t, itm) + t.Cleanup(func() { + err := svc.repo.Items.Delete(context.Background(), itm.ID) + assert.NoError(t, err) + }) + + contents := fk.Str(1000) + reader := strings.NewReader(contents) + + // Setup + afterAttachment, err := svc.AddAttachment(context.Background(), tGroup.ID, itm.ID, "testfile.txt", reader) + assert.NoError(t, err) + assert.NotNil(t, afterAttachment) + + // Check that the file exists + storedPath := afterAttachment.Attachments[0].Document.Path + + // {root}/{group}/{item}/{attachment} + assert.Equal(t, path.Join(temp, tGroup.ID.String(), itm.ID.String(), "testfile.txt"), storedPath) + + // Check that the file contents are correct + bts, err := os.ReadFile(storedPath) + assert.NoError(t, err) + assert.Equal(t, contents, string(bts)) + +} diff --git a/backend/internal/types/document_types.go b/backend/internal/types/document_types.go new file mode 100644 index 0000000..c34a37a --- /dev/null +++ b/backend/internal/types/document_types.go @@ -0,0 +1,31 @@ +package types + +import ( + "time" + + "github.com/google/uuid" +) + +type DocumentOut struct { + ID uuid.UUID `json:"id"` + Title string `json:"title"` + Path string +} + +type DocumentCreate struct { + Title string 
`json:"name"` + Path string `json:"path"` +} + +type DocumentUpdate = DocumentCreate + +type DocumentToken struct { + Raw string `json:"raw"` + ExpiresAt time.Time `json:"expiresAt"` +} + +type DocumentTokenCreate struct { + TokenHash []byte `json:"tokenHash"` + DocumentID uuid.UUID `json:"documentId"` + ExpiresAt time.Time `json:"expiresAt"` +} diff --git a/backend/internal/types/item_types.go b/backend/internal/types/item_types.go index 38ca8d7..36bc8ff 100644 --- a/backend/internal/types/item_types.go +++ b/backend/internal/types/item_types.go @@ -19,6 +19,8 @@ type ItemUpdate struct { ID uuid.UUID `json:"id"` Name string `json:"name"` Description string `json:"description"` + Quantity int `json:"quantity"` + Insured bool `json:"insured"` // Edges LocationID uuid.UUID `json:"locationId"` @@ -37,12 +39,12 @@ type ItemUpdate struct { // Purchase PurchaseTime time.Time `json:"purchaseTime"` PurchaseFrom string `json:"purchaseFrom"` - PurchasePrice float64 `json:"purchasePrice"` + PurchasePrice float64 `json:"purchasePrice,string"` // Sold SoldTime time.Time `json:"soldTime"` SoldTo string `json:"soldTo"` - SoldPrice float64 `json:"soldPrice"` + SoldPrice float64 `json:"soldPrice,string"` SoldNotes string `json:"soldNotes"` // Extras @@ -56,6 +58,8 @@ type ItemSummary struct { Description string `json:"description"` CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` + Quantity int `json:"quantity"` + Insured bool `json:"insured"` // Edges Location *LocationSummary `json:"location"` @@ -74,12 +78,12 @@ type ItemSummary struct { // Purchase PurchaseTime time.Time `json:"purchaseTime"` PurchaseFrom string `json:"purchaseFrom"` - PurchasePrice float64 `json:"purchasePrice"` + PurchasePrice float64 `json:"purchasePrice,string"` // Sold SoldTime time.Time `json:"soldTime"` SoldTo string `json:"soldTo"` - SoldPrice float64 `json:"soldPrice"` + SoldPrice float64 `json:"soldPrice,string"` SoldNotes string `json:"soldNotes"` // Extras @@ -88,6 
+92,14 @@ type ItemSummary struct { type ItemOut struct { ItemSummary + Attachments []*ItemAttachment `json:"attachments"` // Future // Fields []*FieldSummary `json:"fields"` } + +type ItemAttachment struct { + ID uuid.UUID `json:"id"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Document DocumentOut `json:"document"` +} diff --git a/backend/pkgs/faker/random.go b/backend/pkgs/faker/random.go index 42ef538..05428fa 100644 --- a/backend/pkgs/faker/random.go +++ b/backend/pkgs/faker/random.go @@ -15,7 +15,11 @@ func NewFaker() *Faker { return &Faker{} } -func (f *Faker) RandomString(length int) string { +func (f *Faker) Time() time.Time { + return time.Now().Add(time.Duration(f.Num(1, 100)) * time.Hour) +} + +func (f *Faker) Str(length int) string { b := make([]rune, length) for i := range b { @@ -24,14 +28,18 @@ func (f *Faker) RandomString(length int) string { return string(b) } -func (f *Faker) RandomEmail() string { - return f.RandomString(10) + "@email.com" +func (f *Faker) Path() string { + return "/" + f.Str(10) + "/" + f.Str(10) + "/" + f.Str(10) } -func (f *Faker) RandomBool() bool { +func (f *Faker) Email() string { + return f.Str(10) + "@email.com" +} + +func (f *Faker) Bool() bool { return rand.Intn(2) == 1 } -func (f *Faker) RandomNumber(min, max int) int { +func (f *Faker) Num(min, max int) int { return rand.Intn(max-min) + min } diff --git a/backend/pkgs/faker/randoms_test.go b/backend/pkgs/faker/randoms_test.go index 79747c2..0773205 100644 --- a/backend/pkgs/faker/randoms_test.go +++ b/backend/pkgs/faker/randoms_test.go @@ -25,7 +25,7 @@ func Test_GetRandomString(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - generated[i] = faker.RandomString(10) + generated[i] = faker.Str(10) } if !ValidateUnique(generated) { @@ -41,7 +41,7 @@ func Test_GetRandomEmail(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - generated[i] = faker.RandomEmail() + generated[i] = faker.Email() } if 
!ValidateUnique(generated) { @@ -58,7 +58,7 @@ func Test_GetRandomBool(t *testing.T) { faker := NewFaker() for i := 0; i < Loops; i++ { - if faker.RandomBool() { + if faker.Bool() { trues++ } else { falses++ @@ -81,7 +81,7 @@ func Test_RandomNumber(t *testing.T) { last := MIN - 1 for i := 0; i < Loops; i++ { - n := f.RandomNumber(MIN, MAX) + n := f.Num(MIN, MAX) if n == last { t.Errorf("RandomNumber() failed to generate unique number") diff --git a/backend/pkgs/server/request.go b/backend/pkgs/server/request.go index c4b30a4..ffb76d1 100644 --- a/backend/pkgs/server/request.go +++ b/backend/pkgs/server/request.go @@ -9,7 +9,7 @@ import ( // body is decoded into the provided value. func Decode(r *http.Request, val interface{}) error { decoder := json.NewDecoder(r.Body) - decoder.DisallowUnknownFields() + // decoder.DisallowUnknownFields() if err := decoder.Decode(val); err != nil { return err } diff --git a/backend/pkgs/server/response_error_builder_test.go b/backend/pkgs/server/response_error_builder_test.go index b556a18..40de141 100644 --- a/backend/pkgs/server/response_error_builder_test.go +++ b/backend/pkgs/server/response_error_builder_test.go @@ -49,7 +49,7 @@ func Test_ErrorBuilder_AddError(t *testing.T) { errorStrings := make([]string, 10) for i := 0; i < 10; i++ { - err := errors.New(f.RandomString(10)) + err := errors.New(f.Str(10)) randomError[i] = err errorStrings[i] = err.Error() } @@ -72,7 +72,7 @@ func Test_ErrorBuilder_Respond(t *testing.T) { randomError := make([]error, 5) for i := 0; i < 5; i++ { - err := errors.New(f.RandomString(5)) + err := errors.New(f.Str(5)) randomError[i] = err } diff --git a/docs/docs/assets/img/favicon.svg b/docs/docs/assets/img/favicon.svg new file mode 100644 index 0000000..08670bb --- /dev/null +++ b/docs/docs/assets/img/favicon.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/docs/docs/assets/img/lilbox.svg b/docs/docs/assets/img/lilbox.svg new file mode 100644 index 0000000..08670bb --- /dev/null +++ 
b/docs/docs/assets/img/lilbox.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/docs/docs/index.md b/docs/docs/index.md index ac19e2f..d95772c 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -1,4 +1,19 @@ -# Welcome to Homebox! +

+
+ +
+ Homebox +
+

+

+ Docs + | + Demo + | + Discord +

+ + Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs. While developing this project I've tried to keep the following principles in mind: diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 0fc39ec..d26e480 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -1,5 +1,5 @@ site_name: Homebox -site_url: https://hay-kot.github.io/homebox/ +# site_url: https://hay-kot.github.io/homebox/ use_directory_urls: true theme: name: material @@ -22,9 +22,8 @@ theme: - navigation.expand - navigation.sections - navigation.tabs.sticky - favicon: assets/img/favicon.png - icon: - logo: material/package-variant + favicon: assets/img/favicon.svg + logo: assets/img/favicon.svg extra_css: - assets/stylesheets/extras.css diff --git a/frontend/.gitignore b/frontend/.gitignore deleted file mode 100644 index 438cb08..0000000 --- a/frontend/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -node_modules -*.log* -.nuxt -.nitro -.cache -.output -.env -dist diff --git a/frontend/README.md b/frontend/README.md deleted file mode 100644 index d90610e..0000000 --- a/frontend/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Nuxt 3 Minimal Starter - -Look at the [nuxt 3 documentation](https://v3.nuxtjs.org) to learn more. - -## Setup - -Make sure to install the dependencies: - -```bash -# yarn -yarn install - -# npm -npm install - -# pnpm -pnpm install --shamefully-hoist -``` - -## Development Server - -Start the development server on http://localhost:3000 - -```bash -npm run dev -``` - -## Production - -Build the application for production: - -```bash -npm run build -``` - -Locally preview production build: - -```bash -npm run preview -``` - -Checkout the [deployment documentation](https://v3.nuxtjs.org/guide/deploy/presets) for more information. 
diff --git a/frontend/components/App/Header.vue b/frontend/components/App/Header.vue index ca876df..678e1be 100644 --- a/frontend/components/App/Header.vue +++ b/frontend/components/App/Header.vue @@ -70,7 +70,7 @@

HomeB - + x

diff --git a/frontend/components/App/Logo.vue b/frontend/components/App/Logo.vue index 66e587a..72c0c4f 100644 --- a/frontend/components/App/Logo.vue +++ b/frontend/components/App/Logo.vue @@ -1,123 +1,47 @@ diff --git a/frontend/components/Base/Details.vue b/frontend/components/Base/Details.vue index 8206280..27d41f8 100644 --- a/frontend/components/Base/Details.vue +++ b/frontend/components/Base/Details.vue @@ -15,7 +15,7 @@ {{ dKey }}
- + {{ dValue }}
@@ -28,8 +28,13 @@ diff --git a/frontend/components/Form/DatePicker.vue b/frontend/components/Form/DatePicker.vue index 619dcd9..113ffe9 100644 --- a/frontend/components/Form/DatePicker.vue +++ b/frontend/components/Form/DatePicker.vue @@ -52,9 +52,14 @@ const selected = useVModel(props, "modelValue", emit); const dateText = computed(() => { + if (!validDate(selected.value)) { + return ""; + } + if (selected.value) { return selected.value.toLocaleDateString(); } + return ""; }); @@ -91,9 +96,7 @@ }); function select(e: MouseEvent, day: Date) { - console.log(day); selected.value = day; - console.log(selected.value); // @ts-ignore - this is a vue3 bug e.target.blur(); resetTime(); diff --git a/frontend/components/Form/Multiselect.vue b/frontend/components/Form/Multiselect.vue index cc6622a..bf0e0cc 100644 --- a/frontend/components/Form/Multiselect.vue +++ b/frontend/components/Form/Multiselect.vue @@ -17,7 +17,7 @@ v-for="(obj, idx) in items" :key="idx" :class="{ - bordered: selectedIndexes[idx], + bordered: selected[idx], }" >