diff --git a/LICENSE.txt b/LICENSE.txt index 145775438..211d32e75 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,663 +1,663 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (c) 2023 AUTOMATIC1111 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. 
+ GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (c) 2023 AUTOMATIC1111 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md index df2a01493..1b3679de2 100644 --- a/README.md +++ b/README.md @@ -1,210 +1,210 @@ -
- -# SD.Next - -**Stable Diffusion implementation with advanced features** - -[![Sponsors](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic) -![Last Commit](https://img.shields.io/github/last-commit/vladmandic/automatic?svg=true) -![License](https://img.shields.io/github/license/vladmandic/automatic?svg=true) -[![Discord](https://img.shields.io/discord/1101998836328697867?logo=Discord&svg=true)](https://discord.gg/VjvR2tabEX) - -[Wiki](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.gg/VjvR2tabEX) | [Changelog](CHANGELOG.md) - -
-
- -## Notable features - -All individual features are not listed here, instead check [ChangeLog](CHANGELOG.md) for full list of changes -- Multiple backends! - ▹ **Original | Diffusers** -- Multiple diffusion models! - ▹ **Stable Diffusion 1.5/2.1 | SD-XL | LCM | Segmind | Kandinsky | Pixart-α | Würstchen | aMUSEd | DeepFloyd IF | UniDiffusion | SD-Distilled | BLiP Diffusion | etc.** -- Built-in Control for Text, Image, Batch and video processing! - ▹ **ControlNet | ControlNet XS | Control LLLite | T2I Adapters | IP Adapters** -- Multiplatform! - ▹ **Windows | Linux | MacOS with CPU | nVidia | AMD | IntelArc | DirectML | OpenVINO | ONNX+Olive** -- Platform specific autodetection and tuning performed on install -- Optimized processing with latest `torch` developments with built-in support for `torch.compile` and multiple compile backends -- Improved prompt parser -- Enhanced *Lora*/*LoCon*/*Lyco* code supporting latest trends in training -- Built-in queue management -- Enterprise level logging and hardened API -- Modern localization and hints engine -- Broad compatibility with existing extensions ecosystem and new extensions manager -- Built in installer with automatic updates and dependency management -- Modernized UI with theme support and number of built-in themes *(dark and light)* - -
- -![Screenshot-Dark](html/xmas-default.jpg) -![Screenshot-Control](html/xmas-control.jpg) -![Screenshot-Light](html/light-teal.jpg) - -
- -## Backend support - -**SD.Next** supports two main backends: *Original* and *Diffusers*: - -- **Original**: Based on [LDM](https://github.com/Stability-AI/stablediffusion) reference implementation and significantly expanded on by [A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - This is the default backend and it is fully compatible with all existing functionality and extensions - Supports **SD 1.x** and **SD 2.x** models - All other model types such as *SD-XL, LCM, PixArt, Segmind, Kandinsky, etc.* require backend **Diffusers** -- **Diffusers**: Based on new [Huggingface Diffusers](https://huggingface.co/docs/diffusers/index) implementation - Supports *original* SD models as well as *all* models listed below - See [wiki article](https://github.com/vladmandic/automatic/wiki/Diffusers) for more information - -## Model support - -Additional models will be added as they become available and there is public interest in them - -- [RunwayML Stable Diffusion](https://github.com/Stability-AI/stablediffusion/) 1.x and 2.x *(all variants)* -- [StabilityAI Stable Diffusion XL](https://github.com/Stability-AI/generative-models) -- [StabilityAI Stable Video Diffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) Base and XT -- [LCM: Latent Consistency Models](https://github.com/openai/consistency_models) -- [aMUSEd 256](https://huggingface.co/amused/amused-256) 256 and 512 -- [Segmind Vega](https://huggingface.co/segmind/Segmind-Vega) -- [Segmind SSD-1B](https://huggingface.co/segmind/SSD-1B) -- [Kandinsky](https://github.com/ai-forever/Kandinsky-2) *2.1 and 2.2 and latest 3.0* -- [PixArt-α XL 2](https://github.com/PixArt-alpha/PixArt-alpha) *Medium and Large* -- [Warp Wuerstchen](https://huggingface.co/blog/wuertschen) -- [Playground](https://huggingface.co/playgroundai/playground-v2-256px-base) *v1, v2 256, v2 512, v2 1024* -- [Tsinghua UniDiffusion](https://github.com/thu-ml/unidiffuser) -- [DeepFloyd IF](https://github.com/deep-floyd/IF) *Medium and Large* -- [ModelScope T2V](https://huggingface.co/damo-vilab/text-to-video-ms-1.7b) -- [Segmind SD Distilled](https://huggingface.co/blog/sd_distillation) *(all variants)* -- [BLIP-Diffusion](https://dxli94.github.io/BLIP-Diffusion-website/) - - -Also supported are modifiers such as: -- **LCM** and **Turbo** (Adversarial Diffusion Distillation) networks -- All **LoRA** types such as LoCon, LyCORIS, HADA, IA3, Lokr, OFT -- **AnimateDiff** for SD 1.5 -- **IP-Adapters** for SD 1.5 and SD-XL - -> [!IMPORTANT] -> - Loading any model other than standard SD 1.x / SD 2.x requires use of backend **Diffusers** -> - Loading any other models using **Original** backend is not supported -> - Loading manually download model `.safetensors` files is supported for SD 1.x / SD 2.x / SD-XL models only -> - For all other model types, use backend **Diffusers** and use built in Model downloader or - select model from Networks -> Models -> Reference list in which case it will be auto-downloaded and loaded - -## Platform support - -- *nVidia* GPUs using **CUDA** libraries on both *Windows and Linux* -- *AMD* GPUs using **ROCm** libraries on *Linux* - Support will be extended to *Windows* once AMD releases ROCm for Windows -- *Intel Arc* GPUs using **OneAPI** with *IPEX XPU* libraries on both *Windows and Linux* -- Any GPU compatible with *DirectX* on *Windows* using **DirectML** libraries - This includes support for AMD GPUs that are not supported by native ROCm libraries -- Any GPU or device compatible with **OpenVINO** libraries on 
both *Windows and Linux* -- *Apple M1/M2* on *OSX* using built-in support in Torch with **MPS** optimizations -- *ONNX/Olive* (experimental) - -## Install - -- [Step-by-step install guide](https://github.com/vladmandic/automatic/wiki/Installation) -- [Advanced install notes](https://github.com/vladmandic/automatic/wiki/Advanced-Install) -- [Common installation errors](https://github.com/vladmandic/automatic/discussions/1627) -- [FAQ](https://github.com/vladmandic/automatic/discussions/1011) -- If you can't run us locally, try our friends at [RunDuffusion!](https://rundiffusion.com?utm_source=github&utm_medium=referral&utm_campaign=SDNext) - -> [!TIP] -> - Server can run without virtual environment, - Recommended to use `VENV` to avoid library version conflicts with other applications -> - **nVidia/CUDA** / **AMD/ROCm** / **Intel/OneAPI** are auto-detected if present and available, - For any other use case such as **DirectML**, **ONNX/Olive**, **OpenVINO** specify required parameter explicitly - or wrong packages may be installed as installer will assume CPU-only environment -> - Full startup sequence is logged in `sdnext.log`, - so if you encounter any issues, please check it first - -### Run - -Once SD.Next is installed, simply run `webui.ps1` or `webui.bat` (*Windows*) or `webui.sh` (*Linux or MacOS*) - -Below is partial list of all available parameters, run `webui --help` for the full list: - - Server options: - --config CONFIG Use specific server configuration file, default: config.json - --ui-config UI_CONFIG Use specific UI configuration file, default: ui-config.json - --medvram Split model stages and keep only active part in VRAM, default: False - --lowvram Split model components and keep only active part in VRAM, default: False - --ckpt CKPT Path to model checkpoint to load immediately, default: None - --vae VAE Path to VAE checkpoint to load immediately, default: None - --data-dir DATA_DIR Base path where all user data is stored, default: - --models-dir MODELS_DIR Base path where all models are stored, default: models - --share Enable UI accessible through Gradio site, default: False - --insecure Enable extensions tab regardless of other options, default: False - --listen Launch web server using public IP address, default: False - --auth AUTH Set access authentication like "user:pwd,user:pwd"" - --autolaunch Open the UI URL in the system's default browser upon launch - --docs Mount Gradio docs at /docs, default: False - --no-hashing Disable hashing of checkpoints, default: False - --no-metadata Disable reading of metadata from models, default: False - --no-download Disable download of default model, default: False - --backend {original,diffusers} force model pipeline type - - Setup options: - --debug Run installer with debug logging, default: False - --reset Reset main repository to latest version, default: False - --upgrade Upgrade main repository to latest version, default: False - --requirements Force re-check of requirements, default: False - --quick Run with startup sequence only, default: False - --use-directml Use DirectML if no compatible GPU is detected, default: False - --use-openvino Use Intel OpenVINO backend, default: False - --use-ipex Force use Intel OneAPI XPU backend, default: False - --use-cuda Force use nVidia CUDA backend, default: False - --use-rocm Force use AMD ROCm backend, default: False - --use-xformers Force use xFormers cross-optimization, default: False - --skip-requirements Skips checking and installing requirements, default: False - --skip-extensions 
Skips running individual extension installers, default: False - --skip-git Skips running all GIT operations, default: False - --skip-torch Skips running Torch checks, default: False - --skip-all Skips running all checks, default: False - --experimental Allow unsupported versions of libraries, default: False - --reinstall Force reinstallation of all requirements, default: False - --safe Run in safe mode with no user extensions - - -## Notes - -### **Extensions** - -SD.Next comes with several extensions pre-installed: - -- [ControlNet](https://github.com/Mikubill/sd-webui-controlnet) -- [Agent Scheduler](https://github.com/ArtVentureX/sd-webui-agent-scheduler) -- [Image Browser](https://github.com/AlUlkesh/stable-diffusion-webui-images-browser) - -### **Collab** - -- We'd love to have additional maintainers with full admin rights. If you're interested, ping us! -- In addition to general cross-platform code, desire is to have a lead for each of the main platforms. -This should be fully cross-platform, but we'd really love to have additional contributors and/or maintainers to join and help lead the efforts on different platforms. - -## Credits - -- Main credit goes to [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) -- Additional credits are listed in [Credits](https://github.com/AUTOMATIC1111/stable-diffusion-webui/#credits) -- Licenses for modules are listed in [Licenses](html/licenses.html) - -### **Docs** - -If you're unsure how to use a feature, best place to start is [Wiki](https://github.com/vladmandic/automatic/wiki) and if its not there, -check [ChangeLog](CHANGELOG.md) for when feature was first introduced as it will always have a short note on how to use it - -- [Wiki](https://github.com/vladmandic/automatic/wiki) -- [ReadMe](README.md) -- [ToDo](TODO.md) -- [ChangeLog](CHANGELOG.md) -- [CLI Tools](cli/README.md) - -### **Sponsors** - -
-
-Allan Grant | Brent Ozar | Matthew Runo | HELLO WORLD SAS | Salad Technologies | a.v.mantzaris | Toby Worth
-
- -
+
+ +# SD.Next + +**Stable Diffusion implementation with advanced features** + +[![Sponsors](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic) +![Last Commit](https://img.shields.io/github/last-commit/vladmandic/automatic?svg=true) +![License](https://img.shields.io/github/license/vladmandic/automatic?svg=true) +[![Discord](https://img.shields.io/discord/1101998836328697867?logo=Discord&svg=true)](https://discord.gg/VjvR2tabEX) + +[Wiki](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.gg/VjvR2tabEX) | [Changelog](CHANGELOG.md) + +
+
+
+## Notable features
+
+Not all individual features are listed here; instead, check the [ChangeLog](CHANGELOG.md) for the full list of changes
+- Multiple backends!
+  ▹ **Original | Diffusers**
+- Multiple diffusion models!
+  ▹ **Stable Diffusion 1.5/2.1 | SD-XL | LCM | Segmind | Kandinsky | Pixart-α | Würstchen | aMUSEd | DeepFloyd IF | UniDiffusion | SD-Distilled | BLiP Diffusion | etc.**
+- Built-in Control for Text, Image, Batch and video processing!
+  ▹ **ControlNet | ControlNet XS | Control LLLite | T2I Adapters | IP Adapters**
+- Multiplatform!
+  ▹ **Windows | Linux | MacOS with CPU | nVidia | AMD | IntelArc | DirectML | OpenVINO | ONNX+Olive**
+- Platform-specific autodetection and tuning performed on install
+- Optimized processing using the latest `torch` developments, with built-in support for `torch.compile` and multiple compile backends (see the sketch after this list)
+- Improved prompt parser
+- Enhanced *Lora*/*LoCon*/*Lyco* code supporting the latest trends in training
+- Built-in queue management
+- Enterprise-level logging and hardened API
+- Modern localization and hints engine
+- Broad compatibility with the existing extensions ecosystem and a new extensions manager
+- Built-in installer with automatic updates and dependency management
+- Modernized UI with theme support and a number of built-in themes *(dark and light)*
+
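+The `torch.compile` bullet above refers to PyTorch 2.x graph compilation, which SD.Next exposes with a choice of compile backends. A minimal sketch of the underlying mechanism, using a stand-in module and the stock `inductor` backend (illustrative assumptions only, not SD.Next's actual integration code):
+
+```python
+# Sketch: what torch.compile support amounts to conceptually.
+# TinyUNetStub and the backend choice are assumptions for illustration.
+import torch
+import torch.nn as nn
+
+class TinyUNetStub(nn.Module):
+    """Stand-in for a diffusion UNet, just to keep the example runnable."""
+    def __init__(self):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Conv2d(4, 8, kernel_size=3, padding=1),
+            nn.SiLU(),
+            nn.Conv2d(8, 4, kernel_size=3, padding=1),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+model = TinyUNetStub().eval()
+# torch.compile traces the module and JIT-compiles it with the chosen backend;
+# SD.Next lets you select between several such backends
+compiled = torch.compile(model, backend="inductor")
+with torch.no_grad():
+    out = compiled(torch.randn(1, 4, 16, 16))  # first call triggers compilation
+```
+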
+ +![Screenshot-Dark](html/xmas-default.jpg) +![Screenshot-Control](html/xmas-control.jpg) +![Screenshot-Light](html/light-teal.jpg) + +
+
+## Backend support
+
+**SD.Next** supports two main backends: *Original* and *Diffusers*:
+
+- **Original**: Based on the [LDM](https://github.com/Stability-AI/stablediffusion) reference implementation, significantly expanded on by [A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
+  This is the default backend and it is fully compatible with all existing functionality and extensions
+  Supports **SD 1.x** and **SD 2.x** models
+  All other model types such as *SD-XL, LCM, PixArt, Segmind, Kandinsky, etc.* require backend **Diffusers**
+- **Diffusers**: Based on the new [Huggingface Diffusers](https://huggingface.co/docs/diffusers/index) implementation
+  Supports *original* SD models as well as *all* models listed below
+  See the [wiki article](https://github.com/vladmandic/automatic/wiki/Diffusers) for more information
+
+## Model support
+
+Additional models will be added as they become available and as there is public interest in them
+
+- [RunwayML Stable Diffusion](https://github.com/Stability-AI/stablediffusion/) 1.x and 2.x *(all variants)*
+- [StabilityAI Stable Diffusion XL](https://github.com/Stability-AI/generative-models)
+- [StabilityAI Stable Video Diffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) Base and XT
+- [LCM: Latent Consistency Models](https://github.com/openai/consistency_models)
+- [aMUSEd](https://huggingface.co/amused/amused-256) 256 and 512
+- [Segmind Vega](https://huggingface.co/segmind/Segmind-Vega)
+- [Segmind SSD-1B](https://huggingface.co/segmind/SSD-1B)
+- [Kandinsky](https://github.com/ai-forever/Kandinsky-2) *2.1, 2.2 and the latest 3.0*
+- [PixArt-α XL 2](https://github.com/PixArt-alpha/PixArt-alpha) *Medium and Large*
+- [Warp Wuerstchen](https://huggingface.co/blog/wuertschen)
+- [Playground](https://huggingface.co/playgroundai/playground-v2-256px-base) *v1, v2 256, v2 512, v2 1024*
+- [Tsinghua UniDiffusion](https://github.com/thu-ml/unidiffuser)
+- [DeepFloyd IF](https://github.com/deep-floyd/IF) *Medium and Large*
+- [ModelScope T2V](https://huggingface.co/damo-vilab/text-to-video-ms-1.7b)
+- [Segmind SD Distilled](https://huggingface.co/blog/sd_distillation) *(all variants)*
+- [BLIP-Diffusion](https://dxli94.github.io/BLIP-Diffusion-website/)
+
+
+Also supported are modifiers such as:
+- **LCM** and **Turbo** (Adversarial Diffusion Distillation) networks
+- All **LoRA** types such as LoCon, LyCORIS, HADA, IA3, Lokr, OFT
+- **AnimateDiff** for SD 1.5
+- **IP-Adapters** for SD 1.5 and SD-XL
+
+> [!IMPORTANT]
+> - Loading any model other than standard SD 1.x / SD 2.x requires use of backend **Diffusers**
+> - Loading any other models using the **Original** backend is not supported
+> - Loading manually downloaded model `.safetensors` files is supported for SD 1.x / SD 2.x / SD-XL models only
+> - For all other model types, use backend **Diffusers** and use the built-in Model downloader, or
+    select the model from the Networks -> Models -> Reference list, in which case it will be auto-downloaded and loaded
+
+## Platform support
+
+- *nVidia* GPUs using **CUDA** libraries on both *Windows and Linux*
+- *AMD* GPUs using **ROCm** libraries on *Linux*
+  Support will be extended to *Windows* once AMD releases ROCm for Windows
+- *Intel Arc* GPUs using **OneAPI** with *IPEX XPU* libraries on both *Windows and Linux*
+- Any GPU compatible with *DirectX* on *Windows* using **DirectML** libraries
+  This includes support for AMD GPUs that are not supported by native ROCm libraries
+- Any GPU or device compatible with **OpenVINO** libraries on both *Windows and Linux*
+- *Apple M1/M2* on *OSX* using built-in support in Torch with **MPS** optimizations
+- *ONNX/Olive* (experimental)
+
+## Install
+
+- [Step-by-step install guide](https://github.com/vladmandic/automatic/wiki/Installation)
+- [Advanced install notes](https://github.com/vladmandic/automatic/wiki/Advanced-Install)
+- [Common installation errors](https://github.com/vladmandic/automatic/discussions/1627)
+- [FAQ](https://github.com/vladmandic/automatic/discussions/1011)
+- If you can't run us locally, try our friends at [RunDiffusion!](https://rundiffusion.com?utm_source=github&utm_medium=referral&utm_campaign=SDNext)
+
+> [!TIP]
+> - The server can run without a virtual environment, but using a `VENV` is recommended to avoid library version conflicts with other applications
+> - **nVidia/CUDA** / **AMD/ROCm** / **Intel/OneAPI** are auto-detected if present and available;
+    for any other use case, such as **DirectML**, **ONNX/Olive** or **OpenVINO**, specify the required parameter explicitly,
+    otherwise the wrong packages may be installed, as the installer will assume a CPU-only environment
+> - The full startup sequence is logged in `sdnext.log`,
+    so if you encounter any issues, check it first
+
+### Run
+
+Once SD.Next is installed, simply run `webui.ps1` or `webui.bat` (*Windows*) or `webui.sh` (*Linux or MacOS*)
+
+Below is a partial list of the available parameters; run `webui --help` for the full list:
+
+  Server options:
+    --config CONFIG                  Use specific server configuration file, default: config.json
+    --ui-config UI_CONFIG            Use specific UI configuration file, default: ui-config.json
+    --medvram                        Split model stages and keep only active part in VRAM, default: False
+    --lowvram                        Split model components and keep only active part in VRAM, default: False
+    --ckpt CKPT                      Path to model checkpoint to load immediately, default: None
+    --vae VAE                        Path to VAE checkpoint to load immediately, default: None
+    --data-dir DATA_DIR              Base path where all user data is stored, default:
+    --models-dir MODELS_DIR          Base path where all models are stored, default: models
+    --share                          Enable UI accessible through Gradio site, default: False
+    --insecure                       Enable extensions tab regardless of other options, default: False
+    --listen                         Launch web server using public IP address, default: False
+    --auth AUTH                      Set access authentication like "user:pwd,user:pwd"
+    --autolaunch                     Open the UI URL in the system's default browser upon launch
+    --docs                           Mount Gradio docs at /docs, default: False
+    --no-hashing                     Disable hashing of checkpoints, default: False
+    --no-metadata                    Disable reading of metadata from models, default: False
+    --no-download                    Disable download of default model, default: False
+    --backend {original,diffusers}   Force model pipeline type
+
+  Setup options:
+    --debug                          Run installer with debug logging, default: False
+    --reset                          Reset main repository to latest version, default: False
+    --upgrade                        Upgrade main repository to latest version, default: False
+    --requirements                   Force re-check of requirements, default: False
+    --quick                          Run with startup sequence only, default: False
+    --use-directml                   Use DirectML if no compatible GPU is detected, default: False
+    --use-openvino                   Use Intel OpenVINO backend, default: False
+    --use-ipex                       Force use of Intel OneAPI XPU backend, default: False
+    --use-cuda                       Force use of nVidia CUDA backend, default: False
+    --use-rocm                       Force use of AMD ROCm backend, default: False
+    --use-xformers                   Force use of xFormers cross-optimization, default: False
+    --skip-requirements              Skips checking and installing requirements, default: False
+    --skip-extensions                Skips running individual extension installers, default: False
+    --skip-git                       Skips running all GIT operations, default: False
+    --skip-torch                     Skips running Torch checks, default: False
+    --skip-all                       Skips running all checks, default: False
+    --experimental                   Allow unsupported versions of libraries, default: False
+    --reinstall                      Force reinstallation of all requirements, default: False
+    --safe                           Run in safe mode with no user extensions
+
+
+## Notes
+
+### **Extensions**
+
+SD.Next comes with several extensions pre-installed:
+
+- [ControlNet](https://github.com/Mikubill/sd-webui-controlnet)
+- [Agent Scheduler](https://github.com/ArtVentureX/sd-webui-agent-scheduler)
+- [Image Browser](https://github.com/AlUlkesh/stable-diffusion-webui-images-browser)
+
+### **Collab**
+
+- We'd love to have additional maintainers with full admin rights. If you're interested, ping us!
+- In addition to general cross-platform code, the goal is to have a lead for each of the main platforms.
+The project should be fully cross-platform, but we'd really love additional contributors and/or maintainers to join and help lead the efforts on the different platforms.
+
+## Credits
+
+- Main credit goes to [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
+- Additional credits are listed in [Credits](https://github.com/AUTOMATIC1111/stable-diffusion-webui/#credits)
+- Licenses for modules are listed in [Licenses](html/licenses.html)
+
+### **Docs**
+
+If you're unsure how to use a feature, the best place to start is the [Wiki](https://github.com/vladmandic/automatic/wiki); if it's not there,
+check the [ChangeLog](CHANGELOG.md) for when the feature was first introduced, as it will always have a short note on how to use it
+
+- [Wiki](https://github.com/vladmandic/automatic/wiki)
+- [ReadMe](README.md)
+- [ToDo](TODO.md)
+- [ChangeLog](CHANGELOG.md)
+- [CLI Tools](cli/README.md)
+
+### **Sponsors**
+
+
+Allan Grant | Brent Ozar | Matthew Runo | HELLO WORLD SAS | Salad Technologies | a.v.mantzaris | Toby Worth
+
+ +
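The first code diff below, `extensions-builtin/Lora/extra_networks_lora.py`, is where prompt tags such as `<lora:name:0.8>` are resolved into a text-encoder multiplier and per-UNet-block multipliers. A simplified standalone sketch of that multiplier logic, assuming a hypothetical `parse_lora_tag` helper (the real `activate()` receives the parts pre-split by the extra-networks parser):

```python
# Simplified re-implementation of the multiplier parsing in ExtraNetworkLora.activate();
# parse_lora_tag is a hypothetical helper, not part of the actual codebase.
def parse_lora_tag(tag: str):
    parts = tag.strip("<>").split(":")[1:]  # drop the leading "lora" keyword
    positional = [p for p in parts if "=" not in p]
    named = dict(p.split("=", 1) for p in parts if "=" in p)
    name = positional[0]
    te = float(named.get("te", positional[1] if len(positional) > 1 else 1.0))
    # one multiplier per UNet stage: down blocks, mid block, up blocks
    unet = [float(named.get("unet", positional[2] if len(positional) > 2 else te))] * 3
    unet[0] = float(named.get("in", unet[0]))
    unet[1] = float(named.get("mid", unet[1]))
    unet[2] = float(named.get("out", unet[2]))
    dyn_dim = int(named["dyn"]) if "dyn" in named else None
    return name, te, unet, dyn_dim

print(parse_lora_tag("<lora:myStyle:0.8:unet=0.6:mid=0.4>"))
# -> ('myStyle', 0.8, [0.6, 0.4, 0.6], None)
```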
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index 6cdfd03a8..4ed4c4776 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,85 +1,85 @@
-import time
-import networks
-import lora_patches
-from modules import extra_networks, shared
-
-
-class ExtraNetworkLora(extra_networks.ExtraNetwork):
-
-    def __init__(self):
-        super().__init__('lora')
-        self.active = False
-        self.errors = {}
-        networks.originals = lora_patches.LoraPatches()
-
-    """mapping of network names to the number of errors the network had during operation"""
-
-    def activate(self, p, params_list):
-        t0 = time.time()
-        additional = shared.opts.sd_lora
-        self.errors.clear()
-        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
-            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
-            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
-        if len(params_list) > 0:
-            self.active = True
-            networks.originals.apply() # apply patches
-            if networks.debug:
-                shared.log.debug("LoRA activate")
-        names = []
-        te_multipliers = []
-        unet_multipliers = []
-        dyn_dims = []
-        for params in params_list:
-            assert params.items
-            names.append(params.positional[0])
-            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
-            te_multiplier = float(params.named.get("te", te_multiplier))
-            unet_multiplier = [float(params.positional[2]) if len(params.positional) > 2 else te_multiplier] * 3
-            unet_multiplier = [float(params.named.get("unet", unet_multiplier[0]))] * 3
-            unet_multiplier[0] = float(params.named.get("in", unet_multiplier[0]))
-            unet_multiplier[1] = float(params.named.get("mid", unet_multiplier[1]))
-            unet_multiplier[2] = float(params.named.get("out", unet_multiplier[2]))
-            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
-            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
-            te_multipliers.append(te_multiplier)
-            unet_multipliers.append(unet_multiplier)
-            dyn_dims.append(dyn_dim)
-        t1 = time.time()
-        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
-        t2 = time.time()
-        if shared.opts.lora_add_hashes_to_infotext:
-            network_hashes = []
-            for item in networks.loaded_networks:
-                shorthash = item.network_on_disk.shorthash
-                if not shorthash:
-                    continue
-                alias = item.mentioned_name
-                if not alias:
-                    continue
-                alias = alias.replace(":", "").replace(",", "")
-                network_hashes.append(f"{alias}: {shorthash}")
-            if network_hashes:
-                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
-        if len(names) > 0:
-            shared.log.info(f'LoRA apply: {names} patch={t1-t0:.2f} load={t2-t1:.2f}')
-        elif self.active:
-            self.active = False
-
-    def deactivate(self, p):
-        if shared.backend == shared.Backend.DIFFUSERS and hasattr(shared.sd_model, "unload_lora_weights") and hasattr(shared.sd_model, "text_encoder"):
-            if 'CLIP' in shared.sd_model.text_encoder.__class__.__name__ and not (shared.opts.cuda_compile and shared.opts.cuda_compile_backend == "openvino_fx"):
-                if shared.opts.lora_fuse_diffusers:
-                    shared.sd_model.unfuse_lora()
-                shared.sd_model.unload_lora_weights()
-        if not self.active and getattr(networks, "originals", None ) is not None:
-            networks.originals.undo() # remove patches
-            if networks.debug:
-                shared.log.debug("LoRA deactivate")
-        if self.active and networks.debug:
-            shared.log.debug(f"LoRA end: load={networks.timer['load']:.2f} apply={networks.timer['apply']:.2f} restore={networks.timer['restore']:.2f}")
-        if self.errors:
-            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
-            for k, v in self.errors.items():
-                shared.log.error(f'LoRA errors: file="{k}" errors={v}')
-        self.errors.clear()
+import time
+import networks
+import lora_patches
+from modules import extra_networks, shared
+
+
+class ExtraNetworkLora(extra_networks.ExtraNetwork):
+
+    def __init__(self):
+        super().__init__('lora')
+        self.active = False
+        self.errors = {}
+        networks.originals = lora_patches.LoraPatches()
+
+    """mapping of network names to the number of errors the network had during operation"""
+
+    def activate(self, p, params_list):
+        t0 = time.time()
+        additional = shared.opts.sd_lora
+        self.errors.clear()
+        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
+            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
+            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
+        if len(params_list) > 0:
+            self.active = True
+            networks.originals.apply() # apply patches
+            if networks.debug:
+                shared.log.debug("LoRA activate")
+        names = []
+        te_multipliers = []
+        unet_multipliers = []
+        dyn_dims = []
+        for params in params_list:
+            assert params.items
+            names.append(params.positional[0])
+            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
+            te_multiplier = float(params.named.get("te", te_multiplier))
+            unet_multiplier = [float(params.positional[2]) if len(params.positional) > 2 else te_multiplier] * 3
+            unet_multiplier = [float(params.named.get("unet", unet_multiplier[0]))] * 3
+            unet_multiplier[0] = float(params.named.get("in", unet_multiplier[0]))
+            unet_multiplier[1] = float(params.named.get("mid", unet_multiplier[1]))
+            unet_multiplier[2] = float(params.named.get("out", unet_multiplier[2]))
+            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
+            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
+            te_multipliers.append(te_multiplier)
+            unet_multipliers.append(unet_multiplier)
+            dyn_dims.append(dyn_dim)
+        t1 = time.time()
+        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
+        t2 = time.time()
+        if shared.opts.lora_add_hashes_to_infotext:
+            network_hashes = []
+            for item in networks.loaded_networks:
+                shorthash = item.network_on_disk.shorthash
+                if not shorthash:
+                    continue
+                alias = item.mentioned_name
+                if not alias:
+                    continue
+                alias = alias.replace(":", "").replace(",", "")
+                network_hashes.append(f"{alias}: {shorthash}")
+            if network_hashes:
+                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
+        if len(names) > 0:
+            shared.log.info(f'LoRA apply: {names} patch={t1-t0:.2f} load={t2-t1:.2f}')
+        elif self.active:
+            self.active = False
+
+    def deactivate(self, p):
+        if shared.backend == shared.Backend.DIFFUSERS and hasattr(shared.sd_model, "unload_lora_weights") and hasattr(shared.sd_model, "text_encoder"):
+            if 'CLIP' in shared.sd_model.text_encoder.__class__.__name__ and not (shared.opts.cuda_compile and shared.opts.cuda_compile_backend == "openvino_fx"):
+                if shared.opts.lora_fuse_diffusers:
+                    shared.sd_model.unfuse_lora()
+                shared.sd_model.unload_lora_weights()
+        if not self.active and getattr(networks, "originals", None ) is not None:
+            networks.originals.undo() # remove patches
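+            # the load/apply/restore timers in the "LoRA end" debug message below
+            # are reset to zero in LoraPatches.apply() and accumulated in
+            # networks.py, outside this diff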
+ if networks.debug: + shared.log.debug("LoRA deactivate") + if self.active and networks.debug: + shared.log.debug(f"LoRA end: load={networks.timer['load']:.2f} apply={networks.timer['apply']:.2f} restore={networks.timer['restore']:.2f}") + if self.errors: + p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items())) + for k, v in self.errors.items(): + shared.log.error(f'LoRA errors: file="{k}" errors={v}') + self.errors.clear() diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 742be4c61..45cc5c9df 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,8 +1,8 @@ -import networks - -list_available_loras = networks.list_available_networks -available_loras = networks.available_networks -available_lora_aliases = networks.available_network_aliases -available_lora_hash_lookup = networks.available_network_hash_lookup -forbidden_lora_aliases = networks.forbidden_network_aliases -loaded_loras = networks.loaded_networks +import networks + +list_available_loras = networks.list_available_networks +available_loras = networks.available_networks +available_lora_aliases = networks.available_network_aliases +available_lora_hash_lookup = networks.available_network_hash_lookup +forbidden_lora_aliases = networks.forbidden_network_aliases +loaded_loras = networks.loaded_networks diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py index 9ea6a10c3..ca7c56e2c 100644 --- a/extensions-builtin/Lora/lora_patches.py +++ b/extensions-builtin/Lora/lora_patches.py @@ -1,52 +1,52 @@ -import torch -import networks -from modules import patches, shared - - -class LoraPatches: - def __init__(self): - self.active = False - self.Linear_forward = None - self.Linear_load_state_dict = None - self.Conv2d_forward = None - self.Conv2d_load_state_dict = None - self.GroupNorm_forward = None - self.GroupNorm_load_state_dict = None - self.LayerNorm_forward = None - self.LayerNorm_load_state_dict = None - self.MultiheadAttention_forward = None - self.MultiheadAttention_load_state_dict = None - - def apply(self): - if self.active or shared.opts.lora_force_diffusers: - return - self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward) - self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict) - self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward) - self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict) - self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward) - self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict) - self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward) - self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict) - self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward) - self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', 
networks.network_MultiheadAttention_load_state_dict) - networks.timer['load'] = 0 - networks.timer['apply'] = 0 - networks.timer['restore'] = 0 - self.active = True - - def undo(self): - if not self.active or shared.opts.lora_force_diffusers: - return - self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') # pylint: disable=E1128 - self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') # pylint: disable=E1128 - self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') # pylint: disable=E1128 - self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') # pylint: disable=E1128 - self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') # pylint: disable=E1128 - self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') # pylint: disable=E1128 - self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') # pylint: disable=E1128 - self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') # pylint: disable=E1128 - self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') # pylint: disable=E1128 - self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') # pylint: disable=E1128 - patches.originals.pop(__name__, None) - self.active = False +import torch +import networks +from modules import patches, shared + + +class LoraPatches: + def __init__(self): + self.active = False + self.Linear_forward = None + self.Linear_load_state_dict = None + self.Conv2d_forward = None + self.Conv2d_load_state_dict = None + self.GroupNorm_forward = None + self.GroupNorm_load_state_dict = None + self.LayerNorm_forward = None + self.LayerNorm_load_state_dict = None + self.MultiheadAttention_forward = None + self.MultiheadAttention_load_state_dict = None + + def apply(self): + if self.active or shared.opts.lora_force_diffusers: + return + self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward) + self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict) + self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward) + self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict) + self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward) + self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict) + self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward) + self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict) + self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward) + self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict) + networks.timer['load'] = 0 + networks.timer['apply'] = 0 + networks.timer['restore'] = 0 + self.active = True + + def undo(self): + 
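+        # undo() mirrors apply() above: each patches.undo() call hands the layer's
+        # forward and _load_from_state_dict back to the original implementation
+        # recorded by modules.patches when the patch was first applied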
if not self.active or shared.opts.lora_force_diffusers: + return + self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') # pylint: disable=E1128 + self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') # pylint: disable=E1128 + self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') # pylint: disable=E1128 + self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') # pylint: disable=E1128 + self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') # pylint: disable=E1128 + self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') # pylint: disable=E1128 + self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') # pylint: disable=E1128 + self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') # pylint: disable=E1128 + self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') # pylint: disable=E1128 + self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') # pylint: disable=E1128 + patches.originals.pop(__name__, None) + self.active = False diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 1679a0ce6..e0e1afa78 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -1,68 +1,68 @@ -import torch - - -def make_weight_cp(t, wa, wb): - temp = torch.einsum('i j k l, j r -> i r k l', t, wb) - return torch.einsum('i j k l, i r -> r j k l', temp, wa) - - -def rebuild_conventional(up, down, shape, dyn_dim=None): - up = up.reshape(up.size(0), -1) - down = down.reshape(down.size(0), -1) - if dyn_dim is not None: - up = up[:, :dyn_dim] - down = down[:dyn_dim, :] - return (up @ down).reshape(shape) - - -def rebuild_cp_decomposition(up, down, mid): - up = up.reshape(up.size(0), -1) - down = down.reshape(down.size(0), -1) - return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down) - - -# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py -def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: - ''' - return a tuple of two value of input dimension decomposed by the number closest to factor - second value is higher or equal than first value. - - In LoRA with Kroneckor Product, first value is a value for weight scale. - secon value is a value for weight. - - Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. - - examples) - factor - -1 2 4 8 16 ... 
-        127 -> 1, 127   127 -> 1, 127   127 -> 1, 127    127 -> 1, 127    127 -> 1, 127
-        128 -> 8, 16    128 -> 2, 64    128 -> 4, 32     128 -> 8, 16     128 -> 8, 16
-        250 -> 10, 25   250 -> 2, 125   250 -> 2, 125    250 -> 5, 50     250 -> 10, 25
-        360 -> 8, 45    360 -> 2, 180   360 -> 4, 90     360 -> 8, 45     360 -> 12, 30
-        512 -> 16, 32   512 -> 2, 256   512 -> 4, 128    512 -> 8, 64     512 -> 16, 32
-        1024 -> 32, 32  1024 -> 2, 512  1024 -> 4, 256   1024 -> 8, 128   1024 -> 16, 64
-    '''
-
-    if factor > 0 and (dimension % factor) == 0:
-        m = factor
-        n = dimension // factor
-        if m > n:
-            n, m = m, n
-        return m, n
-    if factor < 0:
-        factor = dimension
-    m, n = 1, dimension
-    length = m + n
-    while m < n:
-        new_m = m + 1
-        while dimension % new_m != 0:
-            new_m += 1
-        new_n = dimension // new_m
-        if new_m + new_n > length or new_m > factor:
-            break
-        else:
-            m, n = new_m, new_n
-    if m > n:
-        n, m = m, n
-    return m, n
-
+import torch
+
+
+def make_weight_cp(t, wa, wb):
+    temp = torch.einsum('i j k l, j r -> i r k l', t, wb)
+    return torch.einsum('i j k l, i r -> r j k l', temp, wa)
+
+
+def rebuild_conventional(up, down, shape, dyn_dim=None):
+    up = up.reshape(up.size(0), -1)
+    down = down.reshape(down.size(0), -1)
+    if dyn_dim is not None:
+        up = up[:, :dyn_dim]
+        down = down[:dyn_dim, :]
+    return (up @ down).reshape(shape)
+
+
+def rebuild_cp_decomposition(up, down, mid):
+    up = up.reshape(up.size(0), -1)
+    down = down.reshape(down.size(0), -1)
+    return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
+
+
+# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py
+def factorization(dimension: int, factor: int = -1) -> tuple[int, int]:
+    '''
+    Return a tuple of two values: the input dimension decomposed by the number closest to factor;
+    the second value is greater than or equal to the first value.
+
+    In LoRA with Kronecker product, the first value is the weight scale and the second value is the weight.
+
+    Because of the non-commutative property A⊗B ≠ B⊗A, the meaning of the two matrices is slightly different.
+
+    examples)
+    factor
+        -1              2               4               8                16 ...
+        127 -> 1, 127   127 -> 1, 127   127 -> 1, 127    127 -> 1, 127    127 -> 1, 127
+        128 -> 8, 16    128 -> 2, 64    128 -> 4, 32     128 -> 8, 16     128 -> 8, 16
+        250 -> 10, 25   250 -> 2, 125   250 -> 2, 125    250 -> 5, 50     250 -> 10, 25
+        360 -> 8, 45    360 -> 2, 180   360 -> 4, 90     360 -> 8, 45     360 -> 12, 30
+        512 -> 16, 32   512 -> 2, 256   512 -> 4, 128    512 -> 8, 64     512 -> 16, 32
+        1024 -> 32, 32  1024 -> 2, 512  1024 -> 4, 256   1024 -> 8, 128   1024 -> 16, 64
+    '''
+
+    if factor > 0 and (dimension % factor) == 0:
+        m = factor
+        n = dimension // factor
+        if m > n:
+            n, m = m, n
+        return m, n
+    if factor < 0:
+        factor = dimension
+    m, n = 1, dimension
+    length = m + n
+    while m < n:
+        new_m = m + 1
+        while dimension % new_m != 0:
+            new_m += 1
+        new_n = dimension // new_m
+        if new_m + new_n > length or new_m > factor:
+            break
+        else:
+            m, n = new_m, new_n
+    if m > n:
+        n, m = m, n
+    return m, n
+
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index ea22e9c3e..d0f3ebb7a 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -1,129 +1,129 @@
-from __future__ import annotations
-import os
-from collections import namedtuple
-import enum
-
-from modules import sd_models, hashes, shared
-
-NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
-
-metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
-
-
-class SdVersion(enum.Enum):
-    Unknown = 1
-    SD1 = 2
-    SD2 = 3
-    SDXL = 4
-
-
-class NetworkOnDisk:
-    def __init__(self, name, filename):
-        self.name = name
-        self.filename = filename
-        self.metadata = {}
-        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
-
-        if self.is_safetensors:
-            self.metadata = sd_models.read_metadata_from_safetensors(filename)
-        if self.metadata:
-            m = {}
-            for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
-                m[k] = v
-            self.metadata = m
-        self.alias = self.metadata.get('ss_output_name', self.name)
-        self.hash = None
-        self.shorthash = None
-        self.set_hash(self.metadata.get('sshs_model_hash') or hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
-        self.sd_version = self.detect_version()
-
-    def detect_version(self):
-        if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
-            return SdVersion.SDXL
-        elif str(self.metadata.get('ss_v2', "")) == "True":
-            return SdVersion.SD2
-        elif len(self.metadata):
-            return SdVersion.SD1
-        return SdVersion.Unknown
-
-    def set_hash(self, v):
-        self.hash = v
-        self.shorthash = self.hash[0:12]
-
-    def read_hash(self):
-        if not self.hash:
-            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
-
-    def get_alias(self):
-        import networks
-        return self.name if shared.opts.lora_preferred_name == "filename" or self.alias.lower() in networks.forbidden_network_aliases else self.alias
-
-
-class Network: # LoraModule
-    def __init__(self, name, network_on_disk: NetworkOnDisk):
-        self.name = name
-        self.network_on_disk = network_on_disk
-        self.te_multiplier = 1.0
-        self.unet_multiplier = [1.0] * 3
-        self.dyn_dim = None
-        self.modules = {}
-        self.mtime = None
-        self.mentioned_name = None
-        """the text that was used to add the network to prompt - can be either name or an alias"""
-
-
-class ModuleType:
-    def create_module(self, net: Network, weights: NetworkWeights) -> Network | None: # pylint: disable=W0613
-        return None
-
-
-class NetworkModule:
-    def __init__(self, net: Network, weights: NetworkWeights):
-        self.network =
net - self.network_key = weights.network_key - self.sd_key = weights.sd_key - self.sd_module = weights.sd_module - if hasattr(self.sd_module, 'weight'): - self.shape = self.sd_module.weight.shape - self.dim = None - self.bias = weights.w.get("bias") - self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None - self.scale = weights.w["scale"].item() if "scale" in weights.w else None - - def multiplier(self): - if 'transformer' in self.sd_key[:20]: - return self.network.te_multiplier - if "down_blocks" in self.sd_key: - return self.network.unet_multiplier[0] - if "mid_block" in self.sd_key: - return self.network.unet_multiplier[1] - if "up_blocks" in self.sd_key: - return self.network.unet_multiplier[2] - else: - return self.network.unet_multiplier[0] - - def calc_scale(self): - if self.scale is not None: - return self.scale - if self.dim is not None and self.alpha is not None: - return self.alpha / self.dim - return 1.0 - - def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): - if self.bias is not None: - updown = updown.reshape(self.bias.shape) - updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) - updown = updown.reshape(output_shape) - if len(output_shape) == 4: - updown = updown.reshape(output_shape) - if orig_weight.size().numel() == updown.size().numel(): - updown = updown.reshape(orig_weight.shape) - if ex_bias is not None: - ex_bias = ex_bias * self.multiplier() - return updown * self.calc_scale() * self.multiplier(), ex_bias - - def calc_updown(self, target): - raise NotImplementedError() - - def forward(self, x, y): - raise NotImplementedError() +from __future__ import annotations +import os +from collections import namedtuple +import enum + +from modules import sd_models, hashes, shared + +NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module']) + +metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20} + + +class SdVersion(enum.Enum): + Unknown = 1 + SD1 = 2 + SD2 = 3 + SDXL = 4 + + +class NetworkOnDisk: + def __init__(self, name, filename): + self.name = name + self.filename = filename + self.metadata = {} + self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors" + + if self.is_safetensors: + self.metadata = sd_models.read_metadata_from_safetensors(filename) + if self.metadata: + m = {} + for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)): + m[k] = v + self.metadata = m + self.alias = self.metadata.get('ss_output_name', self.name) + self.hash = None + self.shorthash = None + self.set_hash(self.metadata.get('sshs_model_hash') or hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '') + self.sd_version = self.detect_version() + + def detect_version(self): + if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"): + return SdVersion.SDXL + elif str(self.metadata.get('ss_v2', "")) == "True": + return SdVersion.SD2 + elif len(self.metadata): + return SdVersion.SD1 + return SdVersion.Unknown + + def set_hash(self, v): + self.hash = v + self.shorthash = self.hash[0:12] + + def read_hash(self): + if not self.hash: + self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '') + + def get_alias(self): + import networks + return self.name if shared.opts.lora_preferred_name == "filename" or self.alias.lower() in networks.forbidden_network_aliases 
else self.alias + + +class Network: # LoraModule + def __init__(self, name, network_on_disk: NetworkOnDisk): + self.name = name + self.network_on_disk = network_on_disk + self.te_multiplier = 1.0 + self.unet_multiplier = [1.0] * 3 + self.dyn_dim = None + self.modules = {} + self.mtime = None + self.mentioned_name = None + """the text that was used to add the network to prompt - can be either name or an alias""" + + +class ModuleType: + def create_module(self, net: Network, weights: NetworkWeights) -> Network | None: # pylint: disable=W0613 + return None + + +class NetworkModule: + def __init__(self, net: Network, weights: NetworkWeights): + self.network = net + self.network_key = weights.network_key + self.sd_key = weights.sd_key + self.sd_module = weights.sd_module + if hasattr(self.sd_module, 'weight'): + self.shape = self.sd_module.weight.shape + self.dim = None + self.bias = weights.w.get("bias") + self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None + self.scale = weights.w["scale"].item() if "scale" in weights.w else None + + def multiplier(self): + if 'transformer' in self.sd_key[:20]: + return self.network.te_multiplier + if "down_blocks" in self.sd_key: + return self.network.unet_multiplier[0] + if "mid_block" in self.sd_key: + return self.network.unet_multiplier[1] + if "up_blocks" in self.sd_key: + return self.network.unet_multiplier[2] + else: + return self.network.unet_multiplier[0] + + def calc_scale(self): + if self.scale is not None: + return self.scale + if self.dim is not None and self.alpha is not None: + return self.alpha / self.dim + return 1.0 + + def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): + if self.bias is not None: + updown = updown.reshape(self.bias.shape) + updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) + updown = updown.reshape(output_shape) + if len(output_shape) == 4: + updown = updown.reshape(output_shape) + if orig_weight.size().numel() == updown.size().numel(): + updown = updown.reshape(orig_weight.shape) + if ex_bias is not None: + ex_bias = ex_bias * self.multiplier() + return updown * self.calc_scale() * self.multiplier(), ex_bias + + def calc_updown(self, target): + raise NotImplementedError() + + def forward(self, x, y): + raise NotImplementedError() diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py index 233791712..ba9f2e359 100644 --- a/extensions-builtin/Lora/network_full.py +++ b/extensions-builtin/Lora/network_full.py @@ -1,27 +1,27 @@ -import network - - -class ModuleTypeFull(network.ModuleType): - def create_module(self, net: network.Network, weights: network.NetworkWeights): - if all(x in weights.w for x in ["diff"]): - return NetworkModuleFull(net, weights) - - return None - - -class NetworkModuleFull(network.NetworkModule): - def __init__(self, net: network.Network, weights: network.NetworkWeights): - super().__init__(net, weights) - - self.weight = weights.w.get("diff") - self.ex_bias = weights.w.get("diff_b") - - def calc_updown(self, target): - output_shape = self.weight.shape - updown = self.weight.to(target.device, dtype=target.dtype) - if self.ex_bias is not None: - ex_bias = self.ex_bias.to(target.device, dtype=target.dtype) - else: - ex_bias = None - - return self.finalize_updown(updown, target, output_shape, ex_bias) +import network + + +class ModuleTypeFull(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["diff"]): + return 
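Each ModuleType subclass acts as a detector: it inspects the keys present in a weight bundle and returns a concrete module only when all of its signature keys are present, otherwise None so the next type can try. A small standalone sketch of that dispatch pattern (class and key names here are illustrative, not the extension's API):

# Illustrative sketch of the create_module() dispatch used across the network_* files.
class FullDetector:
    keys = ("diff",)

class LoraDetector:
    keys = ("lora_up.weight", "lora_down.weight")

def pick_module_type(weight_keys, detectors=(LoraDetector, FullDetector)):
    for det in detectors:
        if all(k in weight_keys for k in det.keys):   # first detector whose keys all match wins
            return det
    return None

print(pick_module_type({"lora_up.weight", "lora_down.weight", "alpha"}))   # LoraDetector
print(pick_module_type({"diff", "diff_b"}))                                # FullDetector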
NetworkModuleFull(net, weights) + + return None + + +class NetworkModuleFull(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.weight = weights.w.get("diff") + self.ex_bias = weights.w.get("diff_b") + + def calc_updown(self, target): + output_shape = self.weight.shape + updown = self.weight.to(target.device, dtype=target.dtype) + if self.ex_bias is not None: + ex_bias = self.ex_bias.to(target.device, dtype=target.dtype) + else: + ex_bias = None + + return self.finalize_updown(updown, target, output_shape, ex_bias) diff --git a/extensions-builtin/Lora/network_hada.py b/extensions-builtin/Lora/network_hada.py index 0feda761e..4e5924d04 100644 --- a/extensions-builtin/Lora/network_hada.py +++ b/extensions-builtin/Lora/network_hada.py @@ -1,46 +1,46 @@ -import lyco_helpers -import network - - -class ModuleTypeHada(network.ModuleType): - def create_module(self, net: network.Network, weights: network.NetworkWeights): - if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]): - return NetworkModuleHada(net, weights) - return None - - -class NetworkModuleHada(network.NetworkModule): - def __init__(self, net: network.Network, weights: network.NetworkWeights): - super().__init__(net, weights) - if hasattr(self.sd_module, 'weight'): - self.shape = self.sd_module.weight.shape - self.w1a = weights.w["hada_w1_a"] - self.w1b = weights.w["hada_w1_b"] - self.dim = self.w1b.shape[0] - self.w2a = weights.w["hada_w2_a"] - self.w2b = weights.w["hada_w2_b"] - self.t1 = weights.w.get("hada_t1") - self.t2 = weights.w.get("hada_t2") - - def calc_updown(self, target): - w1a = self.w1a.to(target.device, dtype=target.dtype) - w1b = self.w1b.to(target.device, dtype=target.dtype) - w2a = self.w2a.to(target.device, dtype=target.dtype) - w2b = self.w2b.to(target.device, dtype=target.dtype) - output_shape = [w1a.size(0), w1b.size(1)] - if self.t1 is not None: - output_shape = [w1a.size(1), w1b.size(1)] - t1 = self.t1.to(target.device, dtype=target.dtype) - updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b) - output_shape += t1.shape[2:] - else: - if len(w1b.shape) == 4: - output_shape += w1b.shape[2:] - updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape) - if self.t2 is not None: - t2 = self.t2.to(target.device, dtype=target.dtype) - updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) - else: - updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape) - updown = updown1 * updown2 - return self.finalize_updown(updown, target, output_shape) +import lyco_helpers +import network + + +class ModuleTypeHada(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]): + return NetworkModuleHada(net, weights) + return None + + +class NetworkModuleHada(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + if hasattr(self.sd_module, 'weight'): + self.shape = self.sd_module.weight.shape + self.w1a = weights.w["hada_w1_a"] + self.w1b = weights.w["hada_w1_b"] + self.dim = self.w1b.shape[0] + self.w2a = weights.w["hada_w2_a"] + self.w2b = weights.w["hada_w2_b"] + self.t1 = weights.w.get("hada_t1") + self.t2 = weights.w.get("hada_t2") + + def calc_updown(self, target): + w1a = self.w1a.to(target.device, dtype=target.dtype) + w1b = self.w1b.to(target.device, dtype=target.dtype) + w2a 
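LoHa (Hadamard-product low-rank adaptation) builds two conventional low-rank deltas and multiplies them element-wise, which is what the updown1 * updown2 at the end of calc_updown computes. A toy sketch of the linear-layer case, with shapes chosen only for illustration:

import torch

# Toy LoHa delta for a Linear layer: two rank-r factorizations combined by a Hadamard product.
out_dim, in_dim, r = 64, 32, 4
w1a, w1b = torch.randn(out_dim, r), torch.randn(r, in_dim)   # hada_w1_a / hada_w1_b
w2a, w2b = torch.randn(out_dim, r), torch.randn(r, in_dim)   # hada_w2_a / hada_w2_b

updown = (w1a @ w1b) * (w2a @ w2b)   # element-wise product of the two low-rank reconstructions
assert updown.shape == (out_dim, in_dim)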
= self.w2a.to(target.device, dtype=target.dtype) + w2b = self.w2b.to(target.device, dtype=target.dtype) + output_shape = [w1a.size(0), w1b.size(1)] + if self.t1 is not None: + output_shape = [w1a.size(1), w1b.size(1)] + t1 = self.t1.to(target.device, dtype=target.dtype) + updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b) + output_shape += t1.shape[2:] + else: + if len(w1b.shape) == 4: + output_shape += w1b.shape[2:] + updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape) + if self.t2 is not None: + t2 = self.t2.to(target.device, dtype=target.dtype) + updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) + else: + updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape) + updown = updown1 * updown2 + return self.finalize_updown(updown, target, output_shape) diff --git a/extensions-builtin/Lora/network_ia3.py b/extensions-builtin/Lora/network_ia3.py index cb39df228..75316d97c 100644 --- a/extensions-builtin/Lora/network_ia3.py +++ b/extensions-builtin/Lora/network_ia3.py @@ -1,26 +1,26 @@ -import network - - -class ModuleTypeIa3(network.ModuleType): - def create_module(self, net: network.Network, weights: network.NetworkWeights): - if all(x in weights.w for x in ["weight"]): - return NetworkModuleIa3(net, weights) - - return None - - -class NetworkModuleIa3(network.NetworkModule): - def __init__(self, net: network.Network, weights: network.NetworkWeights): - super().__init__(net, weights) - self.w = weights.w["weight"] - self.on_input = weights.w["on_input"].item() - - def calc_updown(self, target): - w = self.w.to(target.device, dtype=target.dtype) - output_shape = [w.size(0), target.size(1)] - if self.on_input: - output_shape.reverse() - else: - w = w.reshape(-1, 1) - updown = target * w - return self.finalize_updown(updown, target, output_shape) +import network + + +class ModuleTypeIa3(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["weight"]): + return NetworkModuleIa3(net, weights) + + return None + + +class NetworkModuleIa3(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + self.w = weights.w["weight"] + self.on_input = weights.w["on_input"].item() + + def calc_updown(self, target): + w = self.w.to(target.device, dtype=target.dtype) + output_shape = [w.size(0), target.size(1)] + if self.on_input: + output_shape.reverse() + else: + w = w.reshape(-1, 1) + updown = target * w + return self.finalize_updown(updown, target, output_shape) diff --git a/extensions-builtin/Lora/network_lokr.py b/extensions-builtin/Lora/network_lokr.py index 20387efee..cafdb968a 100644 --- a/extensions-builtin/Lora/network_lokr.py +++ b/extensions-builtin/Lora/network_lokr.py @@ -1,57 +1,57 @@ -import torch -import lyco_helpers -import network - - -class ModuleTypeLokr(network.ModuleType): - def create_module(self, net: network.Network, weights: network.NetworkWeights): - has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w) - has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w) - if has_1 and has_2: - return NetworkModuleLokr(net, weights) - return None - - -def make_kron(orig_shape, w1, w2): - if len(w2.shape) == 4: - w1 = w1.unsqueeze(2).unsqueeze(2) - w2 = w2.contiguous() - return torch.kron(w1, w2).reshape(orig_shape) - - -class NetworkModuleLokr(network.NetworkModule): - def __init__(self, net: network.Network, weights: 
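IA3 stores only a per-channel scaling vector; the delta is the target weight multiplied by that vector, broadcast over the input or output dimension depending on on_input. A toy sketch of the output-channel case:

import torch

# Toy IA3 delta: scale the existing weight by a learned vector instead of adding a low-rank matrix.
target = torch.randn(64, 32)   # existing layer weight (out x in)
w = torch.randn(64)            # learned per-output-channel scaling vector
on_input = False

if not on_input:
    w = w.reshape(-1, 1)       # broadcast one scale per output row
updown = target * w            # delta that gets added on top of the original weight
assert updown.shape == target.shape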
network.NetworkWeights): - super().__init__(net, weights) - self.w1 = weights.w.get("lokr_w1") - self.w1a = weights.w.get("lokr_w1_a") - self.w1b = weights.w.get("lokr_w1_b") - self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim - self.w2 = weights.w.get("lokr_w2") - self.w2a = weights.w.get("lokr_w2_a") - self.w2b = weights.w.get("lokr_w2_b") - self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim - self.t2 = weights.w.get("lokr_t2") - - def calc_updown(self, target): - if self.w1 is not None: - w1 = self.w1.to(target.device, dtype=target.dtype) - else: - w1a = self.w1a.to(target.device, dtype=target.dtype) - w1b = self.w1b.to(target.device, dtype=target.dtype) - w1 = w1a @ w1b - if self.w2 is not None: - w2 = self.w2.to(target.device, dtype=target.dtype) - elif self.t2 is None: - w2a = self.w2a.to(target.device, dtype=target.dtype) - w2b = self.w2b.to(target.device, dtype=target.dtype) - w2 = w2a @ w2b - else: - t2 = self.t2.to(target.device, dtype=target.dtype) - w2a = self.w2a.to(target.device, dtype=target.dtype) - w2b = self.w2b.to(target.device, dtype=target.dtype) - w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) - output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)] - if len(target.shape) == 4: - output_shape = target.shape - updown = make_kron(output_shape, w1, w2) - return self.finalize_updown(updown, target, output_shape) +import torch +import lyco_helpers +import network + + +class ModuleTypeLokr(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w) + has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w) + if has_1 and has_2: + return NetworkModuleLokr(net, weights) + return None + + +def make_kron(orig_shape, w1, w2): + if len(w2.shape) == 4: + w1 = w1.unsqueeze(2).unsqueeze(2) + w2 = w2.contiguous() + return torch.kron(w1, w2).reshape(orig_shape) + + +class NetworkModuleLokr(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + self.w1 = weights.w.get("lokr_w1") + self.w1a = weights.w.get("lokr_w1_a") + self.w1b = weights.w.get("lokr_w1_b") + self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim + self.w2 = weights.w.get("lokr_w2") + self.w2a = weights.w.get("lokr_w2_a") + self.w2b = weights.w.get("lokr_w2_b") + self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim + self.t2 = weights.w.get("lokr_t2") + + def calc_updown(self, target): + if self.w1 is not None: + w1 = self.w1.to(target.device, dtype=target.dtype) + else: + w1a = self.w1a.to(target.device, dtype=target.dtype) + w1b = self.w1b.to(target.device, dtype=target.dtype) + w1 = w1a @ w1b + if self.w2 is not None: + w2 = self.w2.to(target.device, dtype=target.dtype) + elif self.t2 is None: + w2a = self.w2a.to(target.device, dtype=target.dtype) + w2b = self.w2b.to(target.device, dtype=target.dtype) + w2 = w2a @ w2b + else: + t2 = self.t2.to(target.device, dtype=target.dtype) + w2a = self.w2a.to(target.device, dtype=target.dtype) + w2b = self.w2b.to(target.device, dtype=target.dtype) + w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) + output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)] + if len(target.shape) == 4: + output_shape = target.shape + updown = make_kron(output_shape, w1, w2) + return self.finalize_updown(updown, target, output_shape) diff --git 
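LoKr factors the delta as a Kronecker product: a small matrix w1 (possibly itself low-rank, w1a @ w1b) Kronecker-multiplied with a larger w2, so the full out x in delta never has to be stored. A toy sketch mirroring make_kron:

import torch

# Toy LoKr delta: the Kronecker product of two small factors reconstructs a much larger matrix.
w1 = torch.randn(4, 4)    # lokr_w1 (or w1a @ w1b when decomposed)
w2 = torch.randn(16, 8)   # lokr_w2 (or w2a @ w2b / CP-reconstructed)
orig_shape = (w1.size(0) * w2.size(0), w1.size(1) * w2.size(1))   # 64 x 32

updown = torch.kron(w1, w2).reshape(orig_shape)   # reshape matters for the 4-D conv case
assert updown.shape == orig_shape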
a/extensions-builtin/Lora/network_lora.py b/extensions-builtin/Lora/network_lora.py index 8c2c4c8a5..8f31388d4 100644 --- a/extensions-builtin/Lora/network_lora.py +++ b/extensions-builtin/Lora/network_lora.py @@ -1,75 +1,75 @@ -import torch - -import diffusers.models.lora as diffusers_lora -import lyco_helpers -import network -from modules import devices - - -class ModuleTypeLora(network.ModuleType): - def create_module(self, net: network.Network, weights: network.NetworkWeights): - if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]): - return NetworkModuleLora(net, weights) - return None - - -class NetworkModuleLora(network.NetworkModule): - def __init__(self, net: network.Network, weights: network.NetworkWeights): - super().__init__(net, weights) - self.up_model = self.create_module(weights.w, "lora_up.weight") - self.down_model = self.create_module(weights.w, "lora_down.weight") - self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True) - self.dim = weights.w["lora_down.weight"].shape[0] - - def create_module(self, weights, key, none_ok=False): - weight = weights.get(key) - if weight is None and none_ok: - return None - is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, diffusers_lora.LoRACompatibleLinear] - is_conv = type(self.sd_module) in [torch.nn.Conv2d, diffusers_lora.LoRACompatibleConv] - if is_linear: - weight = weight.reshape(weight.shape[0], -1) - module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) - elif is_conv and key == "lora_down.weight" or key == "dyn_up": - if len(weight.shape) == 2: - weight = weight.reshape(weight.shape[0], -1, 1, 1) - if weight.shape[2] != 1 or weight.shape[3] != 1: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) - else: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - elif is_conv and key == "lora_mid.weight": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) - elif is_conv and key == "lora_up.weight" or key == "dyn_down": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - else: - raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}') - with torch.no_grad(): - if weight.shape != module.weight.shape: - weight = weight.reshape(module.weight.shape) - module.weight.copy_(weight) - module.to(device=devices.cpu, dtype=devices.dtype) - module.weight.requires_grad_(False) - return module - - def calc_updown(self, target): # pylint: disable=W0237 - up = self.up_model.weight.to(target.device, dtype=target.dtype) - down = self.down_model.weight.to(target.device, dtype=target.dtype) - output_shape = [up.size(0), down.size(1)] - if self.mid_model is not None: - # cp-decomposition - mid = self.mid_model.weight.to(target.device, dtype=target.dtype) - updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid) - output_shape += mid.shape[2:] - else: - if len(down.shape) == 4: - output_shape += down.shape[2:] - updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim) - return self.finalize_updown(updown, target, output_shape) - - def forward(self, x, y): - self.up_model.to(device=devices.device) - self.down_model.to(device=devices.device) - if hasattr(y, 
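The standard LoRA delta is the product of the up and down projection matrices, optionally truncated to dyn_dim ranks; lyco_helpers.rebuild_conventional flattens conv weights so the same matmul serves both layer types. A minimal sketch of the linear case, assuming the helper behaves roughly like this (it is an approximation, not the helper's exact source):

import torch

def rebuild_conventional_sketch(up, down, output_shape, dyn_dim=None):
    # Flatten to 2-D, optionally keep only the first dyn_dim ranks, multiply, restore the target shape.
    up = up.reshape(up.size(0), -1)
    down = down.reshape(down.size(0), -1)
    if dyn_dim is not None:
        up = up[:, :dyn_dim]
        down = down[:dyn_dim, :]
    return (up @ down).reshape(output_shape)

up = torch.randn(64, 8)     # lora_up.weight
down = torch.randn(8, 32)   # lora_down.weight
delta = rebuild_conventional_sketch(up, down, (64, 32))
assert delta.shape == (64, 32)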
"scale"): - return y(scale=1) + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale() - - return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale() +import torch + +import diffusers.models.lora as diffusers_lora +import lyco_helpers +import network +from modules import devices + + +class ModuleTypeLora(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]): + return NetworkModuleLora(net, weights) + return None + + +class NetworkModuleLora(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + self.up_model = self.create_module(weights.w, "lora_up.weight") + self.down_model = self.create_module(weights.w, "lora_down.weight") + self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True) + self.dim = weights.w["lora_down.weight"].shape[0] + + def create_module(self, weights, key, none_ok=False): + weight = weights.get(key) + if weight is None and none_ok: + return None + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, diffusers_lora.LoRACompatibleLinear] + is_conv = type(self.sd_module) in [torch.nn.Conv2d, diffusers_lora.LoRACompatibleConv] + if is_linear: + weight = weight.reshape(weight.shape[0], -1) + module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) + elif is_conv and key == "lora_down.weight" or key == "dyn_up": + if len(weight.shape) == 2: + weight = weight.reshape(weight.shape[0], -1, 1, 1) + if weight.shape[2] != 1 or weight.shape[3] != 1: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + else: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + elif is_conv and key == "lora_mid.weight": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + elif is_conv and key == "lora_up.weight" or key == "dyn_down": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + else: + raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}') + with torch.no_grad(): + if weight.shape != module.weight.shape: + weight = weight.reshape(module.weight.shape) + module.weight.copy_(weight) + module.to(device=devices.cpu, dtype=devices.dtype) + module.weight.requires_grad_(False) + return module + + def calc_updown(self, target): # pylint: disable=W0237 + up = self.up_model.weight.to(target.device, dtype=target.dtype) + down = self.down_model.weight.to(target.device, dtype=target.dtype) + output_shape = [up.size(0), down.size(1)] + if self.mid_model is not None: + # cp-decomposition + mid = self.mid_model.weight.to(target.device, dtype=target.dtype) + updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid) + output_shape += mid.shape[2:] + else: + if len(down.shape) == 4: + output_shape += down.shape[2:] + updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim) + return self.finalize_updown(updown, target, output_shape) + + def forward(self, x, y): + self.up_model.to(device=devices.device) + self.down_model.to(device=devices.device) + if hasattr(y, "scale"): + return y(scale=1) + 
self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale() + + return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale() diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 5518e2527..c61bc5355 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -1,495 +1,495 @@ -from typing import Union, List -import os -import re -import time -import concurrent -import lora_patches -import network -import network_lora -import network_hada -import network_ia3 -import network_oft -import network_lokr -import network_full -import network_norm -import network_glora -import lora_convert -import torch -import diffusers.models.lora -from modules import shared, devices, sd_models, sd_models_compile, errors, scripts - - -debug = os.environ.get('SD_LORA_DEBUG', None) is not None -originals: lora_patches.LoraPatches = None -extra_network_lora = None -available_networks = {} -available_network_aliases = {} -loaded_networks: List[network.Network] = [] -timer = { 'load': 0, 'apply': 0, 'restore': 0 } -# networks_in_memory = {} -lora_cache = {} -available_network_hash_lookup = {} -forbidden_network_aliases = {} -re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)") -module_types = [ - network_lora.ModuleTypeLora(), - network_hada.ModuleTypeHada(), - network_ia3.ModuleTypeIa3(), - network_oft.ModuleTypeOFT(), - network_lokr.ModuleTypeLokr(), - network_full.ModuleTypeFull(), - network_norm.ModuleTypeNorm(), - network_glora.ModuleTypeGLora(), -] -convert_diffusers_name_to_compvis = lora_convert.convert_diffusers_name_to_compvis # supermerger compatibility item - - -def assign_network_names_to_compvis_modules(sd_model): - network_layer_mapping = {} - if shared.backend == shared.Backend.DIFFUSERS: - if not hasattr(shared.sd_model, 'text_encoder') or not hasattr(shared.sd_model, 'unet'): - return - for name, module in shared.sd_model.text_encoder.named_modules(): - prefix = "lora_te1_" if shared.sd_model_type == "sdxl" else "lora_te_" - network_name = prefix + name.replace(".", "_") - network_layer_mapping[network_name] = module - module.network_layer_name = network_name - if shared.sd_model_type == "sdxl": - for name, module in shared.sd_model.text_encoder_2.named_modules(): - network_name = "lora_te2_" + name.replace(".", "_") - network_layer_mapping[network_name] = module - module.network_layer_name = network_name - for name, module in shared.sd_model.unet.named_modules(): - network_name = "lora_unet_" + name.replace(".", "_") - network_layer_mapping[network_name] = module - module.network_layer_name = network_name - else: - if not hasattr(shared.sd_model, 'cond_stage_model'): - return - for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules(): - network_name = name.replace(".", "_") - network_layer_mapping[network_name] = module - module.network_layer_name = network_name - for name, module in shared.sd_model.model.named_modules(): - network_name = name.replace(".", "_") - network_layer_mapping[network_name] = module - module.network_layer_name = network_name - sd_model.network_layer_mapping = network_layer_mapping - - -def load_diffusers(name, network_on_disk, lora_scale=1.0) -> network.Network: - t0 = time.time() - cached = lora_cache.get(name, None) - # if debug: - shared.log.debug(f'LoRA load: name="{name}" file="{network_on_disk.filename}" type=diffusers {"cached" if cached else ""} fuse={shared.opts.lora_fuse_diffusers}') - if cached is not None: - return cached - if 
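assign_network_names_to_compvis_modules builds the lookup that later matches checkpoint keys to live modules: every submodule name has its dots replaced with underscores and gets a prefix identifying the component it belongs to, with lora_te1_/lora_te2_ used for SDXL's two text encoders. A small sketch of that naming rule:

# Sketch of the module-name -> LoRA-key convention used when building network_layer_mapping.
def lora_key(component_prefix: str, module_name: str) -> str:
    return component_prefix + module_name.replace(".", "_")

print(lora_key("lora_unet_", "down_blocks.0.attentions.0.proj_in"))
# lora_unet_down_blocks_0_attentions_0_proj_in
print(lora_key("lora_te1_", "text_model.encoder.layers.0.mlp.fc1"))
# lora_te1_text_model_encoder_layers_0_mlp_fc1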
shared.backend != shared.Backend.DIFFUSERS: - return None - shared.sd_model.load_lora_weights(network_on_disk.filename) - if shared.opts.lora_fuse_diffusers: - shared.sd_model.fuse_lora(lora_scale=lora_scale) - net = network.Network(name, network_on_disk) - net.mtime = os.path.getmtime(network_on_disk.filename) - lora_cache[name] = net - t1 = time.time() - timer['load'] += t1 - t0 - return net - - -def load_network(name, network_on_disk) -> network.Network: - t0 = time.time() - cached = lora_cache.get(name, None) - if debug: - shared.log.debug(f'LoRA load: name="{name}" file="{network_on_disk.filename}" type=lora {"cached" if cached else ""}') - if cached is not None: - return cached - net = network.Network(name, network_on_disk) - net.mtime = os.path.getmtime(network_on_disk.filename) - sd = sd_models.read_state_dict(network_on_disk.filename) - assign_network_names_to_compvis_modules(shared.sd_model) # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0 - keys_failed_to_match = {} - matched_networks = {} - convert = lora_convert.KeyConvert() - for key_network, weight in sd.items(): - parts = key_network.split('.') - if len(parts) > 5: # messy handler for diffusers peft lora - key_network_without_network_parts = '_'.join(parts[:-2]) - if not key_network_without_network_parts.startswith('lora_'): - key_network_without_network_parts = 'lora_' + key_network_without_network_parts - network_part = '.'.join(parts[-2:]).replace('lora_A', 'lora_down').replace('lora_B', 'lora_up') - else: - key_network_without_network_parts, network_part = key_network.split(".", 1) - # if debug: - # shared.log.debug(f'LoRA load: name="{name}" full={key_network} network={network_part} key={key_network_without_network_parts}') - key, sd_module = convert(key_network_without_network_parts) - if sd_module is None: - keys_failed_to_match[key_network] = key - continue - if key not in matched_networks: - matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module) - matched_networks[key].w[network_part] = weight - for key, weights in matched_networks.items(): - net_module = None - for nettype in module_types: - net_module = nettype.create_module(net, weights) - if net_module is not None: - break - if net_module is None: - shared.log.error(f'LoRA unhandled: name={name} key={key} weights={weights.w.keys()}') - else: - net.modules[key] = net_module - if len(keys_failed_to_match) > 0: - shared.log.warning(f"LoRA file={network_on_disk.filename} unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}") - if debug: - shared.log.debug(f"LoRA file={network_on_disk.filename} unmatched={keys_failed_to_match}") - elif debug: - shared.log.debug(f"LoRA file={network_on_disk.filename} unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}") - lora_cache[name] = net - t1 = time.time() - timer['load'] += t1 - t0 - return net - - -def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): - networks_on_disk = [available_network_aliases.get(name, None) for name in names] - if any(x is None for x in networks_on_disk): - list_available_networks() - networks_on_disk = [available_network_aliases.get(name, None) for name in names] - failed_to_load_networks = [] - - recompile_model = False - if ((shared.opts.cuda_compile and shared.opts.cuda_compile_backend == "openvino_fx") or - shared.opts.nncf_compress_weights or shared.opts.nncf_compress_text_encoder_weights): - if len(names) == 
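load_network groups raw state-dict entries by module: each key is split into the part that names the layer and the part that names the weight inside the module, with diffusers/PEFT-style lora_A/lora_B suffixes renamed to the lora_down/lora_up convention. A sketch of that split on two representative keys (the keys themselves are illustrative):

# Illustrative split of LoRA state-dict keys into (module key, weight-within-module).
def split_key(key_network: str):
    parts = key_network.split('.')
    if len(parts) > 5:   # diffusers/PEFT layout: dotted module path, weight name at the end
        module_key = '_'.join(parts[:-2])
        if not module_key.startswith('lora_'):
            module_key = 'lora_' + module_key
        network_part = '.'.join(parts[-2:]).replace('lora_A', 'lora_down').replace('lora_B', 'lora_up')
    else:                # kohya layout: module key, then everything after the first dot
        module_key, network_part = key_network.split('.', 1)
    return module_key, network_part

print(split_key('lora_unet_down_blocks_0_attentions_0_proj_in.lora_down.weight'))
print(split_key('unet.down_blocks.0.attentions.0.proj_in.lora_A.weight'))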
len(shared.compiled_model_state.lora_model): - for i, name in enumerate(names): - if shared.compiled_model_state.lora_model[i] != f"{name}:{te_multipliers[i] if te_multipliers else 1.0}": - recompile_model = True - shared.compiled_model_state.lora_model = [] - break - if not recompile_model: - if len(loaded_networks) > 0 and debug: - shared.log.debug('OpenVINO: Skipping LoRa loading') - return - else: - recompile_model = True - shared.compiled_model_state.lora_model = [] - if recompile_model: - backup_cuda_compile = shared.opts.cuda_compile - backup_nncf_compress_weights = shared.opts.nncf_compress_weights - backup_nncf_compress_text_encoder_weights = shared.opts.nncf_compress_text_encoder_weights - shared.compiled_model_state.lora_compile = True - sd_models.unload_model_weights(op='model') - shared.opts.cuda_compile = False - shared.opts.nncf_compress_weights = False - shared.opts.nncf_compress_text_encoder_weights = False - sd_models.reload_model_weights(op='model') - shared.opts.cuda_compile = backup_cuda_compile - shared.opts.nncf_compress_weights = backup_nncf_compress_weights - shared.opts.nncf_compress_text_encoder_weights = backup_nncf_compress_text_encoder_weights - - loaded_networks.clear() - for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)): - net = None - if network_on_disk is not None: - if debug: - shared.log.debug(f'LoRA load start: name="{name}" file="{network_on_disk.filename}"') - try: - if recompile_model: - shared.compiled_model_state.lora_model.append(f"{name}:{te_multipliers[i] if te_multipliers else 1.0}") - if shared.backend == shared.Backend.DIFFUSERS and shared.opts.lora_force_diffusers: # OpenVINO only works with Diffusers LoRa loading. - # or getattr(network_on_disk, 'shorthash', '').lower() == 'aaebf6360f7d' # sd15-lcm - # or getattr(network_on_disk, 'shorthash', '').lower() == '3d18b05e4f56' # sdxl-lcm - # or getattr(network_on_disk, 'shorthash', '').lower() == '813ea5fb1c67' # turbo sdxl-turbo - net = load_diffusers(name, network_on_disk, lora_scale=te_multipliers[i] if te_multipliers else 1.0) - else: - net = load_network(name, network_on_disk) - except Exception as e: - shared.log.error(f"LoRA load failed: file={network_on_disk.filename} {e}") - if debug: - errors.display(e, f"LoRA load failed file={network_on_disk.filename}") - continue - net.mentioned_name = name - network_on_disk.read_hash() - if net is None: - failed_to_load_networks.append(name) - shared.log.error(f"LoRA unknown type: network={name}") - continue - net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0 - net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0 - net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0 - loaded_networks.append(net) - - while len(lora_cache) > shared.opts.lora_in_memory_limit: - name = next(iter(lora_cache)) - lora_cache.pop(name, None) - if len(loaded_networks) > 0 and debug: - shared.log.debug(f'LoRA loaded={len(loaded_networks)} cache={list(lora_cache)}') - devices.torch_gc() - - if recompile_model: - shared.log.info("LoRA recompiling model") - sd_models_compile.compile_diffusers(shared.sd_model) - - -def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention, diffusers.models.lora.LoRACompatibleLinear, diffusers.models.lora.LoRACompatibleConv]): - t0 = time.time() - weights_backup = getattr(self, "network_weights_backup", None) - bias_backup = getattr(self, "network_bias_backup", None) - if weights_backup is None and 
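The in-memory cache is trimmed by repeatedly dropping the oldest entry; because Python dicts preserve insertion order, next(iter(cache)) is the first network that was loaded. A tiny standalone sketch of that eviction policy:

# Sketch of the insertion-order eviction used for lora_cache.
cache = {"a": 1, "b": 2, "c": 3}
limit = 2
while len(cache) > limit:
    oldest = next(iter(cache))   # dicts keep insertion order, so this is the earliest entry
    cache.pop(oldest, None)
print(cache)   # {'b': 2, 'c': 3}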
bias_backup is None: - return - # if debug: - # shared.log.debug('LoRA restore weights') - if weights_backup is not None: - if isinstance(self, torch.nn.MultiheadAttention): - self.in_proj_weight.copy_(weights_backup[0]) - self.out_proj.weight.copy_(weights_backup[1]) - else: - self.weight.copy_(weights_backup) - if bias_backup is not None: - if isinstance(self, torch.nn.MultiheadAttention): - self.out_proj.bias.copy_(bias_backup) - else: - self.bias.copy_(bias_backup) - else: - if isinstance(self, torch.nn.MultiheadAttention): - self.out_proj.bias = None - else: - self.bias = None - t1 = time.time() - timer['restore'] += t1 - t0 - - -def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention, diffusers.models.lora.LoRACompatibleLinear, diffusers.models.lora.LoRACompatibleConv]): - """ - Applies the currently selected set of networks to the weights of torch layer self. - If weights already have this particular set of networks applied, does nothing. - If not, restores orginal weights from backup and alters weights according to networks. - """ - network_layer_name = getattr(self, 'network_layer_name', None) - if network_layer_name is None: - return - t0 = time.time() - current_names = getattr(self, "network_current_names", ()) - wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks) - weights_backup = getattr(self, "network_weights_backup", None) - if weights_backup is None and wanted_names != (): # pylint: disable=C1803 - if current_names != (): - raise RuntimeError("no backup weights found and current weights are not unchanged") - if isinstance(self, torch.nn.MultiheadAttention): - weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True)) - else: - weights_backup = self.weight.to(devices.cpu, copy=True) - self.network_weights_backup = weights_backup - bias_backup = getattr(self, "network_bias_backup", None) - if bias_backup is None: - if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: - bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) - elif getattr(self, 'bias', None) is not None: - bias_backup = self.bias.to(devices.cpu, copy=True) - else: - bias_backup = None - self.network_bias_backup = bias_backup - - if current_names != wanted_names: - network_restore_weights_from_backup(self) - for net in loaded_networks: - # default workflow where module is known and has weights - module = net.modules.get(network_layer_name, None) - if module is not None and hasattr(self, 'weight'): - try: - with devices.inference_context(): - updown, ex_bias = module.calc_updown(self.weight) - if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: - # inpainting model. 
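Before any delta is added, the original tensors are copied once to CPU and stashed on the module; restoring is then a copy_ back from that stash, which makes switching or removing LoRAs cheap. A condensed sketch of the backup, modify, restore cycle for a plain layer (ignoring the MultiheadAttention special case):

import torch

# Condensed sketch of the cycle used by network_apply_weights / network_restore_weights_from_backup.
layer = torch.nn.Linear(8, 8, bias=False)

if getattr(layer, "network_weights_backup", None) is None:
    layer.network_weights_backup = layer.weight.detach().to("cpu", copy=True)   # one-time CPU copy

with torch.no_grad():
    layer.weight += 0.1   # stand-in for the computed LoRA delta

with torch.no_grad():
    layer.weight.copy_(layer.network_weights_backup)   # restore the original weights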
zero pad updown to make channel[1] 4 to 9 - updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) # pylint: disable=not-callable - self.weight += updown - if ex_bias is not None and hasattr(self, 'bias'): - if self.bias is None: - self.bias = torch.nn.Parameter(ex_bias) - else: - self.bias += ex_bias - except RuntimeError as e: - extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 - if debug: - module_name = net.modules.get(network_layer_name, None) - shared.log.error(f"LoRA apply weight name={net.name} module={module_name} layer={network_layer_name} {e}") - errors.display(e, 'LoRA apply weight') - raise RuntimeError('LoRA apply weight') from e - continue - # alternative workflow looking at _*_proj layers - module_q = net.modules.get(network_layer_name + "_q_proj", None) - module_k = net.modules.get(network_layer_name + "_k_proj", None) - module_v = net.modules.get(network_layer_name + "_v_proj", None) - module_out = net.modules.get(network_layer_name + "_out_proj", None) - if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: - try: - with devices.inference_context(): - updown_q, _ = module_q.calc_updown(self.in_proj_weight) - updown_k, _ = module_k.calc_updown(self.in_proj_weight) - updown_v, _ = module_v.calc_updown(self.in_proj_weight) - updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) - updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight) - self.in_proj_weight += updown_qkv - self.out_proj.weight += updown_out - if ex_bias is not None: - if self.out_proj.bias is None: - self.out_proj.bias = torch.nn.Parameter(ex_bias) - else: - self.out_proj.bias += ex_bias - except RuntimeError as e: - if debug: - shared.log.debug(f"LoRA network={net.name} layer={network_layer_name} {e}") - extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 - continue - if module is None: - continue - shared.log.warning(f"LoRA network={net.name} layer={network_layer_name} unsupported operation") - extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 - self.network_current_names = wanted_names - t1 = time.time() - timer['apply'] += t1 - t0 - - -def network_forward(module, input, original_forward): # pylint: disable=W0622 - """ - Old way of applying Lora by executing operations during layer's forward. - Stacking many loras this way results in big performance degradation. 
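Inpainting checkpoints have a first conv layer with 9 input channels instead of 4, while LoRAs trained on the base model produce a 4-channel delta; the pad call above grows the delta's channel dimension with zeros so the shapes line up. A shape-only sketch:

import torch

# Shape-only sketch of padding a 4-channel conv delta to match a 9-channel inpainting conv.
updown = torch.randn(320, 4, 3, 3)                             # delta computed for the base model's conv_in
updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))   # pad dim 1 (input channels) with 5 zero channels
print(updown.shape)   # torch.Size([320, 9, 3, 3])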
- """ - if len(loaded_networks) == 0: - return original_forward(module, input) - input = devices.cond_cast_unet(input) - network_restore_weights_from_backup(module) - network_reset_cached_weight(module) - y = original_forward(module, input) - network_layer_name = getattr(module, 'network_layer_name', None) - for lora in loaded_networks: - module = lora.modules.get(network_layer_name, None) - if module is None: - continue - y = module.forward(input, y) - return y - - -def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): - self.network_current_names = () - self.network_weights_backup = None - - -def network_Linear_forward(self, input): # pylint: disable=W0622 - if shared.opts.lora_functional: - return network_forward(self, input, originals.Linear_forward) - network_apply_weights(self) - return originals.Linear_forward(self, input) - - -def network_Linear_load_state_dict(self, *args, **kwargs): - network_reset_cached_weight(self) - return originals.Linear_load_state_dict(self, *args, **kwargs) - - -def network_Conv2d_forward(self, input): # pylint: disable=W0622 - if shared.opts.lora_functional: - return network_forward(self, input, originals.Conv2d_forward) - network_apply_weights(self) - return originals.Conv2d_forward(self, input) - - -def network_Conv2d_load_state_dict(self, *args, **kwargs): - network_reset_cached_weight(self) - return originals.Conv2d_load_state_dict(self, *args, **kwargs) - - -def network_GroupNorm_forward(self, input): # pylint: disable=W0622 - if shared.opts.lora_functional: - return network_forward(self, input, originals.GroupNorm_forward) - network_apply_weights(self) - return originals.GroupNorm_forward(self, input) - - -def network_GroupNorm_load_state_dict(self, *args, **kwargs): - network_reset_cached_weight(self) - return originals.GroupNorm_load_state_dict(self, *args, **kwargs) - - -def network_LayerNorm_forward(self, input): # pylint: disable=W0622 - if shared.opts.lora_functional: - return network_forward(self, input, originals.LayerNorm_forward) - network_apply_weights(self) - return originals.LayerNorm_forward(self, input) - - -def network_LayerNorm_load_state_dict(self, *args, **kwargs): - network_reset_cached_weight(self) - return originals.LayerNorm_load_state_dict(self, *args, **kwargs) - - -def network_MultiheadAttention_forward(self, *args, **kwargs): - network_apply_weights(self) - return originals.MultiheadAttention_forward(self, *args, **kwargs) - - -def network_MultiheadAttention_load_state_dict(self, *args, **kwargs): - network_reset_cached_weight(self) - return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs) - - -def list_available_networks(): - available_networks.clear() - available_network_aliases.clear() - forbidden_network_aliases.clear() - available_network_hash_lookup.clear() - forbidden_network_aliases.update({"none": 1, "Addams": 1}) - os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) - candidates = [] - if os.path.exists(shared.cmd_opts.lora_dir): - candidates += list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) - else: - shared.log.warning('LoRA directory not found: path="{shared.cmd_opts.lora_dir}"') - if os.path.exists(shared.cmd_opts.lyco_dir): - candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) - - def add_network(filename): - if os.path.isdir(filename): - return - name = os.path.splitext(os.path.basename(filename))[0] - try: - entry = 
network.NetworkOnDisk(name, filename) - available_networks[entry.name] = entry - if entry.alias in available_network_aliases: - forbidden_network_aliases[entry.alias.lower()] = 1 - available_network_aliases[entry.name] = entry - available_network_aliases[entry.alias] = entry - if entry.shorthash: - available_network_hash_lookup[entry.shorthash] = entry - except OSError as e: # should catch FileNotFoundError and PermissionError etc. - shared.log.error(f"Failed to load network {name} from {filename} {e}") - - with concurrent.futures.ThreadPoolExecutor(max_workers=shared.max_workers) as executor: - for fn in candidates: - executor.submit(add_network, fn) - - -def infotext_pasted(infotext, params): # pylint: disable=W0613 - if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]: - return # if the other extension is active, it will handle those fields, no need to do anything - added = [] - for k in params: - if not k.startswith("AddNet Model "): - continue - num = k[13:] - if params.get("AddNet Module " + num) != "LoRA": - continue - name = params.get("AddNet Model " + num) - if name is None: - continue - m = re_network_name.match(name) - if m: - name = m.group(1) - multiplier = params.get("AddNet Weight A " + num, "1.0") - added.append(f"") - if added: - params["Prompt"] += "\n" + "".join(added) - - -list_available_networks() +from typing import Union, List +import os +import re +import time +import concurrent +import lora_patches +import network +import network_lora +import network_hada +import network_ia3 +import network_oft +import network_lokr +import network_full +import network_norm +import network_glora +import lora_convert +import torch +import diffusers.models.lora +from modules import shared, devices, sd_models, sd_models_compile, errors, scripts + + +debug = os.environ.get('SD_LORA_DEBUG', None) is not None +originals: lora_patches.LoraPatches = None +extra_network_lora = None +available_networks = {} +available_network_aliases = {} +loaded_networks: List[network.Network] = [] +timer = { 'load': 0, 'apply': 0, 'restore': 0 } +# networks_in_memory = {} +lora_cache = {} +available_network_hash_lookup = {} +forbidden_network_aliases = {} +re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)") +module_types = [ + network_lora.ModuleTypeLora(), + network_hada.ModuleTypeHada(), + network_ia3.ModuleTypeIa3(), + network_oft.ModuleTypeOFT(), + network_lokr.ModuleTypeLokr(), + network_full.ModuleTypeFull(), + network_norm.ModuleTypeNorm(), + network_glora.ModuleTypeGLora(), +] +convert_diffusers_name_to_compvis = lora_convert.convert_diffusers_name_to_compvis # supermerger compatibility item + + +def assign_network_names_to_compvis_modules(sd_model): + network_layer_mapping = {} + if shared.backend == shared.Backend.DIFFUSERS: + if not hasattr(shared.sd_model, 'text_encoder') or not hasattr(shared.sd_model, 'unet'): + return + for name, module in shared.sd_model.text_encoder.named_modules(): + prefix = "lora_te1_" if shared.sd_model_type == "sdxl" else "lora_te_" + network_name = prefix + name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + if shared.sd_model_type == "sdxl": + for name, module in shared.sd_model.text_encoder_2.named_modules(): + network_name = "lora_te2_" + name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + for name, module in shared.sd_model.unet.named_modules(): + network_name = "lora_unet_" + name.replace(".", "_") + 
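Network discovery is I/O bound (reading safetensors metadata and cached hashes), so candidate files are handed to a thread pool instead of being processed serially; leaving the with-block waits for all submitted work to finish. A generic sketch of that pattern with an illustrative worker body:

import concurrent.futures

# Generic sketch of the thread-pool scan used by list_available_networks.
def add_network_stub(filename: str) -> None:
    print("indexed", filename)   # the real worker builds a NetworkOnDisk entry here

candidates = ["a.safetensors", "b.safetensors", "c.pt"]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    for fn in candidates:
        executor.submit(add_network_stub, fn)
# exiting the with-block implicitly joins all pending tasks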
network_layer_mapping[network_name] = module + module.network_layer_name = network_name + else: + if not hasattr(shared.sd_model, 'cond_stage_model'): + return + for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules(): + network_name = name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + for name, module in shared.sd_model.model.named_modules(): + network_name = name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + sd_model.network_layer_mapping = network_layer_mapping + + +def load_diffusers(name, network_on_disk, lora_scale=1.0) -> network.Network: + t0 = time.time() + cached = lora_cache.get(name, None) + # if debug: + shared.log.debug(f'LoRA load: name="{name}" file="{network_on_disk.filename}" type=diffusers {"cached" if cached else ""} fuse={shared.opts.lora_fuse_diffusers}') + if cached is not None: + return cached + if shared.backend != shared.Backend.DIFFUSERS: + return None + shared.sd_model.load_lora_weights(network_on_disk.filename) + if shared.opts.lora_fuse_diffusers: + shared.sd_model.fuse_lora(lora_scale=lora_scale) + net = network.Network(name, network_on_disk) + net.mtime = os.path.getmtime(network_on_disk.filename) + lora_cache[name] = net + t1 = time.time() + timer['load'] += t1 - t0 + return net + + +def load_network(name, network_on_disk) -> network.Network: + t0 = time.time() + cached = lora_cache.get(name, None) + if debug: + shared.log.debug(f'LoRA load: name="{name}" file="{network_on_disk.filename}" type=lora {"cached" if cached else ""}') + if cached is not None: + return cached + net = network.Network(name, network_on_disk) + net.mtime = os.path.getmtime(network_on_disk.filename) + sd = sd_models.read_state_dict(network_on_disk.filename) + assign_network_names_to_compvis_modules(shared.sd_model) # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0 + keys_failed_to_match = {} + matched_networks = {} + convert = lora_convert.KeyConvert() + for key_network, weight in sd.items(): + parts = key_network.split('.') + if len(parts) > 5: # messy handler for diffusers peft lora + key_network_without_network_parts = '_'.join(parts[:-2]) + if not key_network_without_network_parts.startswith('lora_'): + key_network_without_network_parts = 'lora_' + key_network_without_network_parts + network_part = '.'.join(parts[-2:]).replace('lora_A', 'lora_down').replace('lora_B', 'lora_up') + else: + key_network_without_network_parts, network_part = key_network.split(".", 1) + # if debug: + # shared.log.debug(f'LoRA load: name="{name}" full={key_network} network={network_part} key={key_network_without_network_parts}') + key, sd_module = convert(key_network_without_network_parts) + if sd_module is None: + keys_failed_to_match[key_network] = key + continue + if key not in matched_networks: + matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module) + matched_networks[key].w[network_part] = weight + for key, weights in matched_networks.items(): + net_module = None + for nettype in module_types: + net_module = nettype.create_module(net, weights) + if net_module is not None: + break + if net_module is None: + shared.log.error(f'LoRA unhandled: name={name} key={key} weights={weights.w.keys()}') + else: + net.modules[key] = net_module + if len(keys_failed_to_match) > 0: + shared.log.warning(f"LoRA file={network_on_disk.filename} 
unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}") + if debug: + shared.log.debug(f"LoRA file={network_on_disk.filename} unmatched={keys_failed_to_match}") + elif debug: + shared.log.debug(f"LoRA file={network_on_disk.filename} unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}") + lora_cache[name] = net + t1 = time.time() + timer['load'] += t1 - t0 + return net + + +def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): + networks_on_disk = [available_network_aliases.get(name, None) for name in names] + if any(x is None for x in networks_on_disk): + list_available_networks() + networks_on_disk = [available_network_aliases.get(name, None) for name in names] + failed_to_load_networks = [] + + recompile_model = False + if ((shared.opts.cuda_compile and shared.opts.cuda_compile_backend == "openvino_fx") or + shared.opts.nncf_compress_weights or shared.opts.nncf_compress_text_encoder_weights): + if len(names) == len(shared.compiled_model_state.lora_model): + for i, name in enumerate(names): + if shared.compiled_model_state.lora_model[i] != f"{name}:{te_multipliers[i] if te_multipliers else 1.0}": + recompile_model = True + shared.compiled_model_state.lora_model = [] + break + if not recompile_model: + if len(loaded_networks) > 0 and debug: + shared.log.debug('OpenVINO: Skipping LoRa loading') + return + else: + recompile_model = True + shared.compiled_model_state.lora_model = [] + if recompile_model: + backup_cuda_compile = shared.opts.cuda_compile + backup_nncf_compress_weights = shared.opts.nncf_compress_weights + backup_nncf_compress_text_encoder_weights = shared.opts.nncf_compress_text_encoder_weights + shared.compiled_model_state.lora_compile = True + sd_models.unload_model_weights(op='model') + shared.opts.cuda_compile = False + shared.opts.nncf_compress_weights = False + shared.opts.nncf_compress_text_encoder_weights = False + sd_models.reload_model_weights(op='model') + shared.opts.cuda_compile = backup_cuda_compile + shared.opts.nncf_compress_weights = backup_nncf_compress_weights + shared.opts.nncf_compress_text_encoder_weights = backup_nncf_compress_text_encoder_weights + + loaded_networks.clear() + for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)): + net = None + if network_on_disk is not None: + if debug: + shared.log.debug(f'LoRA load start: name="{name}" file="{network_on_disk.filename}"') + try: + if recompile_model: + shared.compiled_model_state.lora_model.append(f"{name}:{te_multipliers[i] if te_multipliers else 1.0}") + if shared.backend == shared.Backend.DIFFUSERS and shared.opts.lora_force_diffusers: # OpenVINO only works with Diffusers LoRa loading. 
+ # or getattr(network_on_disk, 'shorthash', '').lower() == 'aaebf6360f7d' # sd15-lcm + # or getattr(network_on_disk, 'shorthash', '').lower() == '3d18b05e4f56' # sdxl-lcm + # or getattr(network_on_disk, 'shorthash', '').lower() == '813ea5fb1c67' # turbo sdxl-turbo + net = load_diffusers(name, network_on_disk, lora_scale=te_multipliers[i] if te_multipliers else 1.0) + else: + net = load_network(name, network_on_disk) + except Exception as e: + shared.log.error(f"LoRA load failed: file={network_on_disk.filename} {e}") + if debug: + errors.display(e, f"LoRA load failed file={network_on_disk.filename}") + continue + net.mentioned_name = name + network_on_disk.read_hash() + if net is None: + failed_to_load_networks.append(name) + shared.log.error(f"LoRA unknown type: network={name}") + continue + net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0 + net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0 + net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0 + loaded_networks.append(net) + + while len(lora_cache) > shared.opts.lora_in_memory_limit: + name = next(iter(lora_cache)) + lora_cache.pop(name, None) + if len(loaded_networks) > 0 and debug: + shared.log.debug(f'LoRA loaded={len(loaded_networks)} cache={list(lora_cache)}') + devices.torch_gc() + + if recompile_model: + shared.log.info("LoRA recompiling model") + sd_models_compile.compile_diffusers(shared.sd_model) + + +def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention, diffusers.models.lora.LoRACompatibleLinear, diffusers.models.lora.LoRACompatibleConv]): + t0 = time.time() + weights_backup = getattr(self, "network_weights_backup", None) + bias_backup = getattr(self, "network_bias_backup", None) + if weights_backup is None and bias_backup is None: + return + # if debug: + # shared.log.debug('LoRA restore weights') + if weights_backup is not None: + if isinstance(self, torch.nn.MultiheadAttention): + self.in_proj_weight.copy_(weights_backup[0]) + self.out_proj.weight.copy_(weights_backup[1]) + else: + self.weight.copy_(weights_backup) + if bias_backup is not None: + if isinstance(self, torch.nn.MultiheadAttention): + self.out_proj.bias.copy_(bias_backup) + else: + self.bias.copy_(bias_backup) + else: + if isinstance(self, torch.nn.MultiheadAttention): + self.out_proj.bias = None + else: + self.bias = None + t1 = time.time() + timer['restore'] += t1 - t0 + + +def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention, diffusers.models.lora.LoRACompatibleLinear, diffusers.models.lora.LoRACompatibleConv]): + """ + Applies the currently selected set of networks to the weights of torch layer self. + If weights already have this particular set of networks applied, does nothing. + If not, restores orginal weights from backup and alters weights according to networks. 
+ """ + network_layer_name = getattr(self, 'network_layer_name', None) + if network_layer_name is None: + return + t0 = time.time() + current_names = getattr(self, "network_current_names", ()) + wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks) + weights_backup = getattr(self, "network_weights_backup", None) + if weights_backup is None and wanted_names != (): # pylint: disable=C1803 + if current_names != (): + raise RuntimeError("no backup weights found and current weights are not unchanged") + if isinstance(self, torch.nn.MultiheadAttention): + weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True)) + else: + weights_backup = self.weight.to(devices.cpu, copy=True) + self.network_weights_backup = weights_backup + bias_backup = getattr(self, "network_bias_backup", None) + if bias_backup is None: + if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: + bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) + elif getattr(self, 'bias', None) is not None: + bias_backup = self.bias.to(devices.cpu, copy=True) + else: + bias_backup = None + self.network_bias_backup = bias_backup + + if current_names != wanted_names: + network_restore_weights_from_backup(self) + for net in loaded_networks: + # default workflow where module is known and has weights + module = net.modules.get(network_layer_name, None) + if module is not None and hasattr(self, 'weight'): + try: + with devices.inference_context(): + updown, ex_bias = module.calc_updown(self.weight) + if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: + # inpainting model. zero pad updown to make channel[1] 4 to 9 + updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) # pylint: disable=not-callable + self.weight += updown + if ex_bias is not None and hasattr(self, 'bias'): + if self.bias is None: + self.bias = torch.nn.Parameter(ex_bias) + else: + self.bias += ex_bias + except RuntimeError as e: + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 + if debug: + module_name = net.modules.get(network_layer_name, None) + shared.log.error(f"LoRA apply weight name={net.name} module={module_name} layer={network_layer_name} {e}") + errors.display(e, 'LoRA apply weight') + raise RuntimeError('LoRA apply weight') from e + continue + # alternative workflow looking at _*_proj layers + module_q = net.modules.get(network_layer_name + "_q_proj", None) + module_k = net.modules.get(network_layer_name + "_k_proj", None) + module_v = net.modules.get(network_layer_name + "_v_proj", None) + module_out = net.modules.get(network_layer_name + "_out_proj", None) + if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: + try: + with devices.inference_context(): + updown_q, _ = module_q.calc_updown(self.in_proj_weight) + updown_k, _ = module_k.calc_updown(self.in_proj_weight) + updown_v, _ = module_v.calc_updown(self.in_proj_weight) + updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) + updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight) + self.in_proj_weight += updown_qkv + self.out_proj.weight += updown_out + if ex_bias is not None: + if self.out_proj.bias is None: + self.out_proj.bias = torch.nn.Parameter(ex_bias) + else: + self.out_proj.bias += ex_bias + except RuntimeError as e: + if debug: + shared.log.debug(f"LoRA network={net.name} layer={network_layer_name} {e}") + extra_network_lora.errors[net.name] 
+
+
+def network_forward(module, input, original_forward): # pylint: disable=W0622
+    """
+    Old way of applying Lora by executing operations during layer's forward.
+    Stacking many loras this way results in big performance degradation.
+    """
+    if len(loaded_networks) == 0:
+        return original_forward(module, input)
+    input = devices.cond_cast_unet(input)
+    network_restore_weights_from_backup(module)
+    network_reset_cached_weight(module)
+    y = original_forward(module, input)
+    network_layer_name = getattr(module, 'network_layer_name', None)
+    for lora in loaded_networks:
+        module = lora.modules.get(network_layer_name, None)
+        if module is None:
+            continue
+        y = module.forward(input, y)
+    return y
+
+
+def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
+    self.network_current_names = ()
+    self.network_weights_backup = None
+
+
+def network_Linear_forward(self, input): # pylint: disable=W0622
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.Linear_forward)
+    network_apply_weights(self)
+    return originals.Linear_forward(self, input)
+
+
+def network_Linear_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+    return originals.Linear_load_state_dict(self, *args, **kwargs)
+
+
+def network_Conv2d_forward(self, input): # pylint: disable=W0622
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.Conv2d_forward)
+    network_apply_weights(self)
+    return originals.Conv2d_forward(self, input)
+
+
+def network_Conv2d_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+    return originals.Conv2d_load_state_dict(self, *args, **kwargs)
+
+
+def network_GroupNorm_forward(self, input): # pylint: disable=W0622
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.GroupNorm_forward)
+    network_apply_weights(self)
+    return originals.GroupNorm_forward(self, input)
+
+
+def network_GroupNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+    return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
+
+
+def network_LayerNorm_forward(self, input): # pylint: disable=W0622
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.LayerNorm_forward)
+    network_apply_weights(self)
+    return originals.LayerNorm_forward(self, input)
+
+
+def network_LayerNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+    return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
+
+
+def network_MultiheadAttention_forward(self, *args, **kwargs):
+    network_apply_weights(self)
+    return originals.MultiheadAttention_forward(self, *args, **kwargs)
+
+
+def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+    return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
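Reviewer note: the wrappers above all follow one pattern: keep a reference to the stock `torch.nn` forward, then decide per call whether to merge weights up front (`network_apply_weights`) or fall back to the slower per-forward path. A hedged sketch of that monkeypatch pattern on `torch.nn.Linear`; the `originals` container and the hook body are stand-ins, not the extension's exact wiring:

```python
import torch

original_linear_forward = torch.nn.Linear.forward  # plays the role of originals.Linear_forward

def patched_linear_forward(self, x):
    # a real hook would call network_apply_weights(self) here before delegating
    return original_linear_forward(self, x)

torch.nn.Linear.forward = patched_linear_forward    # install, as the extension does at startup
y = torch.nn.Linear(4, 4)(torch.randn(1, 4))        # every Linear call now routes through the hook
torch.nn.Linear.forward = original_linear_forward   # uninstall to restore stock behavior
```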
+
+
+def list_available_networks():
+    available_networks.clear()
+    available_network_aliases.clear()
+    forbidden_network_aliases.clear()
+    available_network_hash_lookup.clear()
+    forbidden_network_aliases.update({"none": 1, "Addams": 1})
+    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+    candidates = []
+    if os.path.exists(shared.cmd_opts.lora_dir):
+        candidates += list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+    else:
+        shared.log.warning(f'LoRA directory not found: path="{shared.cmd_opts.lora_dir}"')
+    if os.path.exists(shared.cmd_opts.lyco_dir):
+        candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+
+    def add_network(filename):
+        if os.path.isdir(filename):
+            return
+        name = os.path.splitext(os.path.basename(filename))[0]
+        try:
+            entry = network.NetworkOnDisk(name, filename)
+            available_networks[entry.name] = entry
+            if entry.alias in available_network_aliases:
+                forbidden_network_aliases[entry.alias.lower()] = 1
+            available_network_aliases[entry.name] = entry
+            available_network_aliases[entry.alias] = entry
+            if entry.shorthash:
+                available_network_hash_lookup[entry.shorthash] = entry
+        except OSError as e: # should catch FileNotFoundError and PermissionError etc.
+            shared.log.error(f"Failed to load network {name} from {filename} {e}")
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=shared.max_workers) as executor:
+        for fn in candidates:
+            executor.submit(add_network, fn)
+
+
+def infotext_pasted(infotext, params): # pylint: disable=W0622
+    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
+        return # if the other extension is active, it will handle those fields, no need to do anything
+    added = []
+    for k in params:
+        if not k.startswith("AddNet Model "):
+            continue
+        num = k[13:]
+        if params.get("AddNet Module " + num) != "LoRA":
+            continue
+        name = params.get("AddNet Model " + num)
+        if name is None:
+            continue
+        m = re_network_name.match(name)
+        if m:
+            name = m.group(1)
+        multiplier = params.get("AddNet Weight A " + num, "1.0")
+        added.append(f"<lora:{name}:{multiplier}>")
+    if added:
+        params["Prompt"] += "\n" + "".join(added)
+
+
+list_available_networks()
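Reviewer note: `infotext_pasted` above rewrites legacy additional-networks fields into inline `<lora:name:multiplier>` prompt syntax. A self-contained sketch of that conversion with a made-up `params` payload; the `re_network_name` pattern (a "name(hash)" suffix) is an assumption, since its definition sits outside this hunk:

```python
import re

re_network_name = re.compile(r"(.*)\(([0-9a-fA-F]+)\)")  # assumed "name(hash)" form

params = {
    "AddNet Module 1": "LoRA",
    "AddNet Model 1": "myLora(ab12cd34)",
    "AddNet Weight A 1": "0.6",
    "Prompt": "a photo of a cat",
}
added = []
for k in list(params):
    if not k.startswith("AddNet Model "):
        continue
    num = k[len("AddNet Model "):]                 # same slice as k[13:] above
    if params.get("AddNet Module " + num) != "LoRA":
        continue
    name = params[k]
    m = re_network_name.match(name)
    if m:
        name = m.group(1)                          # strip the "(hash)" suffix
    multiplier = params.get("AddNet Weight A " + num, "1.0")
    added.append(f"<lora:{name}:{multiplier}>")
if added:
    params["Prompt"] += "\n" + "".join(added)
print(params["Prompt"])                            # a photo of a cat\n<lora:myLora:0.6>
```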
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 8130d2b5f..4bda68638 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,62 +1,62 @@
-import re
-import networks
-import lora # noqa:F401 # pylint: disable=unused-import
-from network import NetworkOnDisk
-from ui_extra_networks_lora import ExtraNetworksPageLora
-from extra_networks_lora import ExtraNetworkLora
-from modules import script_callbacks, ui_extra_networks, extra_networks
-
-
-re_lora = re.compile(""),
-                "local_preview": f"{path}.{shared.opts.samples_format}",
-                "metadata": json.dumps(l.metadata, indent=4) if l.metadata else None,
-                "mtime": os.path.getmtime(l.filename),
-                "size": os.path.getsize(l.filename),
-            }
-            info = self.find_info(l.filename)
-
-            tags = {}
-            possible_tags = l.metadata.get('ss_tag_frequency', {}) if l.metadata is not None else {} # tags from model metadata
-            if isinstance(possible_tags, str):
-                possible_tags = {}
-            for k, v in possible_tags.items():
-                words = k.split('_', 1) if '_' in k else [v, k]
-                words = [str(w).replace('.json', '') for w in words]
-                if words[0] == '{}':
-                    words[0] = 0
-                tag = ' '.join(words[1:])
-                tags[tag] = words[0]
-            versions = info.get('modelVersions', []) # trigger words from info json
-            for v in versions:
-                possible_tags = v.get('trainedWords', [])
-                if isinstance(possible_tags, list):
-                    for tag in possible_tags:
-                        if tag not in tags:
-                            tags[tag] = 0
-            search = {}
-            possible_tags = info.get('tags', []) # tags from info json
-            if not isinstance(possible_tags, list):
-                possible_tags = [v for v in possible_tags.values()]
-            for v in possible_tags:
-                search[v] = 0
-            if len(list(tags)) == 0:
-                tags = search
-
-            bad_chars = [';', ':', '<', ">", "*", '?', '\'', '\"']
-            clean_tags = {}
-            for k, v in tags.items():
-                tag = ''.join(i for i in k if not i in bad_chars)
-                clean_tags[tag] = v
-
-            item["info"] = info
-            item["description"] = self.find_description(l.filename, info) # use existing info instead of double-read
-            item["tags"] = clean_tags
-            item["search_term"] = f'{self.search_terms_from_path(l.filename)} {" ".join(tags.keys())} {" ".join(search.keys())}'
-
-            return item
-        except Exception as e:
-            shared.log.debug(f"Extra networks error: type=lora file={name} {e}")
-            return None
-
-    def list_items(self):
-        with concurrent.futures.ThreadPoolExecutor(max_workers=shared.max_workers) as executor:
-            future_items = {executor.submit(self.create_item, net): net for net in networks.available_networks}
-            for future in concurrent.futures.as_completed(future_items):
-                item = future.result()
-                if item is not None:
-                    yield item
-
-    def allowed_directories_for_previews(self):
-        return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir]
+import os
+import json
+import concurrent
+import network
+import networks
+from modules import shared, ui_extra_networks
+
+
+class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
+    def __init__(self):
+        super().__init__('Lora')
+        self.list_time = 0
+
+    def refresh(self):
+        networks.list_available_networks()
+
+    def create_item(self, name):
+        l = networks.available_networks.get(name)
+        try:
+            path, _ext = os.path.splitext(l.filename)
+            name = os.path.splitext(os.path.relpath(l.filename, shared.cmd_opts.lora_dir))[0]
+            if shared.backend == shared.Backend.ORIGINAL:
+                if l.sd_version == network.SdVersion.SDXL:
+                    return None
+            elif shared.backend == shared.Backend.DIFFUSERS:
+                if shared.sd_model_type == 'none': # return all when model is not loaded
+                    pass
+                elif shared.sd_model_type == 'sdxl':
+                    if l.sd_version == network.SdVersion.SD1 or l.sd_version == network.SdVersion.SD2:
+                        return None
+                elif shared.sd_model_type == 'sd':
+                    if l.sd_version == network.SdVersion.SDXL:
+                        return None
+
+            item = {
+                "type": 'Lora',
+                "name": name,
+                "filename": l.filename,
+                "hash": l.shorthash,
+                "preview": self.find_preview(l.filename),
+                "prompt": json.dumps(f" "),
+                "local_preview": f"{path}.{shared.opts.samples_format}",
+                "metadata": json.dumps(l.metadata, indent=4) if l.metadata else None,
+                "mtime": os.path.getmtime(l.filename),
+                "size": os.path.getsize(l.filename),
+            }
+            info = self.find_info(l.filename)
+
+            tags = {}
+            possible_tags = l.metadata.get('ss_tag_frequency', {}) if l.metadata is not None else {} # tags from model metadata
+            if isinstance(possible_tags, str):
+                possible_tags = {}
+            for k, v in possible_tags.items():
+                words = k.split('_', 1) if '_' in k else [v, k]
+                words = [str(w).replace('.json', '') for w in words]
+                if words[0] == '{}':
+                    words[0] = 0
+                tag = ' '.join(words[1:])
+                tags[tag] = words[0]
+            versions = info.get('modelVersions', []) # trigger words from info json
+            for v in versions:
+                possible_tags = v.get('trainedWords', [])
+                if isinstance(possible_tags, list):
+                    for tag in possible_tags:
+                        if tag not in tags:
+                            tags[tag] = 0
+            search = {}
+            possible_tags = info.get('tags', []) # tags from info json
+            if not isinstance(possible_tags, list):
+                possible_tags = [v for v in possible_tags.values()]
+            for v in possible_tags:
+                search[v] = 0
+            if len(list(tags)) == 0:
+                tags = search
+
+            bad_chars = [';', ':', '<', ">", "*", '?', '\'', '\"']
+            clean_tags = {}
+            for k, v in tags.items():
+                tag = ''.join(i for i in k if not i in bad_chars)
+                clean_tags[tag] = v
+
+            item["info"] = info
+            item["description"] = self.find_description(l.filename, info) # use existing info instead of double-read
+            item["tags"] = clean_tags
+            item["search_term"] = f'{self.search_terms_from_path(l.filename)} {" ".join(tags.keys())} {" ".join(search.keys())}'
+
+            return item
+        except Exception as e:
+            shared.log.debug(f"Extra networks error: type=lora file={name} {e}")
+            return None
+
+    def list_items(self):
+        with concurrent.futures.ThreadPoolExecutor(max_workers=shared.max_workers) as executor:
+            future_items = {executor.submit(self.create_item, net): net for net in networks.available_networks}
+            for future in concurrent.futures.as_completed(future_items):
+                item = future.result()
+                if item is not None:
+                    yield item
+
+    def allowed_directories_for_previews(self):
+        return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir]
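Reviewer note: `create_item` above mines display tags out of kohya-style `ss_tag_frequency` metadata, whose keys are dataset folder names in `<repeats>_<name>` form with per-tag counts underneath. A sketch of that assumed payload shape and a straightforward aggregation; the extension receives the field already decoded, so the manual `json.loads` here is only to keep the demo self-contained, and the sample data is made up:

```python
import json

# made-up metadata payload in the kohya "<repeats>_<folder>" convention
metadata = {"ss_tag_frequency": json.dumps({"10_mychar": {"1girl": 120, "smile": 87}})}

possible_tags = metadata.get("ss_tag_frequency", "{}")
if isinstance(possible_tags, str):
    possible_tags = json.loads(possible_tags)
tags = {}
for dataset, freq in possible_tags.items():
    folder = dataset.split("_", 1)[1] if "_" in dataset else dataset  # "mychar"
    for tag, count in freq.items():
        tags[tag] = tags.get(tag, 0) + count
print(tags)  # {'1girl': 120, 'smile': 87}
```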
diff --git a/extensions-builtin/sd-webui-controlnet b/extensions-builtin/sd-webui-controlnet
index bb9483d46..f8f43a809 160000
--- a/extensions-builtin/sd-webui-controlnet
+++ b/extensions-builtin/sd-webui-controlnet
@@ -1 +1 @@
-Subproject commit bb9483d46f5a932fd35e8b4d04a3fdcc02dd9ff1
+Subproject commit f8f43a809fd0ce0ccb36d1abbe56fae3b8e18b60
diff --git a/html/licenses.html b/html/licenses.html
index 2ad803052..66b9c0bc6 100644
--- a/html/licenses.html
+++ b/html/licenses.html
@@ -1,690 +1,690 @@

CodeFormer

-Parts of CodeFormer code had to be copied to be compatible with GFPGAN.
-S-Lab License 1.0
-
-Copyright 2022 S-Lab
-
-Redistribution and use for non-commercial purpose in source and
-binary forms, with or without modification, are permitted provided
-that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in
-   the documentation and/or other materials provided with the
-   distribution.
-
-3. Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived
-   from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-In the event that redistribution and/or use for commercial purpose in
-source or binary forms, with or without modification is required,
-please contact the contributor(s) of the work.
-

ESRGAN

-Code for architecture and reading models copied.
-MIT License
-
-Copyright (c) 2021 victorca25
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

Real-ESRGAN

-Some code is copied to support ESRGAN models.
-BSD 3-Clause License
-
-Copyright (c) 2021, Xintao Wang
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-

InvokeAI

-Some code for compatibility with OSX is taken from lstein's repository.
-MIT License
-
-Copyright (c) 2022 InvokeAI Team
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

LDSR

-Code added by contributors, most likely copied from this repository.
-MIT License
-
-Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

CLIP Interrogator

-Some small amounts of code borrowed and reworked.
-MIT License
-
-Copyright (c) 2022 pharmapsychotic
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

SwinIR

-Code added by contributors, most likely copied from this repository.
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [2021] [SwinIR Authors]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-

Memory Efficient Attention

-The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that.
-MIT License
-
-Copyright (c) 2023 Alex Birch
-Copyright (c) 2023 Amin Rezaei
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

Scaled Dot Product Attention

-Some small amounts of code borrowed and reworked.
-   Copyright 2023 The HuggingFace Team. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-

Curated transformers

-The MPS workaround for nn.Linear on macOS 13.2.X is based on the MPS workaround for nn.Linear created by danieldk for Curated transformers
-The MIT License (MIT)
-
-Copyright (C) 2021 ExplosionAI GmbH
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-

TAESD

-Tiny AutoEncoder for Stable Diffusion option for live previews
-MIT License
-
-Copyright (c) 2023 Ollin Boer Bohan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

CodeFormer

+Parts of CodeFormer code had to be copied to be compatible with GFPGAN.
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+

ESRGAN

+Code for architecture and reading models copied.
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

Real-ESRGAN

+Some code is copied to support ESRGAN models.
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+

InvokeAI

+Some code for compatibility with OSX is taken from lstein's repository.
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

LDSR

+Code added by contributors, most likely copied from this repository.
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

CLIP Interrogator

+Some small amounts of code borrowed and reworked.
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

SwinIR

+Code added by contributors, most likely copied from this repository.
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [2021] [SwinIR Authors]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

Memory Efficient Attention

+The sub-quadratic cross-attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license entry reflects that.
+MIT License
+
+Copyright (c) 2023 Alex Birch
+Copyright (c) 2023 Amin Rezaei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

Scaled Dot Product Attention

+Small amounts of code borrowed and reworked.
+   Copyright 2023 The HuggingFace Team. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

Curated transformers

+The MPS workaround for nn.Linear on macOS 13.2.x is based on the workaround created by danieldk for Curated transformers.
+The MIT License (MIT)
+
+Copyright (C) 2021 ExplosionAI GmbH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+

TAESD

+Tiny AutoEncoder for Stable Diffusion, used as an option for live previews.
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/javascript/emerald-paradise.css b/javascript/emerald-paradise.css index db96216f1..1fe9a7424 100644 --- a/javascript/emerald-paradise.css +++ b/javascript/emerald-paradise.css @@ -1,297 +1,297 @@ -/* generic html tags */ -:root, .light, .dark { - --font: 'system-ui', 'ui-sans-serif', 'system-ui', "Roboto", sans-serif; - --font-mono: 'ui-monospace', 'Consolas', monospace; - --font-size: 16px; - --primary-100: #1e2223; /* bg color*/ - --primary-200: #242a2c; /* drop down menu/ prompt window fill*/ - --primary-300: #0a0c0e; /* black */ - --primary-400: #2a302c; /* small buttons*/ - --primary-500: #4b695d; /* main accent color green*/ - --primary-700: #273538; /* extension box fill*/ - --primary-800: #d15e84; /* pink(hover accent)*/ - --highlight-color: var(--primary-500); - --inactive-color: var(--primary--800); - --body-text-color: var(--neutral-100); - --body-text-color-subdued: var(--neutral-300); - --background-color: var(--primary-100); - --background-fill-primary: var(--input-background-fill); - --input-padding: 8px; - --input-background-fill: var(--primary-200); - --input-shadow: none; - --button-secondary-text-color: white; - --button-secondary-background-fill: var(--primary-400); - --button-secondary-background-fill-hover: var(--primary-700); - --block-title-text-color: var(--neutral-300); - --radius-sm: 1px; - --radius-lg: 6px; - --spacing-md: 4px; - --spacing-xxl: 8px; - --line-sm: 1.2em; - --line-md: 1.4em; -} - -html { font-size: var(--font-size); } -body, button, input, select, textarea { font-family: var(--font);} -button { max-width: 400px; } -img { background-color: var(--background-color); } -input[type=range] { height: var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } -input[type=range]::-webkit-slider-runnable-track, input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } -input[type=range]::-webkit-slider-thumb, input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } -input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } -::-webkit-scrollbar-track { background: #333333; } -::-webkit-scrollbar-thumb { background-color: var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } -div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } -div.compact { gap: 1em; } - -/* gradio style classes */ -fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } -.border-2 { border-width: 0; } -.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } -.bg-white { color: lightyellow; background-color: var(--inactive-color); } -.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } -.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } -.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: 
var(--radius-lg); box-shadow: 2px 2px 3px #111111; } -.gr-check-radio:checked { background-color: var(--highlight-color); } -.gr-compact { background-color: var(--background-color); } -.gr-form { border-width: 0; } -.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } -.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } -.gr-panel { background-color: var(--background-color); } -.eta-bar { display: none !important } -svg.feather.feather-image, .feather .feather-image { display: none } -.gap-2 { padding-top: 8px; } -.gr-box > div > div > input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } -.output-html { line-height: 1.2rem; overflow-x: hidden; } -.output-html > div { margin-bottom: 8px; } -.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ -.p-2 { padding: 0; } -.px-4 { padding-lefT: 1rem; padding-right: 1rem; } -.py-6 { padding-bottom: 0; } -.tabs { background-color: var(--background-color); } -.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } -.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } -div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} -#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;} -.label-wrap { background-color: #191919; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } -.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } -.small-accordion .label-wrap .icon { margin-right: 1em; } -.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} -button.selected {background: var(--button-primary-background-fill);} -.center.boundedheight.flex {background-color: var(--input-background-fill);} -.compact {border-radius: var(--border-radius-lg);} -#logMonitorData {background-color: var(--input-background-fill);} -#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } -#tab_extensions table, #tab_config table { width: 96vw; } -#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} -#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} - -/* automatic style classes */ -.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } -.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } -.gallery-item { box-shadow: none !important; } -.performance { color: #888; } -.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; } -.image-buttons { gap: 10px !important; justify-content: center; } -.image-buttons > button { max-width: 160px; } -.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg) } -#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; } - 
-/* gradio elements overrides */ -#div.gradio-container { overflow-x: hidden; } -#img2img_label_copy_to_img2img { font-weight: normal; } -#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; } -#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; } -#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; padding-top: 16px; } -#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; } -#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; } -#quicksettings button {padding: 0 0.5em 0.1em 0.5em;} -#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, #extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; } -#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; } -#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; } -#settings > div.flex-wrap { width: 15em; } -#txt2img_cfg_scale { min-width: 200px; } -#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; } -#txt2img_checkboxes, #img2img_checkboxes { margin-bottom: 0.2em; } -#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; } -#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; } -#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } - -#extras_upscale { margin-top: 10px } -#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); } -#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; } -#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) } -#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; } -#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; } - -/* based on gradio built-in dark theme */ -:root, .light, .dark { - --body-background-fill: var(--background-color); - --color-accent-soft: var(--neutral-700); - --background-fill-secondary: none; - --border-color-accent: var(--background-color); - --border-color-primary: var(--background-color); - --link-text-color-active: var(--primary-500); - --link-text-color: var(--secondary-500); - --link-text-color-hover: var(--secondary-400); - --link-text-color-visited: var(--secondary-600); - --shadow-spread: 1px; - --block-background-fill: None; - --block-border-color: var(--border-color-primary); - --block_border_width: None; - --block-info-text-color: var(--body-text-color-subdued); - --block-label-background-fill: var(--background-fill-secondary); - --block-label-border-color: var(--border-color-primary); - --block_label_border_width: None; - --block-label-text-color: var(--neutral-200); - --block_shadow: None; - --block_title_background_fill: None; - --block_title_border_color: None; - --block_title_border_width: None; - --panel-background-fill: var(--background-fill-secondary); - --panel-border-color: var(--border-color-primary); - --panel_border_width: None; - 
--checkbox-background-color: var(--primary-200); - --checkbox-background-color-focus: var(--primary-700); - --checkbox-background-color-hover: var(--primary-700); - --checkbox-background-color-selected: var(--primary-500); - --checkbox-border-color: transparent; - --checkbox-border-color-focus: var(--primary-800); - --checkbox-border-color-hover: var(--primary-800); - --checkbox-border-color-selected: var(--primary-800); - --checkbox-border-width: var(--input-border-width); - --checkbox-label-background-fill: None; - --checkbox-label-background-fill-hover: None; - --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); - --checkbox-label-border-color: var(--border-color-primary); - --checkbox-label-border-color-hover: var(--checkbox-label-border-color); - --checkbox-label-border-width: var(--input-border-width); - --checkbox-label-text-color: var(--body-text-color); - --checkbox-label-text-color-selected: var(--checkbox-label-text-color); - --error-background-fill: var(--background-fill-primary); - --error-border-color: var(--border-color-primary); - --error-text-color: #f768b7; /*was ef4444*/ - --input-background-fill-focus: var(--secondary-600); - --input-background-fill-hover: var(--input-background-fill); - --input-border-color: var(--background-color); - --input-border-color-focus: var(--primary-800); - --input-placeholder-color: var(--neutral-500); - --input-shadow-focus: None; - --loader_color: None; - --slider_color: None; - --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); - --table-border-color: var(--neutral-700); - --table-even-background-fill: var(--primary-300); - --table-odd-background-fill: var(--primary-200); - --table-row-focus: var(--color-accent-soft); - --button-border-width: var(--input-border-width); - --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); - --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); - --button-cancel-border-color: #dc2626; - --button-cancel-border-color-hover: var(--button-cancel-border-color); - --button-cancel-text-color: white; - --button-cancel-text-color-hover: var(--button-cancel-text-color); - --button-primary-background-fill: var(--primary-500); - --button-primary-background-fill-hover: var(--primary-800); - --button-primary-border-color: var(--primary-500); - --button-primary-border-color-hover: var(--button-primary-border-color); - --button-primary-text-color: white; - --button-primary-text-color-hover: var(--button-primary-text-color); - --button-secondary-border-color: var(--neutral-600); - --button-secondary-border-color-hover: var(--button-secondary-border-color); - --button-secondary-text-color-hover: var(--button-secondary-text-color); - --secondary-50: #eff6ff; - --secondary-100: #dbeafe; - --secondary-200: #bfdbfe; - --secondary-300: #93c5fd; - --secondary-400: #60a5fa; - --secondary-500: #3b82f6; - --secondary-600: #2563eb; - --secondary-700: #1d4ed8; - --secondary-800: #1e40af; - --secondary-900: #1e3a8a; - --secondary-950: #1d3660; - --neutral-50: #f0f0f0; /* */ - --neutral-100: #e8e8e3;/* majority of text (neutral gray yellow) */ - --neutral-200: #d0d0d0; - --neutral-300: #b3b5ac; /* top tab /sub text (light accent) */ - --neutral-400: #ffba85;/* tab title (bright orange) */ - --neutral-500: #48665b; /* prompt text (desat accent)*/ - --neutral-600: #373f39; /* tab outline color (accent color)*/ - --neutral-700: #2b373b; /* small settings tab accent */ - --neutral-800: #f379c2; /* 
bright pink accent */ - --neutral-900: #111827; - --neutral-950: #0b0f19; - --radius-xxs: 0; - --radius-xs: 0; - --radius-md: 0; - --radius-xl: 0; - --radius-xxl: 0; - --body-text-size: var(--text-md); - --body-text-weight: 400; - --embed-radius: var(--radius-lg); - --color-accent: var(--primary-500); - --shadow-drop: 0; - --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); - --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; - --block-border-width: 1px; - --block-info-text-size: var(--text-sm); - --block-info-text-weight: 400; - --block-label-border-width: 1px; - --block-label-margin: 0; - --block-label-padding: var(--spacing-sm) var(--spacing-lg); - --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; - --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); - --block-label-text-size: var(--text-sm); - --block-label-text-weight: 400; - --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); - --block-radius: var(--radius-lg); - --block-shadow: var(--shadow-drop); - --block-title-background-fill: none; - --block-title-border-color: none; - --block-title-border-width: 0; - --block-title-padding: 0; - --block-title-radius: none; - --block-title-text-size: var(--text-md); - --block-title-text-weight: 400; - --container-radius: var(--radius-lg); - --form-gap-width: 1px; - --layout-gap: var(--spacing-xxl); - --panel-border-width: 0; - --section-header-text-size: var(--text-md); - --section-header-text-weight: 400; - --checkbox-border-radius: var(--radius-sm); - --checkbox-label-gap: 2px; - --checkbox-label-padding: var(--spacing-md); - --checkbox-label-shadow: var(--shadow-drop); - --checkbox-label-text-size: var(--text-md); - --checkbox-label-text-weight: 400; - --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); - --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); - --checkbox-shadow: var(--input-shadow); - --error-border-width: 1px; - --input-border-width: 1px; - --input-radius: var(--radius-lg); - --input-text-size: var(--text-md); - --input-text-weight: 400; - --loader-color: var(--color-accent); - --prose-text-size: var(--text-md); - --prose-text-weight: 400; - --prose-header-text-weight: 600; - --slider-color: ; - --table-radius: var(--radius-lg); - --button-large-padding: 2px 6px; - --button-large-radius: var(--radius-lg); - --button-large-text-size: var(--text-lg); - --button-large-text-weight: 400; - --button-shadow: none; - --button-shadow-active: none; - --button-shadow-hover: none; - --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); - --button-small-radius: var(--radius-lg); - --button-small-text-size: var(--text-md); - --button-small-text-weight: 400; - --button-transition: none; - --size-9: 64px; - --size-14: 64px; -} +/* generic html tags */ +:root, .light, .dark { + --font: 'system-ui', 'ui-sans-serif', 'system-ui', "Roboto", sans-serif; + --font-mono: 'ui-monospace', 'Consolas', monospace; + --font-size: 16px; + --primary-100: #1e2223; /* bg color*/ + --primary-200: #242a2c; /* drop down menu/ prompt window fill*/ + --primary-300: #0a0c0e; /* black */ + --primary-400: #2a302c; /* small buttons*/ + --primary-500: #4b695d; /* 
main accent color green*/ + --primary-700: #273538; /* extension box fill*/ + --primary-800: #d15e84; /* pink(hover accent)*/ + --highlight-color: var(--primary-500); + --inactive-color: var(--primary--800); + --body-text-color: var(--neutral-100); + --body-text-color-subdued: var(--neutral-300); + --background-color: var(--primary-100); + --background-fill-primary: var(--input-background-fill); + --input-padding: 8px; + --input-background-fill: var(--primary-200); + --input-shadow: none; + --button-secondary-text-color: white; + --button-secondary-background-fill: var(--primary-400); + --button-secondary-background-fill-hover: var(--primary-700); + --block-title-text-color: var(--neutral-300); + --radius-sm: 1px; + --radius-lg: 6px; + --spacing-md: 4px; + --spacing-xxl: 8px; + --line-sm: 1.2em; + --line-md: 1.4em; +} + +html { font-size: var(--font-size); } +body, button, input, select, textarea { font-family: var(--font);} +button { max-width: 400px; } +img { background-color: var(--background-color); } +input[type=range] { height: var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } +input[type=range]::-webkit-slider-runnable-track, input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } +input[type=range]::-webkit-slider-thumb, input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } +input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } +::-webkit-scrollbar-track { background: #333333; } +::-webkit-scrollbar-thumb { background-color: var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } +div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } +div.compact { gap: 1em; } + +/* gradio style classes */ +fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } +.border-2 { border-width: 0; } +.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } +.bg-white { color: lightyellow; background-color: var(--inactive-color); } +.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } +.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } +.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: var(--radius-lg); box-shadow: 2px 2px 3px #111111; } +.gr-check-radio:checked { background-color: var(--highlight-color); } +.gr-compact { background-color: var(--background-color); } +.gr-form { border-width: 0; } +.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } +.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } +.gr-panel { background-color: var(--background-color); } +.eta-bar { display: none !important } +svg.feather.feather-image, .feather .feather-image { display: none } +.gap-2 { padding-top: 8px; } +.gr-box > div > div > 
input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } +.output-html { line-height: 1.2rem; overflow-x: hidden; } +.output-html > div { margin-bottom: 8px; } +.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ +.p-2 { padding: 0; } +.px-4 { padding-lefT: 1rem; padding-right: 1rem; } +.py-6 { padding-bottom: 0; } +.tabs { background-color: var(--background-color); } +.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } +.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } +div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} +#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;} +.label-wrap { background-color: #191919; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } +.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } +.small-accordion .label-wrap .icon { margin-right: 1em; } +.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} +button.selected {background: var(--button-primary-background-fill);} +.center.boundedheight.flex {background-color: var(--input-background-fill);} +.compact {border-radius: var(--border-radius-lg);} +#logMonitorData {background-color: var(--input-background-fill);} +#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } +#tab_extensions table, #tab_config table { width: 96vw; } +#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} +#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} + +/* automatic style classes */ +.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } +.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } +.gallery-item { box-shadow: none !important; } +.performance { color: #888; } +.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; } +.image-buttons { gap: 10px !important; justify-content: center; } +.image-buttons > button { max-width: 160px; } +.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg) } +#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; } + +/* gradio elements overrides */ +#div.gradio-container { overflow-x: hidden; } +#img2img_label_copy_to_img2img { font-weight: normal; } +#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; } +#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; } +#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; 
padding-top: 16px; } +#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; } +#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; } +#quicksettings button {padding: 0 0.5em 0.1em 0.5em;} +#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, #extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; } +#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; } +#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; } +#settings > div.flex-wrap { width: 15em; } +#txt2img_cfg_scale { min-width: 200px; } +#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; } +#txt2img_checkboxes, #img2img_checkboxes { margin-bottom: 0.2em; } +#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; } +#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; } +#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } + +#extras_upscale { margin-top: 10px } +#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); } +#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; } +#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) } +#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; } +#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; } + +/* based on gradio built-in dark theme */ +:root, .light, .dark { + --body-background-fill: var(--background-color); + --color-accent-soft: var(--neutral-700); + --background-fill-secondary: none; + --border-color-accent: var(--background-color); + --border-color-primary: var(--background-color); + --link-text-color-active: var(--primary-500); + --link-text-color: var(--secondary-500); + --link-text-color-hover: var(--secondary-400); + --link-text-color-visited: var(--secondary-600); + --shadow-spread: 1px; + --block-background-fill: None; + --block-border-color: var(--border-color-primary); + --block_border_width: None; + --block-info-text-color: var(--body-text-color-subdued); + --block-label-background-fill: var(--background-fill-secondary); + --block-label-border-color: var(--border-color-primary); + --block_label_border_width: None; + --block-label-text-color: var(--neutral-200); + --block_shadow: None; + --block_title_background_fill: None; + --block_title_border_color: None; + --block_title_border_width: None; + --panel-background-fill: var(--background-fill-secondary); + --panel-border-color: var(--border-color-primary); + --panel_border_width: None; + --checkbox-background-color: var(--primary-200); + --checkbox-background-color-focus: var(--primary-700); + --checkbox-background-color-hover: var(--primary-700); + --checkbox-background-color-selected: var(--primary-500); + --checkbox-border-color: transparent; + --checkbox-border-color-focus: var(--primary-800); + --checkbox-border-color-hover: var(--primary-800); + --checkbox-border-color-selected: var(--primary-800); + --checkbox-border-width: var(--input-border-width); + --checkbox-label-background-fill: None; + --checkbox-label-background-fill-hover: None; + --checkbox-label-background-fill-selected: 
var(--checkbox-label-background-fill); + --checkbox-label-border-color: var(--border-color-primary); + --checkbox-label-border-color-hover: var(--checkbox-label-border-color); + --checkbox-label-border-width: var(--input-border-width); + --checkbox-label-text-color: var(--body-text-color); + --checkbox-label-text-color-selected: var(--checkbox-label-text-color); + --error-background-fill: var(--background-fill-primary); + --error-border-color: var(--border-color-primary); + --error-text-color: #f768b7; /*was ef4444*/ + --input-background-fill-focus: var(--secondary-600); + --input-background-fill-hover: var(--input-background-fill); + --input-border-color: var(--background-color); + --input-border-color-focus: var(--primary-800); + --input-placeholder-color: var(--neutral-500); + --input-shadow-focus: None; + --loader_color: None; + --slider_color: None; + --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); + --table-border-color: var(--neutral-700); + --table-even-background-fill: var(--primary-300); + --table-odd-background-fill: var(--primary-200); + --table-row-focus: var(--color-accent-soft); + --button-border-width: var(--input-border-width); + --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); + --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); + --button-cancel-border-color: #dc2626; + --button-cancel-border-color-hover: var(--button-cancel-border-color); + --button-cancel-text-color: white; + --button-cancel-text-color-hover: var(--button-cancel-text-color); + --button-primary-background-fill: var(--primary-500); + --button-primary-background-fill-hover: var(--primary-800); + --button-primary-border-color: var(--primary-500); + --button-primary-border-color-hover: var(--button-primary-border-color); + --button-primary-text-color: white; + --button-primary-text-color-hover: var(--button-primary-text-color); + --button-secondary-border-color: var(--neutral-600); + --button-secondary-border-color-hover: var(--button-secondary-border-color); + --button-secondary-text-color-hover: var(--button-secondary-text-color); + --secondary-50: #eff6ff; + --secondary-100: #dbeafe; + --secondary-200: #bfdbfe; + --secondary-300: #93c5fd; + --secondary-400: #60a5fa; + --secondary-500: #3b82f6; + --secondary-600: #2563eb; + --secondary-700: #1d4ed8; + --secondary-800: #1e40af; + --secondary-900: #1e3a8a; + --secondary-950: #1d3660; + --neutral-50: #f0f0f0; /* */ + --neutral-100: #e8e8e3;/* majority of text (neutral gray yellow) */ + --neutral-200: #d0d0d0; + --neutral-300: #b3b5ac; /* top tab /sub text (light accent) */ + --neutral-400: #ffba85;/* tab title (bright orange) */ + --neutral-500: #48665b; /* prompt text (desat accent)*/ + --neutral-600: #373f39; /* tab outline color (accent color)*/ + --neutral-700: #2b373b; /* small settings tab accent */ + --neutral-800: #f379c2; /* bright pink accent */ + --neutral-900: #111827; + --neutral-950: #0b0f19; + --radius-xxs: 0; + --radius-xs: 0; + --radius-md: 0; + --radius-xl: 0; + --radius-xxl: 0; + --body-text-size: var(--text-md); + --body-text-weight: 400; + --embed-radius: var(--radius-lg); + --color-accent: var(--primary-500); + --shadow-drop: 0; + --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); + --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; + --block-border-width: 1px; + --block-info-text-size: var(--text-sm); + --block-info-text-weight: 400; + --block-label-border-width: 1px; + 
--block-label-margin: 0; + --block-label-padding: var(--spacing-sm) var(--spacing-lg); + --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; + --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); + --block-label-text-size: var(--text-sm); + --block-label-text-weight: 400; + --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); + --block-radius: var(--radius-lg); + --block-shadow: var(--shadow-drop); + --block-title-background-fill: none; + --block-title-border-color: none; + --block-title-border-width: 0; + --block-title-padding: 0; + --block-title-radius: none; + --block-title-text-size: var(--text-md); + --block-title-text-weight: 400; + --container-radius: var(--radius-lg); + --form-gap-width: 1px; + --layout-gap: var(--spacing-xxl); + --panel-border-width: 0; + --section-header-text-size: var(--text-md); + --section-header-text-weight: 400; + --checkbox-border-radius: var(--radius-sm); + --checkbox-label-gap: 2px; + --checkbox-label-padding: var(--spacing-md); + --checkbox-label-shadow: var(--shadow-drop); + --checkbox-label-text-size: var(--text-md); + --checkbox-label-text-weight: 400; + --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); + --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); + --checkbox-shadow: var(--input-shadow); + --error-border-width: 1px; + --input-border-width: 1px; + --input-radius: var(--radius-lg); + --input-text-size: var(--text-md); + --input-text-weight: 400; + --loader-color: var(--color-accent); + --prose-text-size: var(--text-md); + --prose-text-weight: 400; + --prose-header-text-weight: 600; + --slider-color: ; + --table-radius: var(--radius-lg); + --button-large-padding: 2px 6px; + --button-large-radius: var(--radius-lg); + --button-large-text-size: var(--text-lg); + --button-large-text-weight: 400; + --button-shadow: none; + --button-shadow-active: none; + --button-shadow-hover: none; + --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); + --button-small-radius: var(--radius-lg); + --button-small-text-size: var(--text-md); + --button-small-text-weight: 400; + --button-transition: none; + --size-9: 64px; + --size-14: 64px; +} diff --git a/javascript/orchid-dreams.css b/javascript/orchid-dreams.css index ef40d8e3a..05ff97a3b 100644 --- a/javascript/orchid-dreams.css +++ b/javascript/orchid-dreams.css @@ -1,297 +1,297 @@ -/* generic html tags */ -:root, .light, .dark { - --font: 'system-ui', 'ui-sans-serif', 'system-ui', "Roboto", sans-serif; - --font-mono: 'ui-monospace', 'Consolas', monospace; - --font-size: 16px; - --primary-100: #2a2a34; /* bg color*/ - --primary-200: #1f2028; /* drop down menu/ prompt*/ - --primary-300: #0a0c0e; /* black */ - --primary-400: #40435c; /* small buttons*/ - --primary-500: #4c48b5; /* main accent color purple*/ - --primary-700: #1f2028; /* darker hover accent*/ - --primary-800: #e95ee3; /* pink accent*/ - --highlight-color: var(--primary-500); - --inactive-color: var(--primary--800); - --body-text-color: var(--neutral-100); - --body-text-color-subdued: var(--neutral-300); - --background-color: var(--primary-100); - --background-fill-primary: var(--input-background-fill); - --input-padding: 8px; - 
--input-background-fill: var(--primary-200); - --input-shadow: none; - --button-secondary-text-color: white; - --button-secondary-background-fill: var(--primary-400); - --button-secondary-background-fill-hover: var(--primary-700); - --block-title-text-color: var(--neutral-300); - --radius-sm: 1px; - --radius-lg: 6px; - --spacing-md: 4px; - --spacing-xxl: 8px; - --line-sm: 1.2em; - --line-md: 1.4em; -} - -html { font-size: var(--font-size); } -body, button, input, select, textarea { font-family: var(--font);} -button { max-width: 400px; } -img { background-color: var(--background-color); } -input[type=range] { height: var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } -input[type=range]::-webkit-slider-runnable-track, input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } -input[type=range]::-webkit-slider-thumb, input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } -input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } -::-webkit-scrollbar-track { background: #333333; } -::-webkit-scrollbar-thumb { background-color: var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } -div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } -div.compact { gap: 1em; } - -/* gradio style classes */ -fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } -.border-2 { border-width: 0; } -.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } -.bg-white { color: lightyellow; background-color: var(--inactive-color); } -.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } -.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } -.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: var(--radius-lg); box-shadow: 2px 2px 3px #111111; } -.gr-check-radio:checked { background-color: var(--highlight-color); } -.gr-compact { background-color: var(--background-color); } -.gr-form { border-width: 0; } -.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } -.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } -.gr-panel { background-color: var(--background-color); } -.eta-bar { display: none !important } -svg.feather.feather-image, .feather .feather-image { display: none } -.gap-2 { padding-top: 8px; } -.gr-box > div > div > input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } -.output-html { line-height: 1.2rem; overflow-x: hidden; } -.output-html > div { margin-bottom: 8px; } -.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ -.p-2 { padding: 0; } -.px-4 { padding-lefT: 1rem; padding-right: 
1rem; } -.py-6 { padding-bottom: 0; } -.tabs { background-color: var(--background-color); } -.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } -.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } -div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} -#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;} -.label-wrap { background-color: #18181e; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } -.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } -.small-accordion .label-wrap .icon { margin-right: 1em; } -.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} -button.selected {background: var(--button-primary-background-fill);} -.center.boundedheight.flex {background-color: var(--input-background-fill);} -.compact {border-radius: var(--border-radius-lg);} -#logMonitorData {background-color: var(--input-background-fill);} -#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } -#tab_extensions table, #tab_config table { width: 96vw; } -#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} -#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} - -/* automatic style classes */ -.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } -.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } -.gallery-item { box-shadow: none !important; } -.performance { color: #888; } -.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; } -.image-buttons { gap: 10px !important; justify-content: center; } -.image-buttons > button { max-width: 160px; } -.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg) } -#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; } - -/* gradio elements overrides */ -#div.gradio-container { overflow-x: hidden; } -#img2img_label_copy_to_img2img { font-weight: normal; } -#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; } -#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; } -#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; padding-top: 16px; } -#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; } -#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; } -#quicksettings button {padding: 0 0.5em 0.1em 0.5em;} -#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, 
#extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; } -#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; } -#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; } -#settings > div.flex-wrap { width: 15em; } -#txt2img_cfg_scale { min-width: 200px; } -#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; } -#txt2img_checkboxes, #img2img_checkboxes { margin-bottom: 0.2em; } -#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; } -#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; } -#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } - -#extras_upscale { margin-top: 10px } -#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); } -#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; } -#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) } -#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; } -#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; } - -/* based on gradio built-in dark theme */ -:root, .light, .dark { - --body-background-fill: var(--background-color); - --color-accent-soft: var(--neutral-700); - --background-fill-secondary: none; - --border-color-accent: var(--background-color); - --border-color-primary: var(--background-color); - --link-text-color-active: var(--primary-500); - --link-text-color: var(--secondary-500); - --link-text-color-hover: var(--secondary-400); - --link-text-color-visited: var(--secondary-600); - --shadow-spread: 1px; - --block-background-fill: None; - --block-border-color: var(--border-color-primary); - --block_border_width: None; - --block-info-text-color: var(--body-text-color-subdued); - --block-label-background-fill: var(--background-fill-secondary); - --block-label-border-color: var(--border-color-primary); - --block_label_border_width: None; - --block-label-text-color: var(--neutral-200); - --block_shadow: None; - --block_title_background_fill: None; - --block_title_border_color: None; - --block_title_border_width: None; - --panel-background-fill: var(--background-fill-secondary); - --panel-border-color: var(--border-color-primary); - --panel_border_width: None; - --checkbox-background-color: var(--primary-200); - --checkbox-background-color-focus: var(--primary-400); - --checkbox-background-color-hover: var(--primary-200); - --checkbox-background-color-selected: var(--primary-400); - --checkbox-border-color: transparent; - --checkbox-border-color-focus: var(--primary-800); - --checkbox-border-color-hover: var(--primary-800); - --checkbox-border-color-selected: var(--primary-800); - --checkbox-border-width: var(--input-border-width); - --checkbox-label-background-fill: None; - --checkbox-label-background-fill-hover: None; - --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); - --checkbox-label-border-color: var(--border-color-primary); - --checkbox-label-border-color-hover: var(--checkbox-label-border-color); - --checkbox-label-border-width: var(--input-border-width); - --checkbox-label-text-color: var(--body-text-color); - --checkbox-label-text-color-selected: var(--checkbox-label-text-color); - --error-background-fill: var(--background-fill-primary); - 
--error-border-color: var(--border-color-primary); - --error-text-color: #f768b7; /*was ef4444*/ - --input-background-fill-focus: var(--secondary-600); - --input-background-fill-hover: var(--input-background-fill); - --input-border-color: var(--background-color); - --input-border-color-focus: var(--primary-800); - --input-placeholder-color: var(--neutral-500); - --input-shadow-focus: None; - --loader_color: None; - --slider_color: None; - --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); - --table-border-color: var(--neutral-700); - --table-even-background-fill: var(--primary-300); - --table-odd-background-fill: var(--primary-200); - --table-row-focus: var(--color-accent-soft); - --button-border-width: var(--input-border-width); - --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); - --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); - --button-cancel-border-color: #dc2626; - --button-cancel-border-color-hover: var(--button-cancel-border-color); - --button-cancel-text-color: white; - --button-cancel-text-color-hover: var(--button-cancel-text-color); - --button-primary-background-fill: var(--primary-500); - --button-primary-background-fill-hover: var(--primary-800); - --button-primary-border-color: var(--primary-500); - --button-primary-border-color-hover: var(--button-primary-border-color); - --button-primary-text-color: white; - --button-primary-text-color-hover: var(--button-primary-text-color); - --button-secondary-border-color: var(--neutral-600); - --button-secondary-border-color-hover: var(--button-secondary-border-color); - --button-secondary-text-color-hover: var(--button-secondary-text-color); - --secondary-50: #eff6ff; - --secondary-100: #dbeafe; - --secondary-200: #bfdbfe; - --secondary-300: #93c5fd; - --secondary-400: #60a5fa; - --secondary-500: #3b82f6; - --secondary-600: #2563eb; - --secondary-700: #1d4ed8; - --secondary-800: #1e40af; - --secondary-900: #1e3a8a; - --secondary-950: #1d3660; - --neutral-50: #f0f0f0; /* */ - --neutral-100: #ddd5e8;/* majority of text (neutral gray purple) */ - --neutral-200: #d0d0d0; - --neutral-300: #bfbad6; /* top tab text (light accent) */ - --neutral-400: #ffba85;/* tab title (bright orange) */ - --neutral-500: #545b94; /* prompt text (desat accent)*/ - --neutral-600: #1f2028; /* tab outline color (accent color)*/ - --neutral-700: #20212c; /* unchanged settings tab accent (dark)*/ - --neutral-800: #e055dc; /* bright pink accent */ - --neutral-900: #111827; - --neutral-950: #0b0f19; - --radius-xxs: 0; - --radius-xs: 0; - --radius-md: 0; - --radius-xl: 0; - --radius-xxl: 0; - --body-text-size: var(--text-md); - --body-text-weight: 400; - --embed-radius: var(--radius-lg); - --color-accent: var(--primary-500); - --shadow-drop: 0; - --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); - --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; - --block-border-width: 1px; - --block-info-text-size: var(--text-sm); - --block-info-text-weight: 400; - --block-label-border-width: 1px; - --block-label-margin: 0; - --block-label-padding: var(--spacing-sm) var(--spacing-lg); - --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; - --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); - --block-label-text-size: var(--text-sm); - --block-label-text-weight: 400; - --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); - --block-radius: 
var(--radius-lg); - --block-shadow: var(--shadow-drop); - --block-title-background-fill: none; - --block-title-border-color: none; - --block-title-border-width: 0; - --block-title-padding: 0; - --block-title-radius: none; - --block-title-text-size: var(--text-md); - --block-title-text-weight: 400; - --container-radius: var(--radius-lg); - --form-gap-width: 1px; - --layout-gap: var(--spacing-xxl); - --panel-border-width: 0; - --section-header-text-size: var(--text-md); - --section-header-text-weight: 400; - --checkbox-border-radius: var(--radius-sm); - --checkbox-label-gap: 2px; - --checkbox-label-padding: var(--spacing-md); - --checkbox-label-shadow: var(--shadow-drop); - --checkbox-label-text-size: var(--text-md); - --checkbox-label-text-weight: 400; - --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); - --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); - --checkbox-shadow: var(--input-shadow); - --error-border-width: 1px; - --input-border-width: 1px; - --input-radius: var(--radius-lg); - --input-text-size: var(--text-md); - --input-text-weight: 400; - --loader-color: var(--color-accent); - --prose-text-size: var(--text-md); - --prose-text-weight: 400; - --prose-header-text-weight: 600; - --slider-color: ; - --table-radius: var(--radius-lg); - --button-large-padding: 2px 6px; - --button-large-radius: var(--radius-lg); - --button-large-text-size: var(--text-lg); - --button-large-text-weight: 400; - --button-shadow: none; - --button-shadow-active: none; - --button-shadow-hover: none; - --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); - --button-small-radius: var(--radius-lg); - --button-small-text-size: var(--text-md); - --button-small-text-weight: 400; - --button-transition: none; - --size-9: 64px; - --size-14: 64px; -} +/* generic html tags */ +:root, .light, .dark { + --font: 'system-ui', 'ui-sans-serif', 'system-ui', "Roboto", sans-serif; + --font-mono: 'ui-monospace', 'Consolas', monospace; + --font-size: 16px; + --primary-100: #2a2a34; /* bg color*/ + --primary-200: #1f2028; /* drop down menu/ prompt*/ + --primary-300: #0a0c0e; /* black */ + --primary-400: #40435c; /* small buttons*/ + --primary-500: #4c48b5; /* main accent color purple*/ + --primary-700: #1f2028; /* darker hover accent*/ + --primary-800: #e95ee3; /* pink accent*/ + --highlight-color: var(--primary-500); + --inactive-color: var(--primary-800); + --body-text-color: var(--neutral-100); + --body-text-color-subdued: var(--neutral-300); + --background-color: var(--primary-100); + --background-fill-primary: var(--input-background-fill); + --input-padding: 8px; + --input-background-fill: var(--primary-200); + --input-shadow: none; + --button-secondary-text-color: white; + --button-secondary-background-fill: var(--primary-400); + --button-secondary-background-fill-hover: var(--primary-700); + --block-title-text-color: var(--neutral-300); + --radius-sm: 1px; + --radius-lg: 6px; + --spacing-md: 4px; + --spacing-xxl: 8px; + --line-sm: 1.2em; + --line-md: 1.4em; +} + +html { font-size: var(--font-size); } +body, button, input, select, textarea { font-family: var(--font);} +button { max-width: 400px; } +img { background-color: var(--background-color); } +input[type=range] { height:
var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } +input[type=range]::-webkit-slider-runnable-track, input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } +input[type=range]::-webkit-slider-thumb, input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } +input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } +::-webkit-scrollbar-track { background: #333333; } +::-webkit-scrollbar-thumb { background-color: var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } +div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } +div.compact { gap: 1em; } + +/* gradio style classes */ +fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } +.border-2 { border-width: 0; } +.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } +.bg-white { color: lightyellow; background-color: var(--inactive-color); } +.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } +.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } +.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: var(--radius-lg); box-shadow: 2px 2px 3px #111111; } +.gr-check-radio:checked { background-color: var(--highlight-color); } +.gr-compact { background-color: var(--background-color); } +.gr-form { border-width: 0; } +.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } +.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } +.gr-panel { background-color: var(--background-color); } +.eta-bar { display: none !important } +svg.feather.feather-image, .feather .feather-image { display: none } +.gap-2 { padding-top: 8px; } +.gr-box > div > div > input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } +.output-html { line-height: 1.2rem; overflow-x: hidden; } +.output-html > div { margin-bottom: 8px; } +.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ +.p-2 { padding: 0; } +.px-4 { padding-left: 1rem; padding-right: 1rem; } +.py-6 { padding-bottom: 0; } +.tabs { background-color: var(--background-color); } +.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } +.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } +div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} +#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;}
+.label-wrap { background-color: #18181e; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } +.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } +.small-accordion .label-wrap .icon { margin-right: 1em; } +.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} +button.selected {background: var(--button-primary-background-fill);} +.center.boundedheight.flex {background-color: var(--input-background-fill);} +.compact {border-radius: var(--border-radius-lg);} +#logMonitorData {background-color: var(--input-background-fill);} +#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } +#tab_extensions table, #tab_config table { width: 96vw; } +#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} +#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} + +/* automatic style classes */ +.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } +.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } +.gallery-item { box-shadow: none !important; } +.performance { color: #888; } +.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; } +.image-buttons { gap: 10px !important; justify-content: center; } +.image-buttons > button { max-width: 160px; } +.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg) } +#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; } + +/* gradio elements overrides */ +div.gradio-container { overflow-x: hidden; } +#img2img_label_copy_to_img2img { font-weight: normal; } +#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; } +#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; } +#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; padding-top: 16px; } +#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; } +#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; } +#quicksettings button {padding: 0 0.5em 0.1em 0.5em;} +#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, #extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; } +#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; } +#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; } +#settings > div.flex-wrap { width: 15em; } +#txt2img_cfg_scale { min-width: 200px; } +#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; } +#txt2img_checkboxes, #img2img_checkboxes { margin-bottom: 0.2em; } +#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; }
+#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; } +#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } + +#extras_upscale { margin-top: 10px } +#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); } +#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; } +#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) } +#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; } +#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; } + +/* based on gradio built-in dark theme */ +:root, .light, .dark { + --body-background-fill: var(--background-color); + --color-accent-soft: var(--neutral-700); + --background-fill-secondary: none; + --border-color-accent: var(--background-color); + --border-color-primary: var(--background-color); + --link-text-color-active: var(--primary-500); + --link-text-color: var(--secondary-500); + --link-text-color-hover: var(--secondary-400); + --link-text-color-visited: var(--secondary-600); + --shadow-spread: 1px; + --block-background-fill: None; + --block-border-color: var(--border-color-primary); + --block_border_width: None; + --block-info-text-color: var(--body-text-color-subdued); + --block-label-background-fill: var(--background-fill-secondary); + --block-label-border-color: var(--border-color-primary); + --block_label_border_width: None; + --block-label-text-color: var(--neutral-200); + --block_shadow: None; + --block_title_background_fill: None; + --block_title_border_color: None; + --block_title_border_width: None; + --panel-background-fill: var(--background-fill-secondary); + --panel-border-color: var(--border-color-primary); + --panel_border_width: None; + --checkbox-background-color: var(--primary-200); + --checkbox-background-color-focus: var(--primary-400); + --checkbox-background-color-hover: var(--primary-200); + --checkbox-background-color-selected: var(--primary-400); + --checkbox-border-color: transparent; + --checkbox-border-color-focus: var(--primary-800); + --checkbox-border-color-hover: var(--primary-800); + --checkbox-border-color-selected: var(--primary-800); + --checkbox-border-width: var(--input-border-width); + --checkbox-label-background-fill: None; + --checkbox-label-background-fill-hover: None; + --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); + --checkbox-label-border-color: var(--border-color-primary); + --checkbox-label-border-color-hover: var(--checkbox-label-border-color); + --checkbox-label-border-width: var(--input-border-width); + --checkbox-label-text-color: var(--body-text-color); + --checkbox-label-text-color-selected: var(--checkbox-label-text-color); + --error-background-fill: var(--background-fill-primary); + --error-border-color: var(--border-color-primary); + --error-text-color: #f768b7; /*was ef4444*/ + --input-background-fill-focus: var(--secondary-600); + --input-background-fill-hover: var(--input-background-fill); + --input-border-color: var(--background-color); + --input-border-color-focus: var(--primary-800); + --input-placeholder-color: var(--neutral-500); + --input-shadow-focus: None; + --loader_color: None; + --slider_color: None; + --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); + --table-border-color: var(--neutral-700); + --table-even-background-fill: 
var(--primary-300); + --table-odd-background-fill: var(--primary-200); + --table-row-focus: var(--color-accent-soft); + --button-border-width: var(--input-border-width); + --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); + --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); + --button-cancel-border-color: #dc2626; + --button-cancel-border-color-hover: var(--button-cancel-border-color); + --button-cancel-text-color: white; + --button-cancel-text-color-hover: var(--button-cancel-text-color); + --button-primary-background-fill: var(--primary-500); + --button-primary-background-fill-hover: var(--primary-800); + --button-primary-border-color: var(--primary-500); + --button-primary-border-color-hover: var(--button-primary-border-color); + --button-primary-text-color: white; + --button-primary-text-color-hover: var(--button-primary-text-color); + --button-secondary-border-color: var(--neutral-600); + --button-secondary-border-color-hover: var(--button-secondary-border-color); + --button-secondary-text-color-hover: var(--button-secondary-text-color); + --secondary-50: #eff6ff; + --secondary-100: #dbeafe; + --secondary-200: #bfdbfe; + --secondary-300: #93c5fd; + --secondary-400: #60a5fa; + --secondary-500: #3b82f6; + --secondary-600: #2563eb; + --secondary-700: #1d4ed8; + --secondary-800: #1e40af; + --secondary-900: #1e3a8a; + --secondary-950: #1d3660; + --neutral-50: #f0f0f0; /* */ + --neutral-100: #ddd5e8;/* majority of text (neutral gray purple) */ + --neutral-200: #d0d0d0; + --neutral-300: #bfbad6; /* top tab text (light accent) */ + --neutral-400: #ffba85;/* tab title (bright orange) */ + --neutral-500: #545b94; /* prompt text (desat accent)*/ + --neutral-600: #1f2028; /* tab outline color (accent color)*/ + --neutral-700: #20212c; /* unchanged settings tab accent (dark)*/ + --neutral-800: #e055dc; /* bright pink accent */ + --neutral-900: #111827; + --neutral-950: #0b0f19; + --radius-xxs: 0; + --radius-xs: 0; + --radius-md: 0; + --radius-xl: 0; + --radius-xxl: 0; + --body-text-size: var(--text-md); + --body-text-weight: 400; + --embed-radius: var(--radius-lg); + --color-accent: var(--primary-500); + --shadow-drop: 0; + --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); + --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; + --block-border-width: 1px; + --block-info-text-size: var(--text-sm); + --block-info-text-weight: 400; + --block-label-border-width: 1px; + --block-label-margin: 0; + --block-label-padding: var(--spacing-sm) var(--spacing-lg); + --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; + --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); + --block-label-text-size: var(--text-sm); + --block-label-text-weight: 400; + --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); + --block-radius: var(--radius-lg); + --block-shadow: var(--shadow-drop); + --block-title-background-fill: none; + --block-title-border-color: none; + --block-title-border-width: 0; + --block-title-padding: 0; + --block-title-radius: none; + --block-title-text-size: var(--text-md); + --block-title-text-weight: 400; + --container-radius: var(--radius-lg); + --form-gap-width: 1px; + --layout-gap: var(--spacing-xxl); + --panel-border-width: 0; + --section-header-text-size: var(--text-md); + --section-header-text-weight: 400; + --checkbox-border-radius: var(--radius-sm); + --checkbox-label-gap: 2px; + --checkbox-label-padding: 
var(--spacing-md); + --checkbox-label-shadow: var(--shadow-drop); + --checkbox-label-text-size: var(--text-md); + --checkbox-label-text-weight: 400; + --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); + --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); + --checkbox-shadow: var(--input-shadow); + --error-border-width: 1px; + --input-border-width: 1px; + --input-radius: var(--radius-lg); + --input-text-size: var(--text-md); + --input-text-weight: 400; + --loader-color: var(--color-accent); + --prose-text-size: var(--text-md); + --prose-text-weight: 400; + --prose-header-text-weight: 600; + --slider-color: ; + --table-radius: var(--radius-lg); + --button-large-padding: 2px 6px; + --button-large-radius: var(--radius-lg); + --button-large-text-size: var(--text-lg); + --button-large-text-weight: 400; + --button-shadow: none; + --button-shadow-active: none; + --button-shadow-hover: none; + --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); + --button-small-radius: var(--radius-lg); + --button-small-text-size: var(--text-md); + --button-small-text-weight: 400; + --button-transition: none; + --size-9: 64px; + --size-14: 64px; +} diff --git a/javascript/sdnext.css b/javascript/sdnext.css index f710f742a..2ec0f35ad 100644 --- a/javascript/sdnext.css +++ b/javascript/sdnext.css @@ -1,343 +1,343 @@ -@font-face { font-family: 'NotoSans'; font-display: swap; font-style: normal; font-weight: 100; src: local('NotoSans'), url('notosans-nerdfont-regular.ttf') } -:root { --left-column: 500px; } -a { font-weight: bold; cursor: pointer; } -h2 { margin-top: 1em !important; font-size: var(--text-xxl) !important; } -footer { display: none; } -table { overflow-x: auto !important; overflow-y: auto !important; } -td { border-bottom: none !important; padding: 0.1em 0.5em !important; } -tr { border-bottom: none !important; padding: 0.1em 0.5em !important; } -textarea { overflow-y: auto !important; } -span { font-size: var(--text-md) !important; } -button { font-size: var(--text-lg) !important; } - -/* gradio elements */ -.block .padded:not(.gradio-accordion) { padding: 0 !important; margin-right: 0; min-width: 90px !important; } -.compact { gap: 1em 0.2em; background: transparent !important; padding: 0 !important; } -.flex-break { flex-basis: 100% !important; } -.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; gap: 0.5em 1em; flex-grow: 1 !important; } -.form-compact { margin-bottom: 0 !important; gap: 0.2em 1em !important; } -.gap .compact{ padding: 0; gap: 0.2em 0; } -.hidden { display: none; } -.tabitem { padding: 0 !important; } - -.gradio-dropdown, .block.gradio-slider, .block.gradio-checkbox, .block.gradio-textbox, .block.gradio-radio, .block.gradio-checkboxgroup, .block.gradio-number, .block.gradio-colorpicker { border-width: 0 !important; box-shadow: none !important;} -.gradio-accordion { padding-top: var(--spacing-md) !important; padding-right: 0 !important; padding-bottom: 0 !important; color: var(--body-text-color); } -.gradio-accordion .label-wrap .icon { color: var(--button-primary-border-color); } -.gradio-button { border-radius: var(--radius-lg) !important; } -.gradio-button.secondary-down { background: 
var(--button-secondary-background-fill); color: var(--button-secondary-text-color); } -.gradio-button.secondary-down, .gradio-button.secondary-down:hover { box-shadow: 1px 1px 1px rgba(0,0,0,0.25) inset, 0px 0px 3px rgba(0,0,0,0.15) inset; } -.gradio-button.secondary-down:hover { background: var(--button-secondary-background-fill-hover); color: var(--button-secondary-text-color-hover); } -.gradio-button.tool { max-width: min-content; min-width: min-content !important; align-self: end; font-size: 20px !important; color: var(--body-text-color) !important; margin-top: auto; margin-bottom: var(--spacing-md); align-self: center; } -.gradio-checkbox { margin: 0.75em 1.5em 0 0; align-self: center; } -.gradio-column { min-width: min(160px, 100%) !important; } -.gradio-container { max-width: unset !important; padding: var(--block-label-padding) !important; } -.gradio-container .prose a, .gradio-container .prose a:visited{ color: unset; text-decoration: none; } -.gradio-dropdown { margin-right: var(--spacing-sm) !important; min-width:160px; max-width:fit-content } -.gradio-dropdown ul.options { z-index: 1000; min-width: fit-content; max-height: 33vh !important; white-space: nowrap; } -.gradio-dropdown ul.options li.item { padding: var(--spacing-xs); } -.gradio-dropdown ul.options li.item:not(:has(.hide)) { background-color: var(--primary-500); } -.gradio-dropdown .token { padding: var(--spacing-xs); } -.gradio-dropdown span { margin-bottom: 0 !important; font-size: var(--text-sm); } -.gradio-dropdown .reference { margin-bottom: var(--spacing-sm) !important; } -.gradio-html { color: var(--body-text-color); } -.gradio-html .min { min-height: 0; } -.gradio-html div.wrap { height: 100%; } -.gradio-number { min-width: unset !important; max-width: 5em !important; } -.gradio-textbox { overflow: visible !important; } -.gradio-radio { padding: 0 !important; width: max-content !important; } -.gradio-slider { margin-right: var(--spacing-sm) !important; width: max-content !important } -.gradio-slider input[type="number"] { width: 6em; font-size: var(--text-xs); height: 16px; text-align: right; } - -/* custom gradio elements */ -.accordion-compact { padding: 8px 0px 4px 0px !important; } -.settings-accordion>div { flex-flow: wrap; } -.small-accordion .form { min-width: var(--left-column) !important; max-width: max-content; } -.small-accordion .label-wrap .icon { margin-right: 1.6em; margin-left: 0.6em; color: var(--button-primary-border-color); } -.small-accordion .label-wrap { padding: 16px 0px 8px 0px; margin: 0; border-top: 2px solid var(--button-secondary-border-color); } -.small-accordion { width: fit-content !important; min-width: fit-content !important; padding-left: 0 !important; } -.extension-script { max-width: 48vw; } -button.custom-button{ border-radius: var(--button-large-radius); padding: var(--button-large-padding); font-weight: var(--button-large-text-weight); border: var(--button-border-width) solid var(--button-secondary-border-color); - background: var(--button-secondary-background-fill); color: var(--button-secondary-text-color); font-size: var(--text-lg); - display: inline-flex; justify-content: center; align-items: center; transition: var(--button-transition); box-shadow: var(--button-shadow); text-align: center; } - -/* themes */ -.theme-preview { display: none; position: fixed; border: var(--spacing-sm) solid var(--neutral-600); box-shadow: 2px 2px 2px 2px var(--neutral-700); top: 0; bottom: 0; left: 0; right: 0; margin: auto; max-width: 75vw; z-index: 999; } - -/* txt2img/img2img 
specific */ -.block.token-counter{ position: absolute; display: inline-block; right: 1em; min-width: 0 !important; width: auto; z-index: 100; top: -0.5em; } -.block.token-counter span{ background: var(--input-background-fill) !important; box-shadow: 0 0 0.0 0.3em rgba(192,192,192,0.15), inset 0 0 0.6em rgba(192,192,192,0.075); border: 2px solid rgba(192,192,192,0.4) !important; } -.block.token-counter.error span{ box-shadow: 0 0 0.0 0.3em rgba(255,0,0,0.15), inset 0 0 0.6em rgba(255,0,0,0.075); border: 2px solid rgba(255,0,0,0.4) !important; } -.block.token-counter div{ display: inline; } -.block.token-counter span{ padding: 0.1em 0.75em; } -.performance { font-size: var(--text-xs); color: #444; } -.performance p { display: inline-block; color: var(--body-text-color-subdued) !important } -.performance .time { margin-right: 0; } -.thumbnails { background: var(--body-background-fill); } -#control_gallery { height: 564px; } -#control-result { padding: 0.5em; } -#control-inputs { margin-top: 1em; } -#txt2img_prompt_container, #img2img_prompt_container, #control_prompt_container { margin-right: var(--layout-gap) } -#txt2img_footer, #img2img_footer, #control_footer { height: fit-content; display: none; } -#txt2img_generate_box, #img2img_generate_box, #control_general_box { gap: 0.5em; flex-wrap: wrap-reverse; height: fit-content; } -#txt2img_actions_column, #img2img_actions_column, #control_actions_column { gap: 0.3em; height: fit-content; } -#txt2img_generate_box>button, #img2img_generate_box>button, #control_generate_box>button, #txt2img_enqueue, #img2img_enqueue { min-height: 42px; max-height: 42px; line-height: 1em; } -#txt2img_generate_line2, #img2img_generate_line2, #txt2img_tools, #img2img_tools, #control_generate_line2, #control_tools { display: flex; } -#txt2img_generate_line2>button, #img2img_generate_line2>button, #extras_generate_box>button, #control_generate_line2>button, #txt2img_tools>button, #img2img_tools>button, #control_tools>button { height: 2em; line-height: 0; font-size: var(--text-md); - min-width: unset; display: block !important; } -#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt, #control_prompt, #control_neg_prompt { display: contents; } -#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } -#control_generate_box { min-width: unset; width: 100%; } -#txt2img_actions_column, #img2img_actions_column, #control_actions { flex-flow: wrap; justify-content: space-between; } -#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper, #control_enqueue_wrapper { min-width: unset !important; width: 48%; } -.interrogate-clip { position: absolute; right: 3em; top: -2.7em; max-width: fit-content; } -.interrogate-blip { position: absolute; right: 1em; top: -2.7em; max-width: fit-content; } -.interrogate-col{ min-width: 0 !important; max-width: fit-content; margin-right: var(--spacing-xxl); } -.interrogate-col>button{ flex: 1; width: 7em; max-height: 84px; } -#sampler_selection_img2img { margin-top: 1em; } -#txtimg_hr_finalres{ min-height: 0 !important; } -#img2img_scale_resolution_preview.block{ display: flex; align-items: end; } -#txtimg_hr_finalres .resolution, #img2img_scale_resolution_preview .resolution{ font-weight: bold; } -div#extras_scale_to_tab div.form{ flex-direction: row; } -#img2img_unused_scale_by_slider { visibility: hidden; width: 0.5em; max-width: 0.5em; min-width: 0.5em; } -.inactive{ opacity: 0.5; } -div#extras_scale_to_tab div.form{ flex-direction: row; } -#mode_img2img .gradio-image>div.fixed-height, #mode_img2img 
.gradio-image>div.fixed-height img{ height: 480px !important; max-height: 480px !important; min-height: 480px !important; } -#img2img_sketch, #img2maskimg, #inpaint_sketch { overflow: overlay !important; resize: auto; background: var(--panel-background-fill); z-index: 5; } -.image-buttons button{ min-width: auto; } -.infotext { overflow-wrap: break-word; line-height: 1.5em; } -.infotext>p { padding-left: 1em; text-indent: -1em; white-space: pre-wrap; } -.tooltip { display: block; position: fixed; top: 1em; right: 1em; padding: 0.5em; background: var(--input-background-fill); color: var(--body-text-color); border: 1pt solid var(--button-primary-border-color); - width: 22em; min-height: 1.3em; font-size: var(--text-xs); transition: opacity 0.2s ease-in; pointer-events: none; opacity: 0; z-index: 999; } -.tooltip-show { opacity: 0.9; } -.toolbutton-selected { background: var(--background-fill-primary) !important; } - -/* settings */ -#si-sparkline-memo, #si-sparkline-load { background-color: #111; } -#quicksettings { width: fit-content; } -#quicksettings>button { padding: 0 1em 0 0; align-self: end; margin-bottom: var(--text-sm); } -#settings { display: flex; gap: var(--layout-gap); } -#settings div { border: none; gap: 0; margin: 0 0 var(--layout-gap) 0px; padding: 0; } -#settings>div.tab-content { flex: 10 0 75%; display: grid; } -#settings>div.tab-content>div { border: none; padding: 0; } -#settings>div.tab-content>div>div>div>div>div { flex-direction: unset; } -#settings>div.tab-nav { display: grid; grid-template-columns: repeat(auto-fill, .5em minmax(10em, 1fr)); flex: 1 0 auto; width: 12em; align-self: flex-start; gap: var(--spacing-xxl); } -#settings>div.tab-nav button { display: block; border: none; text-align: left; white-space: initial; padding: 0; } -#settings>div.tab-nav>#settings_show_all_pages { padding: var(--size-2) var(--size-4); } -#settings .block.gradio-checkbox { margin: 0; width: auto; } -#settings .dirtyable { gap: .5em; } -#settings .dirtyable.hidden { display: none; } -#settings .modification-indicator { height: 1.2em; border-radius: 1em !important; padding: 0; width: 0; margin-right: 0.5em; } -#settings .modification-indicator:disabled { visibility: hidden; } -#settings .modification-indicator.saved { background: var(--color-accent-soft); width: var(--spacing-sm); } -#settings .modification-indicator.changed { background: var(--color-accent); width: var(--spacing-sm); } -#settings .modification-indicator.changed.unsaved { background-image: linear-gradient(var(--color-accent) 25%, var(--color-accent-soft) 75%); width: var(--spacing-sm); } -#settings_result { margin: 0 1.2em; } -.licenses { display: block !important; } - -/* live preview */ -.progressDiv{ position: relative; height: 20px; background: #b4c0cc; margin-bottom: -3px; } -.dark .progressDiv{ background: #424c5b; } -.progressDiv .progress{ width: 0%; height: 20px; background: #0060df; color: white; font-weight: bold; line-height: 20px; padding: 0 8px 0 0; text-align: right; overflow: visible; white-space: nowrap; padding: 0 0.5em; } -.livePreview { position: absolute; z-index: 50; background-color: transparent; width: -moz-available; width: -webkit-fill-available; } -.livePreview img { position: absolute; object-fit: contain; width: 100%; height: 100%; } -.dark .livePreview { background-color: rgb(17 24 39 / var(--tw-bg-opacity)); } -.popup-metadata { color: white; background: #0000; display: inline-block; white-space: pre-wrap; font-size: var(--text-xxs); } -.global-popup{ display: flex; position: fixed; 
z-index: 10001; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(20, 20, 20, 0.95);} -.global-popup-close:before { content: "×"; } -.global-popup-close{ position: fixed; right: 0.5em; top: 0; cursor: pointer; color: white; font-size: 32pt; } -.global-popup-inner{ display: inline-block; margin: auto; padding: 2em; } - -/* fullpage image viewer */ -#lightboxModal{ display: none; position: fixed; z-index: 1001; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(20, 20, 20, 0.75); backdrop-filter: blur(6px); - user-select: none; -webkit-user-select: none; flex-direction: row; } -.modalControls { display: flex; justify-content: space-evenly; background-color: transparent; position: absolute; width: 99%; z-index: 1; } -.modalControls:hover { background-color: #50505050; } -.modalControls span { color: white; font-size: 2em; font-weight: bold; cursor: pointer; filter: grayscale(100%); } -.modalControls span:hover, .modalControls span:focus { color: var(--highlight-color); filter: none; } -.lightboxModalPreviewZone { display: flex; width: 100%; height: 100%; } -.lightboxModalPreviewZone:focus-visible { outline: none; } -.lightboxModalPreviewZone>img { display: block; margin: auto; width: auto; } -.lightboxModalPreviewZone>img.modalImageFullscreen{ object-fit: contain; height: 100%; width: 100%; min-height: 0; background: transparent; } -table.settings-value-table { background: white; border-collapse: collapse; margin: 1em; border: var(--spacing-sm) solid white; } -table.settings-value-table td { padding: 0.4em; border: 1px solid #ccc; max-width: 36em; } -.modalPrev, .modalNext { cursor: pointer; position: relative; z-index: 1; top: 0; width: auto; height: 100vh; line-height: 100vh; text-align: center; padding: 16px; - margin-top: -50px; color: white; font-weight: bold; font-size: 20px; transition: 0.6s ease; user-select: none; -webkit-user-select: none; } -.modalNext { right: 0; } -.modalPrev:hover, .modalNext:hover { background-color: rgba(0, 0, 0, 0.8); } -#imageARPreview { position: absolute; top: 0px; left: 0px; border: 2px solid red; background: rgba(255, 0, 0, 0.3); z-index: 900; pointer-events: none; display: none; } - -/* context menu (ie for the generate button) */ -#context-menu { z-index: 9999; position: absolute; display: block; padding: var(--spacing-md); border: 2px solid var(--highlight-color); background: var(--background-fill-primary); color: var(--body-text-color); } -.context-menu-items { list-style: none; margin: 0; padding: 0; font-size: var(--text-sm); } -.context-menu-items a { display: block; padding: var(--spacing-md); cursor: pointer; font-weight: normal; } -.context-menu-items a:hover { background: var(--highlight-color) } - -/* extensions */ -#tab_extensions table, #tab_config table{ border-collapse: collapse; } -#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: 1px solid #ccc; padding: 0.25em 0.5em; } -#tab_extensions table tr:hover, #tab_config table tr:hover { background-color: var(--neutral-500) !important; } -#tab_extensions table input[type="checkbox"] { margin-right: 0.5em; appearance: checkbox; } -#tab_extensions button{ max-width: 16em; } -#tab_extensions input[disabled="disabled"]{ opacity: 0.5; } -.extension-tag{ font-weight: bold; font-size: var(--text-sm); } -.extension-button { font-size: var(--text-sm) !important; width: 6em; } -#extensions .name{ font-size: var(--text-lg) } -#extensions .type{ opacity: 0.5; font-size: var(--text-sm); 
text-align: center; } -#extensions .version{ opacity: 0.7; } -#extensions .info{ margin: 0; } -#extensions .date{ opacity: 0.85; font-size: var(--text-sm); } - -/* extra networks */ -.extra-networks>div { margin: 0; border-bottom: none !important; gap: 0.3em 0; } -.extra-networks .second-line { display: flex; width: -moz-available; width: -webkit-fill-available; gap: 0.3em; box-shadow: var(--input-shadow); } -.extra-networks .search { flex: 1; } -.extra-networks .description { flex: 3; } -.extra-networks .tab-nav>button { margin-right: 0; height: 24px; padding: 2px 4px 2px 4px; } -.extra-networks .buttons { position: absolute; right: 0; margin: -4px; background: var(--background-color); } -.extra-networks .buttons>button { margin-left: -0.2em; height: 1.4em; color: var(--primary-300) !important; font-size: 20px !important; } -.extra-networks .custom-button { width: 120px; width: 100%; background: none; justify-content: left; text-align: left; padding: 3px 3px 3px 12px; text-indent: -6px; box-shadow: none; line-break: auto; } -.extra-networks .custom-button:hover { background: var(--button-primary-background-fill) } -.extra-networks-tab { padding: 0 !important; } -.extra-network-subdirs { background: var(--input-background-fill); overflow-x: hidden; overflow-y: auto; min-width: max(15%, 120px); padding-top: 0.5em; margin-top: -4px !important; } -.extra-networks-page { display: flex } -.extra-network-cards { display: flex; flex-wrap: wrap; overflow-y: auto; overflow-x: hidden; align-content: flex-start; width: -moz-available; width: -webkit-fill-available; } -.extra-network-cards .card { height: fit-content; margin: 0 0 0.5em 0.5em; position: relative; scroll-snap-align: start; scroll-margin-top: 0; } -.extra-network-cards .card .overlay { position: absolute; bottom: 0; padding: 0.2em; z-index: 10; width: 100%; background: none; } -.extra-network-cards .card .overlay .name { font-size: var(--text-lg); font-weight: bold; text-shadow: 1px 1px black; color: white; overflow-wrap: break-word; } -.extra-network-cards .card .preview { box-shadow: var(--button-shadow); min-height: 30px; } -.extra-network-cards .card:hover .overlay { background: rgba(0, 0, 0, 0.40); } -.extra-network-cards .card:hover .preview { box-shadow: none; filter: grayscale(100%); } -.extra-network-cards .card:hover .overlay { background: rgba(0, 0, 0, 0.40); } -.extra-network-cards .card .overlay .tags { display: none; overflow-wrap: break-word; } -.extra-network-cards .card .overlay .tag { padding: 2px; margin: 2px; background: rgba(70, 70, 70, 0.60); font-size: var(--text-md); cursor: pointer; display: inline-block; } -.extra-network-cards .card .actions>span { padding: 4px; font-size: 34px !important; } -.extra-network-cards .card .actions>span:hover { color: var(--highlight-color); } -.extra-network-cards .card:hover .actions { display: block; } -.extra-network-cards .card:hover .overlay .tags { display: block; } -.extra-network-cards .card .actions { font-size: 3em; display: none; text-align-last: right; cursor: pointer; font-variant: unicase; position: absolute; z-index: 80; right: 0; height: 0.7em; width: 100%; background: rgba(0, 0, 0, 0.40); } -.extra-network-cards .card-list { display: flex; margin: 0.3em; padding: 0.3em; background: var(--input-background-fill); cursor: pointer; border-radius: var(--button-large-radius); } -.extra-network-cards .card-list .tag { color: var(--primary-500); margin-left: 0.8em; } -.extra-details-close { position: fixed; top: 0.2em; right: 0.2em; z-index: 99; background: 
var(--button-secondary-background-fill) !important; } -#txt2img_description, #img2img_description, #control_description { max-height: 63px; overflow-y: auto !important; } -#txt2img_description>label>textarea, #img2img_description>label>textarea, #control_description>label>textarea { font-size: var(--text-sm) } - -#txt2img_extra_details>div, #img2img_extra_details>div { overflow-y: auto; min-height: 40vh; max-height: 80vh; align-self: flex-start; } -#txt2img_extra_details, #img2img_extra_details { position: fixed; bottom: 50%; left: 50%; transform: translate(-50%, 50%); padding: 0.8em; border: var(--block-border-width) solid var(--highlight-color) !important; - z-index: 100; box-shadow: var(--button-shadow); } -#txt2img_extra_details td:first-child, #img2img_extra_details td:first-child { font-weight: bold; vertical-align: top; } -#txt2img_extra_details .gradio-image, #img2img_extra_details .gradio-image { max-height: 70vh; } - - -/* specific elements */ -#modelmerger_interp_description { margin-top: 1em; margin-bottom: 1em; } -#scripts_alwayson_txt2img, #scripts_alwayson_img2img { padding: 0 } -#scripts_alwayson_txt2img>.label-wrap, #scripts_alwayson_img2img>.label-wrap { background: var(--input-background-fill); padding: 0; margin: 0; border-radius: var(--radius-lg); } -#scripts_alwayson_txt2img>.label-wrap>span, #scripts_alwayson_img2img>.label-wrap>span { padding: var(--spacing-xxl); } -#scripts_alwayson_txt2img div { max-width: var(--left-column); } -#script_txt2img_agent_scheduler { display: none; } -#refresh_tac_refreshTempFiles { display: none; } -#train_tab { flex-flow: row-reverse; } -#models_tab { flex-flow: row-reverse; } -#swap_axes>button { min-width: 100px; font-size: var(--text-md); } -#ui_defaults_review { margin: 1em; } - -/* extras */ -.extras { gap: 0.2em 1em !important } -#extras_generate, #extras_interrupt, #extras_skip { display: block !important; position: relative; height: 36px; } -#extras_upscale { margin-top: 10px } -#pnginfo_html_info .gradio-html > div { margin: 0.5em; } - -/* log monitor */ -.log-monitor { display: none; justify-content: unset !important; overflow: hidden; padding: 0; margin-top: auto; font-family: monospace; font-size: var(--text-xs); } -.log-monitor td, .log-monitor th { padding-left: 1em; } - -/* changelog */ -.md h2 { background-color: var(--background-fill-primary); padding: 0.5em; } -.md ul { list-style-type: square !important; text-indent: 1em; margin-left: 4em; } -.md li { list-style-position: outside !important; text-indent: 0; } -.md p { margin-left: 2em; } - -/* custom component */ -.folder-selector textarea { height: 2em !important; padding: 6px !important; } -.nvml { position: fixed; bottom: 10px; right: 10px; background: var(--background-fill-primary); border: 1px solid var(--button-primary-border-color); padding: 6px; color: var(--button-primary-text-color); - font-size: var(--text-xxs); z-index: 50; font-family: monospace; display: none; } - -/* control */ -#control_input_type { max-width: 18em } -#control_settings .small-accordion .form { min-width: 350px !important } -.control-button { min-height: 42px; max-height: 42px; line-height: 1em; } -.control-tabs > .tab-nav { margin-bottom: 0; margin-top: 0; } -.control-unit { max-width: 1200px; padding: 0 !important; margin-top: -10px !important; } -.control-unit > .label-wrap { margin-bottom: 0 !important; } -.processor-settings { padding: 0 !important; max-width: 300px; } -.processor-group>div { flex-flow: wrap;gap: 1em; } - -/* main info */ -.main-info { font-weight: 
var(--section-header-text-weight); color: var(--body-text-color-subdued); padding: 1em !important; margin-top: 2em !important; line-height: var(--line-lg) !important; } - -/* loader */ -.splash { position: fixed; top: 0; left: 0; width: 100vw; height: 100vh; z-index: 1000; display: block; text-align: center; } -.motd { margin-top: 2em; color: var(--body-text-color-subdued); font-family: monospace; font-variant: all-petite-caps; } -.splash-img { margin: 10% auto 0 auto; width: 512px; background-repeat: no-repeat; height: 512px; animation: color 10s infinite alternate; } -.loading { color: white; position: absolute; top: 20%; left: 50%; transform: translateX(-50%); } -.loader { width: 300px; height: 300px; border: var(--spacing-md) solid transparent; border-radius: 50%; border-top: var(--spacing-md) solid var(--primary-600); animation: spin 4s linear infinite; position: relative; } -.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: var(--spacing-md) solid transparent; } -.loader::before { border-top-color: var(--primary-900); animation: 3s spin linear infinite; } -.loader::after { border-top-color: var(--primary-300); animation: spin 1.5s linear infinite; } -@keyframes move { from { background-position-x: 0, -40px; } to { background-position-x: 0, 40px; } } -@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } -@keyframes color { from { filter: hue-rotate(0deg) } to { filter: hue-rotate(360deg) } } - -:root, .light, .dark { - --text-xxs: 9px; - --text-xs: 10px; - --text-sm: 12px; - --text-md: 14px; - --text-lg: 15px; - --text-xl: 16px; - --text-xxl: 17px; - --spacing-xxs: 1px; - --spacing-xs: 2px; - --spacing-sm: 3px; - --spacing-lg: 4px; - --spacing-xl: 5px; - --spacing-xxl: 6px; -} - -@media (hover: none) and (pointer: coarse) { /* Apply different styles for devices with coarse pointers dependant on screen resolution */ - @media (max-width: 1024px) { /* Do not affect displays larger than 1024px wide. */ - @media (max-width: 399px) { /* Screens smaller than 400px wide */ - :root, .light, .dark { --left-column: 100%; } - #txt2img_results, #img2img_results, #extras_results { min-width: calc(min(320px, 100%)) !important;} /* maintain single column for from image operations on larger mobile devices */ - #txt2img_footer p { text-wrap: wrap; } - } - @media (min-width: 400px) { /* Screens larger than 400px wide */ - :root, .light, .dark {--left-column: 50%;} - #txt2img_results, #extras_results, #txt2im g_footer p {text-wrap: wrap; max-width: 100% !important; } /* maintain side by side split on larger mobile displays for from text */ - } - #scripts_alwayson_txt2img div, #scripts_alwayson_img2img div { max-width: 100%; } - #txt2img_prompt_container, #img2img_prompt_container, #control_prompt_container { resize:vertical !important; } - #txt2img_generate_box, #txt2img_enqueue_wrapper { min-width: 100% !important;} /* make generate and enqueue buttons take up the entire width of their rows. */ - #img2img_toprow>div.gradio-column {flex-grow: 1 !important;} /*make interrogate buttons take up appropriate space. 
*/ - #img2img_actions_column {display: flex; min-width: fit-content !important; flex-direction: row;justify-content: space-evenly; align-items: center;} - #txt2img_generate_box, #img2img_generate_box, #txt2img_enqueue_wrapper,#img2img_enqueue_wrapper {display: flex;flex-direction: column;height: 4em !important;align-items: stretch;justify-content: space-evenly;} - #img2img_interface, #img2img_results, #img2img_footer p {text-wrap: wrap; min-width: 100% !important; max-width: 100% !important;} /* maintain single column for from image operations on larger mobile devices */ - #img2img_sketch, #img2maskimg, #inpaint_sketch {display: flex; overflow: auto !important; resize: none !important; } /* fix inpaint image display being too large for mobile displays */ - #img2maskimg canvas { width: auto !important; max-height: 100% !important; height: auto !important; } - #txt2img_sampler, #txt2img_batch, #txt2img_seed_group, #txt2img_advanced, #txt2img_second_pass, #img2img_sampling_group, #img2img_resize_group, #img2img_batch_group, #img2img_seed_group, #img2img_denoise_group, #img2img_advanced_group { width: 100% !important; } /* fix from text/image UI elements to prevent them from moving around within the UI */ - #img2img_resize_group .gradio-radio>div { display: flex; flex-direction: column; width: unset !important; } - #inpaint_controls div {display:flex;flex-direction: row;} - #inpaint_controls .gradio-radio>div { display: flex; flex-direction: column !important; } - #models_tab { flex-direction: column-reverse !important; } /* move image preview/output on models page to bottom of page */ - #enqueue_keyboard_shortcut_modifiers, #enqueue_keyboard_shortcut_key div { max-width: 40% !important;} /* fix settings for agent scheduler */ - #settings { display: flex; flex-direction: row; flex-wrap: wrap; max-width: 100% !important; } /* adjust width of certain settings item to allow aligning as row, but not have it go off the screen */ - #settings div.tab-content>div>div>div { max-width: 80% !important;} - #settings div .gradio-radio { width: unset !important; } - #tab_extensions table { border-collapse: collapse; display: block; overflow-x:auto !important;} /* enable scrolling on extensions tab */ - ::-webkit-scrollbar { width: 25px !important; height:25px; } /* increase scrollbar size to make it finger friendly */ - .gradio-dropdown ul.options {max-height: 41vh !important; } /* adjust dropdown size to make them easier to select individual items on mobile. */ - .gradio-dropdown ul.options li.item {height: 40px !important; display: flex; align-items: center;} - .gradio-slider input[type="number"] { width: 4em; font-size: var(--text-xs); height: 16px; text-align: center; } /* adjust slider input fields as they were too large for mobile devices. 
*/ - #txt2img_settings .block .padded:not(.gradio-accordion) {padding: 0 !important;margin-right: 0; min-width: 100% !important; width:100% !important;} - } -} +@font-face { font-family: 'NotoSans'; font-display: swap; font-style: normal; font-weight: 100; src: local('NotoSans'), url('notosans-nerdfont-regular.ttf') } +:root { --left-column: 500px; } +a { font-weight: bold; cursor: pointer; } +h2 { margin-top: 1em !important; font-size: var(--text-xxl) !important; } +footer { display: none; } +table { overflow-x: auto !important; overflow-y: auto !important; } +td { border-bottom: none !important; padding: 0.1em 0.5em !important; } +tr { border-bottom: none !important; padding: 0.1em 0.5em !important; } +textarea { overflow-y: auto !important; } +span { font-size: var(--text-md) !important; } +button { font-size: var(--text-lg) !important; } + +/* gradio elements */ +.block .padded:not(.gradio-accordion) { padding: 0 !important; margin-right: 0; min-width: 90px !important; } +.compact { gap: 1em 0.2em; background: transparent !important; padding: 0 !important; } +.flex-break { flex-basis: 100% !important; } +.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; gap: 0.5em 1em; flex-grow: 1 !important; } +.form-compact { margin-bottom: 0 !important; gap: 0.2em 1em !important; } +.gap .compact{ padding: 0; gap: 0.2em 0; } +.hidden { display: none; } +.tabitem { padding: 0 !important; } + +.gradio-dropdown, .block.gradio-slider, .block.gradio-checkbox, .block.gradio-textbox, .block.gradio-radio, .block.gradio-checkboxgroup, .block.gradio-number, .block.gradio-colorpicker { border-width: 0 !important; box-shadow: none !important;} +.gradio-accordion { padding-top: var(--spacing-md) !important; padding-right: 0 !important; padding-bottom: 0 !important; color: var(--body-text-color); } +.gradio-accordion .label-wrap .icon { color: var(--button-primary-border-color); } +.gradio-button { border-radius: var(--radius-lg) !important; } +.gradio-button.secondary-down { background: var(--button-secondary-background-fill); color: var(--button-secondary-text-color); } +.gradio-button.secondary-down, .gradio-button.secondary-down:hover { box-shadow: 1px 1px 1px rgba(0,0,0,0.25) inset, 0px 0px 3px rgba(0,0,0,0.15) inset; } +.gradio-button.secondary-down:hover { background: var(--button-secondary-background-fill-hover); color: var(--button-secondary-text-color-hover); } +.gradio-button.tool { max-width: min-content; min-width: min-content !important; font-size: 20px !important; color: var(--body-text-color) !important; margin-top: auto; margin-bottom: var(--spacing-md); align-self: center; } +.gradio-checkbox { margin: 0.75em 1.5em 0 0; align-self: center; } +.gradio-column { min-width: min(160px, 100%) !important; } +.gradio-container { max-width: unset !important; padding: var(--block-label-padding) !important; } +.gradio-container .prose a, .gradio-container .prose a:visited{ color: unset; text-decoration: none; } +.gradio-dropdown { margin-right: var(--spacing-sm) !important; min-width:160px; max-width:fit-content } +.gradio-dropdown ul.options { z-index: 1000; min-width: fit-content; max-height: 33vh !important; white-space: nowrap; } +.gradio-dropdown ul.options li.item { padding: var(--spacing-xs); } +.gradio-dropdown ul.options li.item:not(:has(.hide)) { background-color: var(--primary-500); } +.gradio-dropdown .token { padding: var(--spacing-xs); } +.gradio-dropdown span { margin-bottom: 0 !important; font-size: var(--text-sm); } +.gradio-dropdown
.reference { margin-bottom: var(--spacing-sm) !important; } +.gradio-html { color: var(--body-text-color); } +.gradio-html .min { min-height: 0; } +.gradio-html div.wrap { height: 100%; } +.gradio-number { min-width: unset !important; max-width: 5em !important; } +.gradio-textbox { overflow: visible !important; } +.gradio-radio { padding: 0 !important; width: max-content !important; } +.gradio-slider { margin-right: var(--spacing-sm) !important; width: max-content !important } +.gradio-slider input[type="number"] { width: 6em; font-size: var(--text-xs); height: 16px; text-align: right; } + +/* custom gradio elements */ +.accordion-compact { padding: 8px 0px 4px 0px !important; } +.settings-accordion>div { flex-flow: wrap; } +.small-accordion .form { min-width: var(--left-column) !important; max-width: max-content; } +.small-accordion .label-wrap .icon { margin-right: 1.6em; margin-left: 0.6em; color: var(--button-primary-border-color); } +.small-accordion .label-wrap { padding: 16px 0px 8px 0px; margin: 0; border-top: 2px solid var(--button-secondary-border-color); } +.small-accordion { width: fit-content !important; min-width: fit-content !important; padding-left: 0 !important; } +.extension-script { max-width: 48vw; } +button.custom-button{ border-radius: var(--button-large-radius); padding: var(--button-large-padding); font-weight: var(--button-large-text-weight); border: var(--button-border-width) solid var(--button-secondary-border-color); + background: var(--button-secondary-background-fill); color: var(--button-secondary-text-color); font-size: var(--text-lg); + display: inline-flex; justify-content: center; align-items: center; transition: var(--button-transition); box-shadow: var(--button-shadow); text-align: center; } + +/* themes */ +.theme-preview { display: none; position: fixed; border: var(--spacing-sm) solid var(--neutral-600); box-shadow: 2px 2px 2px 2px var(--neutral-700); top: 0; bottom: 0; left: 0; right: 0; margin: auto; max-width: 75vw; z-index: 999; } + +/* txt2img/img2img specific */ +.block.token-counter{ position: absolute; display: inline-block; right: 1em; min-width: 0 !important; width: auto; z-index: 100; top: -0.5em; } +.block.token-counter span{ background: var(--input-background-fill) !important; box-shadow: 0 0 0.0 0.3em rgba(192,192,192,0.15), inset 0 0 0.6em rgba(192,192,192,0.075); border: 2px solid rgba(192,192,192,0.4) !important; } +.block.token-counter.error span{ box-shadow: 0 0 0.0 0.3em rgba(255,0,0,0.15), inset 0 0 0.6em rgba(255,0,0,0.075); border: 2px solid rgba(255,0,0,0.4) !important; } +.block.token-counter div{ display: inline; } +.block.token-counter span{ padding: 0.1em 0.75em; } +.performance { font-size: var(--text-xs); color: #444; } +.performance p { display: inline-block; color: var(--body-text-color-subdued) !important } +.performance .time { margin-right: 0; } +.thumbnails { background: var(--body-background-fill); } +#control_gallery { height: 564px; } +#control-result { padding: 0.5em; } +#control-inputs { margin-top: 1em; } +#txt2img_prompt_container, #img2img_prompt_container, #control_prompt_container { margin-right: var(--layout-gap) } +#txt2img_footer, #img2img_footer, #control_footer { height: fit-content; display: none; } +#txt2img_generate_box, #img2img_generate_box, #control_general_box { gap: 0.5em; flex-wrap: wrap-reverse; height: fit-content; } +#txt2img_actions_column, #img2img_actions_column, #control_actions_column { gap: 0.3em; height: fit-content; } +#txt2img_generate_box>button, #img2img_generate_box>button, 
#control_generate_box>button, #txt2img_enqueue, #img2img_enqueue { min-height: 42px; max-height: 42px; line-height: 1em; } +#txt2img_generate_line2, #img2img_generate_line2, #txt2img_tools, #img2img_tools, #control_generate_line2, #control_tools { display: flex; } +#txt2img_generate_line2>button, #img2img_generate_line2>button, #extras_generate_box>button, #control_generate_line2>button, #txt2img_tools>button, #img2img_tools>button, #control_tools>button { height: 2em; line-height: 0; font-size: var(--text-md); + min-width: unset; display: block !important; } +#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt, #control_prompt, #control_neg_prompt { display: contents; } +#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } +#control_generate_box { min-width: unset; width: 100%; } +#txt2img_actions_column, #img2img_actions_column, #control_actions { flex-flow: wrap; justify-content: space-between; } +#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper, #control_enqueue_wrapper { min-width: unset !important; width: 48%; } +.interrogate-clip { position: absolute; right: 3em; top: -2.7em; max-width: fit-content; } +.interrogate-blip { position: absolute; right: 1em; top: -2.7em; max-width: fit-content; } +.interrogate-col{ min-width: 0 !important; max-width: fit-content; margin-right: var(--spacing-xxl); } +.interrogate-col>button{ flex: 1; width: 7em; max-height: 84px; } +#sampler_selection_img2img { margin-top: 1em; } +#txtimg_hr_finalres{ min-height: 0 !important; } +#img2img_scale_resolution_preview.block{ display: flex; align-items: end; } +#txtimg_hr_finalres .resolution, #img2img_scale_resolution_preview .resolution{ font-weight: bold; } +div#extras_scale_to_tab div.form{ flex-direction: row; } +#img2img_unused_scale_by_slider { visibility: hidden; width: 0.5em; max-width: 0.5em; min-width: 0.5em; } +.inactive{ opacity: 0.5; } +#mode_img2img .gradio-image>div.fixed-height, #mode_img2img .gradio-image>div.fixed-height img{ height: 480px !important; max-height: 480px !important; min-height: 480px !important; } +#img2img_sketch, #img2maskimg, #inpaint_sketch { overflow: overlay !important; resize: auto; background: var(--panel-background-fill); z-index: 5; } +.image-buttons button{ min-width: auto; } +.infotext { overflow-wrap: break-word; line-height: 1.5em; } +.infotext>p { padding-left: 1em; text-indent: -1em; white-space: pre-wrap; } +.tooltip { display: block; position: fixed; top: 1em; right: 1em; padding: 0.5em; background: var(--input-background-fill); color: var(--body-text-color); border: 1pt solid var(--button-primary-border-color); + width: 22em; min-height: 1.3em; font-size: var(--text-xs); transition: opacity 0.2s ease-in; pointer-events: none; opacity: 0; z-index: 999; } +.tooltip-show { opacity: 0.9; } +.toolbutton-selected { background: var(--background-fill-primary) !important; } + +/* settings */ +#si-sparkline-memo, #si-sparkline-load { background-color: #111; } +#quicksettings { width: fit-content; } +#quicksettings>button { padding: 0 1em 0 0; align-self: end; margin-bottom: var(--text-sm); } +#settings { display: flex; gap: var(--layout-gap); } +#settings div { border: none; gap: 0; margin: 0 0 var(--layout-gap) 0px; padding: 0; } +#settings>div.tab-content { flex: 10 0 75%; display: grid; } +#settings>div.tab-content>div { border: none; padding: 0; } +#settings>div.tab-content>div>div>div>div>div { flex-direction: unset; } +#settings>div.tab-nav { display:
grid; grid-template-columns: repeat(auto-fill, .5em minmax(10em, 1fr)); flex: 1 0 auto; width: 12em; align-self: flex-start; gap: var(--spacing-xxl); } +#settings>div.tab-nav button { display: block; border: none; text-align: left; white-space: initial; padding: 0; } +#settings>div.tab-nav>#settings_show_all_pages { padding: var(--size-2) var(--size-4); } +#settings .block.gradio-checkbox { margin: 0; width: auto; } +#settings .dirtyable { gap: .5em; } +#settings .dirtyable.hidden { display: none; } +#settings .modification-indicator { height: 1.2em; border-radius: 1em !important; padding: 0; width: 0; margin-right: 0.5em; } +#settings .modification-indicator:disabled { visibility: hidden; } +#settings .modification-indicator.saved { background: var(--color-accent-soft); width: var(--spacing-sm); } +#settings .modification-indicator.changed { background: var(--color-accent); width: var(--spacing-sm); } +#settings .modification-indicator.changed.unsaved { background-image: linear-gradient(var(--color-accent) 25%, var(--color-accent-soft) 75%); width: var(--spacing-sm); } +#settings_result { margin: 0 1.2em; } +.licenses { display: block !important; } + +/* live preview */ +.progressDiv{ position: relative; height: 20px; background: #b4c0cc; margin-bottom: -3px; } +.dark .progressDiv{ background: #424c5b; } +.progressDiv .progress{ width: 0%; height: 20px; background: #0060df; color: white; font-weight: bold; line-height: 20px; text-align: right; overflow: visible; white-space: nowrap; padding: 0 0.5em; } +.livePreview { position: absolute; z-index: 50; background-color: transparent; width: -moz-available; width: -webkit-fill-available; } +.livePreview img { position: absolute; object-fit: contain; width: 100%; height: 100%; } +.dark .livePreview { background-color: rgb(17 24 39 / var(--tw-bg-opacity)); } +.popup-metadata { color: white; background: #0000; display: inline-block; white-space: pre-wrap; font-size: var(--text-xxs); } +.global-popup{ display: flex; position: fixed; z-index: 10001; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(20, 20, 20, 0.95);} +.global-popup-close:before { content: "×"; } +.global-popup-close{ position: fixed; right: 0.5em; top: 0; cursor: pointer; color: white; font-size: 32pt; } +.global-popup-inner{ display: inline-block; margin: auto; padding: 2em; } + +/* fullpage image viewer */ +#lightboxModal{ display: none; position: fixed; z-index: 1001; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(20, 20, 20, 0.75); backdrop-filter: blur(6px); + user-select: none; -webkit-user-select: none; flex-direction: row; } +.modalControls { display: flex; justify-content: space-evenly; background-color: transparent; position: absolute; width: 99%; z-index: 1; } +.modalControls:hover { background-color: #50505050; } +.modalControls span { color: white; font-size: 2em; font-weight: bold; cursor: pointer; filter: grayscale(100%); } +.modalControls span:hover, .modalControls span:focus { color: var(--highlight-color); filter: none; } +.lightboxModalPreviewZone { display: flex; width: 100%; height: 100%; } +.lightboxModalPreviewZone:focus-visible { outline: none; } +.lightboxModalPreviewZone>img { display: block; margin: auto; width: auto; } +.lightboxModalPreviewZone>img.modalImageFullscreen{ object-fit: contain; height: 100%; width: 100%; min-height: 0; background: transparent; } +table.settings-value-table { background: white; border-collapse: collapse; margin: 1em; border:
var(--spacing-sm) solid white; } +table.settings-value-table td { padding: 0.4em; border: 1px solid #ccc; max-width: 36em; } +.modalPrev, .modalNext { cursor: pointer; position: relative; z-index: 1; top: 0; width: auto; height: 100vh; line-height: 100vh; text-align: center; padding: 16px; + margin-top: -50px; color: white; font-weight: bold; font-size: 20px; transition: 0.6s ease; user-select: none; -webkit-user-select: none; } +.modalNext { right: 0; } +.modalPrev:hover, .modalNext:hover { background-color: rgba(0, 0, 0, 0.8); } +#imageARPreview { position: absolute; top: 0px; left: 0px; border: 2px solid red; background: rgba(255, 0, 0, 0.3); z-index: 900; pointer-events: none; display: none; } + +/* context menu (ie for the generate button) */ +#context-menu { z-index: 9999; position: absolute; display: block; padding: var(--spacing-md); border: 2px solid var(--highlight-color); background: var(--background-fill-primary); color: var(--body-text-color); } +.context-menu-items { list-style: none; margin: 0; padding: 0; font-size: var(--text-sm); } +.context-menu-items a { display: block; padding: var(--spacing-md); cursor: pointer; font-weight: normal; } +.context-menu-items a:hover { background: var(--highlight-color) } + +/* extensions */ +#tab_extensions table, #tab_config table{ border-collapse: collapse; } +#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: 1px solid #ccc; padding: 0.25em 0.5em; } +#tab_extensions table tr:hover, #tab_config table tr:hover { background-color: var(--neutral-500) !important; } +#tab_extensions table input[type="checkbox"] { margin-right: 0.5em; appearance: checkbox; } +#tab_extensions button{ max-width: 16em; } +#tab_extensions input[disabled="disabled"]{ opacity: 0.5; } +.extension-tag{ font-weight: bold; font-size: var(--text-sm); } +.extension-button { font-size: var(--text-sm) !important; width: 6em; } +#extensions .name{ font-size: var(--text-lg) } +#extensions .type{ opacity: 0.5; font-size: var(--text-sm); text-align: center; } +#extensions .version{ opacity: 0.7; } +#extensions .info{ margin: 0; } +#extensions .date{ opacity: 0.85; font-size: var(--text-sm); } + +/* extra networks */ +.extra-networks>div { margin: 0; border-bottom: none !important; gap: 0.3em 0; } +.extra-networks .second-line { display: flex; width: -moz-available; width: -webkit-fill-available; gap: 0.3em; box-shadow: var(--input-shadow); } +.extra-networks .search { flex: 1; } +.extra-networks .description { flex: 3; } +.extra-networks .tab-nav>button { margin-right: 0; height: 24px; padding: 2px 4px 2px 4px; } +.extra-networks .buttons { position: absolute; right: 0; margin: -4px; background: var(--background-color); } +.extra-networks .buttons>button { margin-left: -0.2em; height: 1.4em; color: var(--primary-300) !important; font-size: 20px !important; } +.extra-networks .custom-button { width: 100%; background: none; justify-content: left; text-align: left; padding: 3px 3px 3px 12px; text-indent: -6px; box-shadow: none; line-break: auto; } +.extra-networks .custom-button:hover { background: var(--button-primary-background-fill) } +.extra-networks-tab { padding: 0 !important; } +.extra-network-subdirs { background: var(--input-background-fill); overflow-x: hidden; overflow-y: auto; min-width: max(15%, 120px); padding-top: 0.5em; margin-top: -4px !important; } +.extra-networks-page { display: flex } +.extra-network-cards { display: flex; flex-wrap: wrap; overflow-y: auto; overflow-x: hidden; align-content:
flex-start; width: -moz-available; width: -webkit-fill-available; } +.extra-network-cards .card { height: fit-content; margin: 0 0 0.5em 0.5em; position: relative; scroll-snap-align: start; scroll-margin-top: 0; } +.extra-network-cards .card .overlay { position: absolute; bottom: 0; padding: 0.2em; z-index: 10; width: 100%; background: none; } +.extra-network-cards .card .overlay .name { font-size: var(--text-lg); font-weight: bold; text-shadow: 1px 1px black; color: white; overflow-wrap: break-word; } +.extra-network-cards .card .preview { box-shadow: var(--button-shadow); min-height: 30px; } +.extra-network-cards .card:hover .overlay { background: rgba(0, 0, 0, 0.40); } +.extra-network-cards .card:hover .preview { box-shadow: none; filter: grayscale(100%); } +.extra-network-cards .card .overlay .tags { display: none; overflow-wrap: break-word; } +.extra-network-cards .card .overlay .tag { padding: 2px; margin: 2px; background: rgba(70, 70, 70, 0.60); font-size: var(--text-md); cursor: pointer; display: inline-block; } +.extra-network-cards .card .actions>span { padding: 4px; font-size: 34px !important; } +.extra-network-cards .card .actions>span:hover { color: var(--highlight-color); } +.extra-network-cards .card:hover .actions { display: block; } +.extra-network-cards .card:hover .overlay .tags { display: block; } +.extra-network-cards .card .actions { font-size: 3em; display: none; text-align-last: right; cursor: pointer; font-variant: unicase; position: absolute; z-index: 80; right: 0; height: 0.7em; width: 100%; background: rgba(0, 0, 0, 0.40); } +.extra-network-cards .card-list { display: flex; margin: 0.3em; padding: 0.3em; background: var(--input-background-fill); cursor: pointer; border-radius: var(--button-large-radius); } +.extra-network-cards .card-list .tag { color: var(--primary-500); margin-left: 0.8em; } +.extra-details-close { position: fixed; top: 0.2em; right: 0.2em; z-index: 99; background: var(--button-secondary-background-fill) !important; } +#txt2img_description, #img2img_description, #control_description { max-height: 63px; overflow-y: auto !important; } +#txt2img_description>label>textarea, #img2img_description>label>textarea, #control_description>label>textarea { font-size: var(--text-sm) } + +#txt2img_extra_details>div, #img2img_extra_details>div { overflow-y: auto; min-height: 40vh; max-height: 80vh; align-self: flex-start; } +#txt2img_extra_details, #img2img_extra_details { position: fixed; bottom: 50%; left: 50%; transform: translate(-50%, 50%); padding: 0.8em; border: var(--block-border-width) solid var(--highlight-color) !important; + z-index: 100; box-shadow: var(--button-shadow); } +#txt2img_extra_details td:first-child, #img2img_extra_details td:first-child { font-weight: bold; vertical-align: top; } +#txt2img_extra_details .gradio-image, #img2img_extra_details .gradio-image { max-height: 70vh; } + + +/* specific elements */ +#modelmerger_interp_description { margin-top: 1em; margin-bottom: 1em; } +#scripts_alwayson_txt2img, #scripts_alwayson_img2img { padding: 0 } +#scripts_alwayson_txt2img>.label-wrap, #scripts_alwayson_img2img>.label-wrap { background: var(--input-background-fill); padding: 0; margin: 0; border-radius: var(--radius-lg); } +#scripts_alwayson_txt2img>.label-wrap>span, #scripts_alwayson_img2img>.label-wrap>span { padding: var(--spacing-xxl); } +#scripts_alwayson_txt2img div { max-width: var(--left-column); } +#script_txt2img_agent_scheduler { display: none; }
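+/* note: the #txt2img_extra_details / #img2img_extra_details dialogs above use the classic fixed-position centering trick: bottom: 50% / left: 50% places the element's corner at the viewport center, and translate(-50%, 50%) shifts it back by half its own width and height, so the dialog stays centered regardless of its content size */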
+#refresh_tac_refreshTempFiles { display: none; } +#train_tab { flex-flow: row-reverse; } +#models_tab { flex-flow: row-reverse; } +#swap_axes>button { min-width: 100px; font-size: var(--text-md); } +#ui_defaults_review { margin: 1em; } + +/* extras */ +.extras { gap: 0.2em 1em !important } +#extras_generate, #extras_interrupt, #extras_skip { display: block !important; position: relative; height: 36px; } +#extras_upscale { margin-top: 10px } +#pnginfo_html_info .gradio-html > div { margin: 0.5em; } + +/* log monitor */ +.log-monitor { display: none; justify-content: unset !important; overflow: hidden; padding: 0; margin-top: auto; font-family: monospace; font-size: var(--text-xs); } +.log-monitor td, .log-monitor th { padding-left: 1em; } + +/* changelog */ +.md h2 { background-color: var(--background-fill-primary); padding: 0.5em; } +.md ul { list-style-type: square !important; text-indent: 1em; margin-left: 4em; } +.md li { list-style-position: outside !important; text-indent: 0; } +.md p { margin-left: 2em; } + +/* custom component */ +.folder-selector textarea { height: 2em !important; padding: 6px !important; } +.nvml { position: fixed; bottom: 10px; right: 10px; background: var(--background-fill-primary); border: 1px solid var(--button-primary-border-color); padding: 6px; color: var(--button-primary-text-color); + font-size: var(--text-xxs); z-index: 50; font-family: monospace; display: none; } + +/* control */ +#control_input_type { max-width: 18em } +#control_settings .small-accordion .form { min-width: 350px !important } +.control-button { min-height: 42px; max-height: 42px; line-height: 1em; } +.control-tabs > .tab-nav { margin-bottom: 0; margin-top: 0; } +.control-unit { max-width: 1200px; padding: 0 !important; margin-top: -10px !important; } +.control-unit > .label-wrap { margin-bottom: 0 !important; } +.processor-settings { padding: 0 !important; max-width: 300px; } +.processor-group>div { flex-flow: wrap;gap: 1em; } + +/* main info */ +.main-info { font-weight: var(--section-header-text-weight); color: var(--body-text-color-subdued); padding: 1em !important; margin-top: 2em !important; line-height: var(--line-lg) !important; } + +/* loader */ +.splash { position: fixed; top: 0; left: 0; width: 100vw; height: 100vh; z-index: 1000; display: block; text-align: center; } +.motd { margin-top: 2em; color: var(--body-text-color-subdued); font-family: monospace; font-variant: all-petite-caps; } +.splash-img { margin: 10% auto 0 auto; width: 512px; background-repeat: no-repeat; height: 512px; animation: color 10s infinite alternate; } +.loading { color: white; position: absolute; top: 20%; left: 50%; transform: translateX(-50%); } +.loader { width: 300px; height: 300px; border: var(--spacing-md) solid transparent; border-radius: 50%; border-top: var(--spacing-md) solid var(--primary-600); animation: spin 4s linear infinite; position: relative; } +.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: var(--spacing-md) solid transparent; } +.loader::before { border-top-color: var(--primary-900); animation: 3s spin linear infinite; } +.loader::after { border-top-color: var(--primary-300); animation: spin 1.5s linear infinite; } +@keyframes move { from { background-position-x: 0, -40px; } to { background-position-x: 0, 40px; } } +@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } +@keyframes color { from { filter: hue-rotate(0deg) } to { filter: hue-rotate(360deg) } 
} + +:root, .light, .dark { + --text-xxs: 9px; + --text-xs: 10px; + --text-sm: 12px; + --text-md: 14px; + --text-lg: 15px; + --text-xl: 16px; + --text-xxl: 17px; + --spacing-xxs: 1px; + --spacing-xs: 2px; + --spacing-sm: 3px; + --spacing-lg: 4px; + --spacing-xl: 5px; + --spacing-xxl: 6px; +} + +@media (hover: none) and (pointer: coarse) { /* Apply different styles for devices with coarse pointers dependent on screen resolution */ + @media (max-width: 1024px) { /* Do not affect displays larger than 1024px wide. */ + @media (max-width: 399px) { /* Screens smaller than 400px wide */ + :root, .light, .dark { --left-column: 100%; } + #txt2img_results, #img2img_results, #extras_results { min-width: calc(min(320px, 100%)) !important;} /* maintain single column for from image operations on larger mobile devices */ + #txt2img_footer p { text-wrap: wrap; } + } + @media (min-width: 400px) { /* Screens 400px and wider */ + :root, .light, .dark {--left-column: 50%;} + #txt2img_results, #extras_results, #txt2img_footer p {text-wrap: wrap; max-width: 100% !important; } /* maintain side by side split on larger mobile displays for from text */ + } + #scripts_alwayson_txt2img div, #scripts_alwayson_img2img div { max-width: 100%; } + #txt2img_prompt_container, #img2img_prompt_container, #control_prompt_container { resize:vertical !important; } + #txt2img_generate_box, #txt2img_enqueue_wrapper { min-width: 100% !important;} /* make generate and enqueue buttons take up the entire width of their rows. */ + #img2img_toprow>div.gradio-column {flex-grow: 1 !important;} /*make interrogate buttons take up appropriate space. */ + #img2img_actions_column {display: flex; min-width: fit-content !important; flex-direction: row;justify-content: space-evenly; align-items: center;} + #txt2img_generate_box, #img2img_generate_box, #txt2img_enqueue_wrapper,#img2img_enqueue_wrapper {display: flex;flex-direction: column;height: 4em !important;align-items: stretch;justify-content: space-evenly;} + #img2img_interface, #img2img_results, #img2img_footer p {text-wrap: wrap; min-width: 100% !important; max-width: 100% !important;} /* maintain single column for from image operations on larger mobile devices */ + #img2img_sketch, #img2maskimg, #inpaint_sketch {display: flex; overflow: auto !important; resize: none !important; } /* fix inpaint image display being too large for mobile displays */ + #img2maskimg canvas { width: auto !important; max-height: 100% !important; height: auto !important; } + #txt2img_sampler, #txt2img_batch, #txt2img_seed_group, #txt2img_advanced, #txt2img_second_pass, #img2img_sampling_group, #img2img_resize_group, #img2img_batch_group, #img2img_seed_group, #img2img_denoise_group, #img2img_advanced_group { width: 100% !important; } /* fix from text/image UI elements to prevent them from moving around within the UI */ + #img2img_resize_group .gradio-radio>div { display: flex; flex-direction: column; width: unset !important; } + #inpaint_controls div {display:flex;flex-direction: row;} + #inpaint_controls .gradio-radio>div { display: flex; flex-direction: column !important; } + #models_tab { flex-direction: column-reverse !important; } /* move image preview/output on models page to bottom of page */ + #enqueue_keyboard_shortcut_modifiers, #enqueue_keyboard_shortcut_key div { max-width: 40% !important;} /* fix settings for agent scheduler */ + #settings { display: flex; flex-direction: row; flex-wrap: wrap; max-width: 100% !important; } /* adjust width of certain settings items to allow aligning as row, but
not have it go off the screen */ + #settings div.tab-content>div>div>div { max-width: 80% !important;} + #settings div .gradio-radio { width: unset !important; } + #tab_extensions table { border-collapse: collapse; display: block; overflow-x:auto !important;} /* enable scrolling on extensions tab */ + ::-webkit-scrollbar { width: 25px !important; height:25px; } /* increase scrollbar size to make it finger friendly */ + .gradio-dropdown ul.options {max-height: 41vh !important; } /* adjust dropdown size to make them easier to select individual items on mobile. */ + .gradio-dropdown ul.options li.item {height: 40px !important; display: flex; align-items: center;} + .gradio-slider input[type="number"] { width: 4em; font-size: var(--text-xs); height: 16px; text-align: center; } /* adjust slider input fields as they were too large for mobile devices. */ + #txt2img_settings .block .padded:not(.gradio-accordion) {padding: 0 !important;margin-right: 0; min-width: 100% !important; width:100% !important;} + } +} diff --git a/javascript/timeless-beige.css b/javascript/timeless-beige.css index d9a17bd00..d0142bdce 100644 --- a/javascript/timeless-beige.css +++ b/javascript/timeless-beige.css @@ -1,297 +1,297 @@ -/* generic html tags */ -:root, .light, .dark { - --font: 'system-ui', 'ui-sans-serif', 'system-ui', "Roboto", sans-serif; - --font-mono: 'ui-monospace', 'Consolas', monospace; - --font-size: 16px; - --primary-100: #212226; /* bg color*/ - --primary-200: #17181b; /* drop down menu/ prompt window fill*/ - --primary-300: #0a0c0e; /* black */ - --primary-400: #2f3034; /* small buttons*/ - --primary-500: #434242; /* main accent color retro beige*/ - --primary-700: #e75d5d; /* light blue gray*/ - --primary-800: #e75d5d; /* sat orange(hover accent)*/ - --highlight-color: var(--primary-500); - --inactive-color: var(--primary--800); - --body-text-color: var(--neutral-100); - --body-text-color-subdued: var(--neutral-300); - --background-color: var(--primary-100); - --background-fill-primary: var(--input-background-fill); - --input-padding: 8px; - --input-background-fill: var(--primary-200); - --input-shadow: none; - --button-secondary-text-color: white; - --button-secondary-background-fill: var(--primary-400); - --button-secondary-background-fill-hover: var(--primary-700); - --block-title-text-color: var(--neutral-300); - --radius-sm: 1px; - --radius-lg: 6px; - --spacing-md: 4px; - --spacing-xxl: 8px; - --line-sm: 1.2em; - --line-md: 1.4em; -} - -html { font-size: var(--font-size); } -body, button, input, select, textarea { font-family: var(--font);} -button { max-width: 400px; } -img { background-color: var(--background-color); } -input[type=range] { height: var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } -input[type=range]::-webkit-slider-runnable-track, input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } -input[type=range]::-webkit-slider-thumb, input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } -input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } -::-webkit-scrollbar-track { background: #333333; } -::-webkit-scrollbar-thumb { background-color: 
var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } -div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } -div.compact { gap: 1em; } - -/* gradio style classes */ -fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } -.border-2 { border-width: 0; } -.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } -.bg-white { color: lightyellow; background-color: var(--inactive-color); } -.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } -.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } -.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: var(--radius-lg); box-shadow: 2px 2px 3px #111111; } -.gr-check-radio:checked { background-color: var(--highlight-color); } -.gr-compact { background-color: var(--background-color); } -.gr-form { border-width: 0; } -.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } -.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } -.gr-panel { background-color: var(--background-color); } -.eta-bar { display: none !important } -svg.feather.feather-image, .feather .feather-image { display: none } -.gap-2 { padding-top: 8px; } -.gr-box > div > div > input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } -.output-html { line-height: 1.2rem; overflow-x: hidden; } -.output-html > div { margin-bottom: 8px; } -.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ -.p-2 { padding: 0; } -.px-4 { padding-lefT: 1rem; padding-right: 1rem; } -.py-6 { padding-bottom: 0; } -.tabs { background-color: var(--background-color); } -.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } -.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } -div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} -#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;} -.label-wrap { background-color: #292b30; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } -.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } -.small-accordion .label-wrap .icon { margin-right: 1em; } -.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} -button.selected {background: var(--button-primary-background-fill);} -.center.boundedheight.flex {background-color: var(--input-background-fill);} -.compact {border-radius: var(--border-radius-lg);} -#logMonitorData {background-color: var(--input-background-fill);} -#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } -#tab_extensions table, #tab_config table { width: 96vw; } 
-#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} -#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} - -/* automatic style classes */ -.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } -.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } -.gallery-item { box-shadow: none !important; } -.performance { color: #888; } -.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; } -.image-buttons { gap: 10px !important; justify-content: center; } -.image-buttons > button { max-width: 160px; } -.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg) } -#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; } - -/* gradio elements overrides */ -#div.gradio-container { overflow-x: hidden; } -#img2img_label_copy_to_img2img { font-weight: normal; } -#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; } -#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; } -#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; padding-top: 16px; } -#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; } -#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; } -#quicksettings button {padding: 0 0.5em 0.1em 0.5em;} -#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, #extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; } -#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; } -#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; } -#settings > div.flex-wrap { width: 15em; } -#txt2img_cfg_scale { min-width: 200px; } -#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; } -#txt2img_checkboxes, #img2img_checkboxes { margin-bottom: 0.2em; } -#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; } -#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; } -#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; } - -#extras_upscale { margin-top: 10px } -#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); } -#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; } -#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) } -#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; } -#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; } - -/* based on gradio built-in dark theme */ -:root, .light, .dark { - --body-background-fill: var(--background-color); - --color-accent-soft: var(--neutral-700); - 
--background-fill-secondary: none; - --border-color-accent: var(--background-color); - --border-color-primary: var(--background-color); - --link-text-color-active: var(--primary-500); - --link-text-color: var(--secondary-500); - --link-text-color-hover: var(--secondary-400); - --link-text-color-visited: var(--secondary-600); - --shadow-spread: 1px; - --block-background-fill: None; - --block-border-color: var(--border-color-primary); - --block_border_width: None; - --block-info-text-color: var(--body-text-color-subdued); - --block-label-background-fill: var(--background-fill-secondary); - --block-label-border-color: var(--border-color-primary); - --block_label_border_width: None; - --block-label-text-color: var(--neutral-200); - --block_shadow: None; - --block_title_background_fill: None; - --block_title_border_color: None; - --block_title_border_width: None; - --panel-background-fill: var(--background-fill-secondary); - --panel-border-color: var(--border-color-primary); - --panel_border_width: None; - --checkbox-background-color: var(--primary-400); - --checkbox-background-color-focus: var(--primary-700); - --checkbox-background-color-hover: var(--primary-700); - --checkbox-background-color-selected: var(--primary-500); - --checkbox-border-color: transparent; - --checkbox-border-color-focus: var(--primary-800); - --checkbox-border-color-hover: var(--primary-800); - --checkbox-border-color-selected: var(--primary-800); - --checkbox-border-width: var(--input-border-width); - --checkbox-label-background-fill: None; - --checkbox-label-background-fill-hover: None; - --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); - --checkbox-label-border-color: var(--border-color-primary); - --checkbox-label-border-color-hover: var(--checkbox-label-border-color); - --checkbox-label-border-width: var(--input-border-width); - --checkbox-label-text-color: var(--body-text-color); - --checkbox-label-text-color-selected: var(--checkbox-label-text-color); - --error-background-fill: var(--background-fill-primary); - --error-border-color: var(--border-color-primary); - --error-text-color: #f768b7; /*was ef4444*/ - --input-background-fill-focus: var(--secondary-600); - --input-background-fill-hover: var(--input-background-fill); - --input-border-color: var(--background-color); - --input-border-color-focus: var(--primary-800); - --input-placeholder-color: var(--neutral-500); - --input-shadow-focus: None; - --loader_color: None; - --slider_color: None; - --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); - --table-border-color: var(--neutral-700); - --table-even-background-fill: var(--primary-300); - --table-odd-background-fill: var(--primary-200); - --table-row-focus: var(--color-accent-soft); - --button-border-width: var(--input-border-width); - --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); - --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); - --button-cancel-border-color: #dc2626; - --button-cancel-border-color-hover: var(--button-cancel-border-color); - --button-cancel-text-color: white; - --button-cancel-text-color-hover: var(--button-cancel-text-color); - --button-primary-background-fill: var(--primary-500); - --button-primary-background-fill-hover: var(--primary-800); - --button-primary-border-color: var(--primary-500); - --button-primary-border-color-hover: var(--button-primary-border-color); - --button-primary-text-color: white; - 
--button-primary-text-color-hover: var(--button-primary-text-color); - --button-secondary-border-color: var(--neutral-600); - --button-secondary-border-color-hover: var(--button-secondary-border-color); - --button-secondary-text-color-hover: var(--button-secondary-text-color); - --secondary-50: #eff6ff; - --secondary-100: #dbeafe; - --secondary-200: #bfdbfe; - --secondary-300: #93c5fd; - --secondary-400: #60a5fa; - --secondary-500: #3b82f6; - --secondary-600: #2563eb; - --secondary-700: #1d4ed8; - --secondary-800: #1e40af; - --secondary-900: #1e3a8a; - --secondary-950: #1d3660; - --neutral-50: #f0f0f0; /* */ - --neutral-100: #e0dedc;/* majority of text (neutral gray yellow) */ - --neutral-200: #d0d0d0; - --neutral-300: #9d9dab; /* top tab text (light accent) */ - --neutral-400: #ffba85;/* tab title (light beige) */ - --neutral-500: #484746; /* prompt text (desat accent)*/ - --neutral-600: #605a54; /* tab outline color (accent color)*/ - --neutral-700: #1b1c1e; /* small settings tab accent (dark)*/ - --neutral-800: #e75d5d; /* bright orange accent */ - --neutral-900: #111827; - --neutral-950: #0b0f19; - --radius-xxs: 0; - --radius-xs: 0; - --radius-md: 0; - --radius-xl: 0; - --radius-xxl: 0; - --body-text-size: var(--text-md); - --body-text-weight: 400; - --embed-radius: var(--radius-lg); - --color-accent: var(--primary-500); - --shadow-drop: 0; - --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); - --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; - --block-border-width: 1px; - --block-info-text-size: var(--text-sm); - --block-info-text-weight: 400; - --block-label-border-width: 1px; - --block-label-margin: 0; - --block-label-padding: var(--spacing-sm) var(--spacing-lg); - --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; - --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); - --block-label-text-size: var(--text-sm); - --block-label-text-weight: 400; - --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); - --block-radius: var(--radius-lg); - --block-shadow: var(--shadow-drop); - --block-title-background-fill: none; - --block-title-border-color: none; - --block-title-border-width: 0; - --block-title-padding: 0; - --block-title-radius: none; - --block-title-text-size: var(--text-md); - --block-title-text-weight: 400; - --container-radius: var(--radius-lg); - --form-gap-width: 1px; - --layout-gap: var(--spacing-xxl); - --panel-border-width: 0; - --section-header-text-size: var(--text-md); - --section-header-text-weight: 400; - --checkbox-border-radius: var(--radius-sm); - --checkbox-label-gap: 2px; - --checkbox-label-padding: var(--spacing-md); - --checkbox-label-shadow: var(--shadow-drop); - --checkbox-label-text-size: var(--text-md); - --checkbox-label-text-weight: 400; - --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); - --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); - --checkbox-shadow: var(--input-shadow); - --error-border-width: 1px; - --input-border-width: 1px; - --input-radius: var(--radius-lg); - --input-text-size: var(--text-md); - --input-text-weight: 400; - --loader-color: var(--color-accent); - --prose-text-size: var(--text-md); - 
--prose-text-weight: 400; - --prose-header-text-weight: 600; - --slider-color: ; - --table-radius: var(--radius-lg); - --button-large-padding: 2px 6px; - --button-large-radius: var(--radius-lg); - --button-large-text-size: var(--text-lg); - --button-large-text-weight: 400; - --button-shadow: none; - --button-shadow-active: none; - --button-shadow-hover: none; - --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); - --button-small-radius: var(--radius-lg); - --button-small-text-size: var(--text-md); - --button-small-text-weight: 400; - --button-transition: none; - --size-9: 64px; - --size-14: 64px; -} +/* generic html tags */ +:root, .light, .dark { + --font: system-ui, ui-sans-serif, "Roboto", sans-serif; + --font-mono: ui-monospace, 'Consolas', monospace; + --font-size: 16px; + --primary-100: #212226; /* bg color*/ + --primary-200: #17181b; /* drop down menu/ prompt window fill*/ + --primary-300: #0a0c0e; /* black */ + --primary-400: #2f3034; /* small buttons*/ + --primary-500: #434242; /* main accent color (dark neutral gray) */ + --primary-700: #e75d5d; /* hover accent (coral, same value as --primary-800) */ + --primary-800: #e75d5d; /* sat orange(hover accent)*/ + --highlight-color: var(--primary-500); + --inactive-color: var(--primary-800); + --body-text-color: var(--neutral-100); + --body-text-color-subdued: var(--neutral-300); + --background-color: var(--primary-100); + --background-fill-primary: var(--input-background-fill); + --input-padding: 8px; + --input-background-fill: var(--primary-200); + --input-shadow: none; + --button-secondary-text-color: white; + --button-secondary-background-fill: var(--primary-400); + --button-secondary-background-fill-hover: var(--primary-700); + --block-title-text-color: var(--neutral-300); + --radius-sm: 1px; + --radius-lg: 6px; + --spacing-md: 4px; + --spacing-xxl: 8px; + --line-sm: 1.2em; + --line-md: 1.4em; +} + +html { font-size: var(--font-size); } +body, button, input, select, textarea { font-family: var(--font);} +button { max-width: 400px; } +img { background-color: var(--background-color); } +input[type=range] { height: var(--line-sm); appearance: none; margin-top: 0; min-width: 160px; background-color: var(--background-color); width: 100%; background: transparent; } +input[type=range]::-webkit-slider-runnable-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } +input[type=range]::-moz-range-track { width: 100%; height: 6px; cursor: pointer; background: var(--primary-400); border-radius: var(--radius-lg); border: 0px solid #222222; } +input[type=range]::-webkit-slider-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } +input[type=range]::-moz-range-thumb { border: 0px solid #000000; height: var(--line-sm); width: 8px; border-radius: var(--radius-lg); background: white; cursor: pointer; appearance: none; margin-top: 0px; } +input[type=range]::-moz-range-progress { background-color: var(--primary-500); height: 6px; border-radius: var(--radius-lg); } +::-webkit-scrollbar-track { background: #333333; } +::-webkit-scrollbar-thumb { background-color: var(--highlight-color); border-radius: var(--radius-lg); border-width: 0; box-shadow: 2px 2px 3px #111111; } +div.form { border-width: 0; box-shadow: none; background: transparent; overflow: visible; margin-bottom: 6px; } +div.compact { gap: 1em; } + +/* gradio style classes */ +fieldset .gr-block.gr-box, label.block span { padding: 0; margin-top: -4px; } +.border-2 { border-width: 0; } +.border-b-2 { border-bottom-width: 2px; border-color: var(--highlight-color) !important; padding-bottom: 2px; margin-bottom: 8px; } +.bg-white { color: lightyellow; background-color: var(--inactive-color); }
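+/* notes on the block above: CSS custom properties resolve lazily at the point of use, so forward references such as var(--neutral-100) are valid even though the --neutral-* values are only defined in the second :root block further down this file; the input[type=range] track and thumb styles are kept as separate -webkit- and -moz- rules because a selector list containing a pseudo-element the browser cannot parse invalidates the entire rule */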
+.gr-box { border-radius: var(--radius-sm) !important; background-color: #111111 !important; box-shadow: 2px 2px 3px #111111; border-width: 0; padding: 4px; margin: 12px 0px 12px 0px } +.gr-button { font-weight: normal; box-shadow: 2px 2px 3px #111111; font-size: 0.8rem; min-width: 32px; min-height: 32px; padding: 3px; margin: 3px; } +.gr-check-radio { background-color: var(--inactive-color); border-width: 0; border-radius: var(--radius-lg); box-shadow: 2px 2px 3px #111111; } +.gr-check-radio:checked { background-color: var(--highlight-color); } +.gr-compact { background-color: var(--background-color); } +.gr-form { border-width: 0; } +.gr-input { background-color: #333333 !important; padding: 4px; margin: 4px; } +.gr-input-label { color: lightyellow; border-width: 0; background: transparent; padding: 2px !important; } +.gr-panel { background-color: var(--background-color); } +.eta-bar { display: none !important } +svg.feather.feather-image, .feather .feather-image { display: none } +.gap-2 { padding-top: 8px; } +.gr-box > div > div > input.gr-text-input { right: 0; width: 4em; padding: 0; top: -12px; border: none; max-height: 20px; } +.output-html { line-height: 1.2rem; overflow-x: hidden; } +.output-html > div { margin-bottom: 8px; } +.overflow-hidden .flex .flex-col .relative col .gap-4 { min-width: var(--left-column); max-width: var(--left-column); } /* this is a problematic one */ +.p-2 { padding: 0; } +.px-4 { padding-left: 1rem; padding-right: 1rem; } +.py-6 { padding-bottom: 0; } +.tabs { background-color: var(--background-color); } +.block.token-counter span { background-color: var(--input-background-fill) !important; box-shadow: 2px 2px 2px #111; border: none !important; font-size: 0.8rem; } +.tab-nav { zoom: 110%; margin-top: 10px; margin-bottom: 10px; border-bottom: 2px solid var(--highlight-color) !important; padding-bottom: 2px; } +div.tab-nav button.selected {background-color: var(--button-primary-background-fill);} +#settings div.tab-nav button.selected {background-color: var(--background-color); color: var(--primary-800); font-weight: bold;} +.label-wrap { background-color: #292b30; /* extension tab color*/ padding: 16px 8px 8px 8px; border-radius: var(--radius-lg); padding-left: 8px !important; } +.small-accordion .label-wrap { padding: 8px 0px 8px 0px; } +.small-accordion .label-wrap .icon { margin-right: 1em; } +.gradio-button.tool { border: none; box-shadow: none; border-radius: var(--radius-lg);} +button.selected {background: var(--button-primary-background-fill);} +.center.boundedheight.flex {background-color: var(--input-background-fill);} +.compact {border-radius: var(--radius-lg);} +#logMonitorData {background-color: var(--input-background-fill);} +#tab_extensions table td, #tab_extensions table th, #tab_config table td, #tab_config table th { border: none; padding: 0.5em; background-color: var(--primary-200); } +#tab_extensions table, #tab_config table { width: 96vw; } +#tab_extensions table input[type=checkbox] {appearance: none; border-radius: 0px;} +#tab_extensions button:hover { background-color: var(--button-secondary-background-fill-hover);} + +/* automatic style classes */ +.progressDiv { border-radius: var(--radius-sm) !important; position: fixed; top: 44px; right: 26px; max-width: 262px; height: 48px; z-index: 99; box-shadow: var(--button-shadow); } +.progressDiv .progress { border-radius: var(--radius-lg) !important; background: var(--highlight-color); line-height: 3rem; height: 48px; } +.gallery-item { box-shadow: none !important; }
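+/* note: zoom (used on .tab-nav above) was a non-standard property for a long time and older Firefox releases ignore it, leaving the tabs at 100%; transform: scale(1.1) is the portable alternative, though unlike zoom it does not affect surrounding layout */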
+.performance { color: #888; }
+.extra-networks { border-left: 2px solid var(--highlight-color) !important; padding-left: 4px; }
+.image-buttons { gap: 10px !important; justify-content: center; }
+.image-buttons > button { max-width: 160px; }
+.tooltip { background: var(--primary-800); color: white; border: none; border-radius: var(--radius-lg); }
+#system_row > button, #settings_row > button, #config_row > button { max-width: 10em; }
+
+/* gradio elements overrides */
+div.gradio-container { overflow-x: hidden; }
+#img2img_label_copy_to_img2img { font-weight: normal; }
+#txt2img_prompt, #txt2img_neg_prompt, #img2img_prompt, #img2img_neg_prompt { background-color: var(--background-color); box-shadow: 4px 4px 4px 0px #333333 !important; }
+#txt2img_prompt > label > textarea, #txt2img_neg_prompt > label > textarea, #img2img_prompt > label > textarea, #img2img_neg_prompt > label > textarea { font-size: 1.1rem; }
+#img2img_settings { min-width: calc(2 * var(--left-column)); max-width: calc(2 * var(--left-column)); background-color: #111111; padding-top: 16px; }
+#interrogate, #deepbooru { margin: 0 0px 10px 0px; max-width: 80px; max-height: 80px; font-weight: normal; font-size: 0.95em; }
+#quicksettings .gr-button-tool { font-size: 1.6rem; box-shadow: none; margin-top: -2px; height: 2.4em; }
+#quicksettings button { padding: 0 0.5em 0.1em 0.5em; }
+#open_folder_extras, #footer, #style_pos_col, #style_neg_col, #roll_col, #extras_upscaler_2, #extras_upscaler_2_visibility, #txt2img_seed_resize_from_w, #txt2img_seed_resize_from_h { display: none; }
+#save-animation { border-radius: var(--radius-sm) !important; margin-bottom: 16px; background-color: #111111; }
+#script_list { padding: 4px; margin-top: 16px; margin-bottom: 8px; }
+#settings > div.flex-wrap { width: 15em; }
+#txt2img_cfg_scale { min-width: 200px; }
+#txt2img_checkboxes, #img2img_checkboxes { background-color: transparent; margin-bottom: 0.2em; }
+#txt2img_actions_column, #img2img_actions_column { flex-flow: wrap; justify-content: space-between; }
+#txt2img_enqueue_wrapper, #img2img_enqueue_wrapper { min-width: unset; width: 48%; }
+#txt2img_generate_box, #img2img_generate_box { min-width: unset; width: 48%; }
+
+#extras_upscale { margin-top: 10px; }
+#txt2img_progress_row > div { min-width: var(--left-column); max-width: var(--left-column); }
+#txt2img_settings { min-width: var(--left-column); max-width: var(--left-column); background-color: #111111; padding-top: 16px; }
+#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding); }
+#txt2img_tools, #img2img_tools { margin-top: -4px; margin-bottom: -4px; }
+#txt2img_styles_row, #img2img_styles_row { margin-top: -6px; z-index: 200; }
+
+/* based on gradio built-in dark theme */
+:root, .light, .dark {
+  --body-background-fill: var(--background-color);
+  --color-accent-soft: var(--neutral-700);
+  --background-fill-secondary: none;
+  --border-color-accent: var(--background-color);
+  --border-color-primary: var(--background-color);
+  --link-text-color-active: var(--primary-500);
+  --link-text-color: var(--secondary-500);
+  --link-text-color-hover: var(--secondary-400);
+  --link-text-color-visited: var(--secondary-600);
+  --shadow-spread: 1px;
+  --block-background-fill: None;
+  --block-border-color: var(--border-color-primary);
+  --block_border_width: None;
+  --block-info-text-color: var(--body-text-color-subdued);
+  --block-label-background-fill: var(--background-fill-secondary);
+
--block-label-border-color: var(--border-color-primary); + --block_label_border_width: None; + --block-label-text-color: var(--neutral-200); + --block_shadow: None; + --block_title_background_fill: None; + --block_title_border_color: None; + --block_title_border_width: None; + --panel-background-fill: var(--background-fill-secondary); + --panel-border-color: var(--border-color-primary); + --panel_border_width: None; + --checkbox-background-color: var(--primary-400); + --checkbox-background-color-focus: var(--primary-700); + --checkbox-background-color-hover: var(--primary-700); + --checkbox-background-color-selected: var(--primary-500); + --checkbox-border-color: transparent; + --checkbox-border-color-focus: var(--primary-800); + --checkbox-border-color-hover: var(--primary-800); + --checkbox-border-color-selected: var(--primary-800); + --checkbox-border-width: var(--input-border-width); + --checkbox-label-background-fill: None; + --checkbox-label-background-fill-hover: None; + --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); + --checkbox-label-border-color: var(--border-color-primary); + --checkbox-label-border-color-hover: var(--checkbox-label-border-color); + --checkbox-label-border-width: var(--input-border-width); + --checkbox-label-text-color: var(--body-text-color); + --checkbox-label-text-color-selected: var(--checkbox-label-text-color); + --error-background-fill: var(--background-fill-primary); + --error-border-color: var(--border-color-primary); + --error-text-color: #f768b7; /*was ef4444*/ + --input-background-fill-focus: var(--secondary-600); + --input-background-fill-hover: var(--input-background-fill); + --input-border-color: var(--background-color); + --input-border-color-focus: var(--primary-800); + --input-placeholder-color: var(--neutral-500); + --input-shadow-focus: None; + --loader_color: None; + --slider_color: None; + --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-800)); + --table-border-color: var(--neutral-700); + --table-even-background-fill: var(--primary-300); + --table-odd-background-fill: var(--primary-200); + --table-row-focus: var(--color-accent-soft); + --button-border-width: var(--input-border-width); + --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); + --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); + --button-cancel-border-color: #dc2626; + --button-cancel-border-color-hover: var(--button-cancel-border-color); + --button-cancel-text-color: white; + --button-cancel-text-color-hover: var(--button-cancel-text-color); + --button-primary-background-fill: var(--primary-500); + --button-primary-background-fill-hover: var(--primary-800); + --button-primary-border-color: var(--primary-500); + --button-primary-border-color-hover: var(--button-primary-border-color); + --button-primary-text-color: white; + --button-primary-text-color-hover: var(--button-primary-text-color); + --button-secondary-border-color: var(--neutral-600); + --button-secondary-border-color-hover: var(--button-secondary-border-color); + --button-secondary-text-color-hover: var(--button-secondary-text-color); + --secondary-50: #eff6ff; + --secondary-100: #dbeafe; + --secondary-200: #bfdbfe; + --secondary-300: #93c5fd; + --secondary-400: #60a5fa; + --secondary-500: #3b82f6; + --secondary-600: #2563eb; + --secondary-700: #1d4ed8; + --secondary-800: #1e40af; + --secondary-900: #1e3a8a; + --secondary-950: #1d3660; + --neutral-50: #f0f0f0; /* */ + 
--neutral-100: #e0dedc;/* majority of text (neutral gray yellow) */ + --neutral-200: #d0d0d0; + --neutral-300: #9d9dab; /* top tab text (light accent) */ + --neutral-400: #ffba85;/* tab title (light beige) */ + --neutral-500: #484746; /* prompt text (desat accent)*/ + --neutral-600: #605a54; /* tab outline color (accent color)*/ + --neutral-700: #1b1c1e; /* small settings tab accent (dark)*/ + --neutral-800: #e75d5d; /* bright orange accent */ + --neutral-900: #111827; + --neutral-950: #0b0f19; + --radius-xxs: 0; + --radius-xs: 0; + --radius-md: 0; + --radius-xl: 0; + --radius-xxl: 0; + --body-text-size: var(--text-md); + --body-text-weight: 400; + --embed-radius: var(--radius-lg); + --color-accent: var(--primary-500); + --shadow-drop: 0; + --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); + --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset; + --block-border-width: 1px; + --block-info-text-size: var(--text-sm); + --block-info-text-weight: 400; + --block-label-border-width: 1px; + --block-label-margin: 0; + --block-label-padding: var(--spacing-sm) var(--spacing-lg); + --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0; + --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px); + --block-label-text-size: var(--text-sm); + --block-label-text-weight: 400; + --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px); + --block-radius: var(--radius-lg); + --block-shadow: var(--shadow-drop); + --block-title-background-fill: none; + --block-title-border-color: none; + --block-title-border-width: 0; + --block-title-padding: 0; + --block-title-radius: none; + --block-title-text-size: var(--text-md); + --block-title-text-weight: 400; + --container-radius: var(--radius-lg); + --form-gap-width: 1px; + --layout-gap: var(--spacing-xxl); + --panel-border-width: 0; + --section-header-text-size: var(--text-md); + --section-header-text-weight: 400; + --checkbox-border-radius: var(--radius-sm); + --checkbox-label-gap: 2px; + --checkbox-label-padding: var(--spacing-md); + --checkbox-label-shadow: var(--shadow-drop); + --checkbox-label-text-size: var(--text-md); + --checkbox-label-text-weight: 400; + --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); + --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); + --checkbox-shadow: var(--input-shadow); + --error-border-width: 1px; + --input-border-width: 1px; + --input-radius: var(--radius-lg); + --input-text-size: var(--text-md); + --input-text-weight: 400; + --loader-color: var(--color-accent); + --prose-text-size: var(--text-md); + --prose-text-weight: 400; + --prose-header-text-weight: 600; + --slider-color: ; + --table-radius: var(--radius-lg); + --button-large-padding: 2px 6px; + --button-large-radius: var(--radius-lg); + --button-large-text-size: var(--text-lg); + --button-large-text-weight: 400; + --button-shadow: none; + --button-shadow-active: none; + --button-shadow-hover: none; + --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm)); + --button-small-radius: var(--radius-lg); + --button-small-text-size: var(--text-md); + --button-small-text-weight: 400; + --button-transition: none; + --size-9: 64px; + --size-14: 
64px; +} diff --git a/modules/call_queue.py b/modules/call_queue.py index 482a87e82..04dbd81d0 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -1,86 +1,86 @@ -import html -import threading -import time -import cProfile -from modules import shared, progress, errors - -queue_lock = threading.Lock() - - -def wrap_queued_call(func): - def f(*args, **kwargs): - with queue_lock: - res = func(*args, **kwargs) - return res - return f - - -def wrap_gradio_gpu_call(func, extra_outputs=None): - name = func.__name__ - def f(*args, **kwargs): - # if the first argument is a string that says "task(...)", it is treated as a job id - if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")": - id_task = args[0] - progress.add_task_to_queue(id_task) - else: - id_task = None - with queue_lock: - progress.start_task(id_task) - res = [None, '', '', ''] - try: - res = func(*args, **kwargs) - progress.record_results(id_task, res) - except Exception as e: - shared.log.error(f"Exception: {e}") - shared.log.error(f"Arguments: args={str(args)[:10240]} kwargs={str(kwargs)[:10240]}") - errors.display(e, 'gradio call') - res[-1] = f"
<div class='error'>{html.escape(str(e))}</div>"
-            finally:
-                progress.finish_task(id_task)
-        return res
-    return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True, name=name)
-
-
-def wrap_gradio_call(func, extra_outputs=None, add_stats=False, name=None):
-    job_name = name if name is not None else func.__name__
-    def f(*args, extra_outputs_array=extra_outputs, **kwargs):
-        t = time.perf_counter()
-        shared.mem_mon.reset()
-        shared.state.begin(job_name)
-        try:
-            if shared.cmd_opts.profile:
-                pr = cProfile.Profile()
-                pr.enable()
-            res = func(*args, **kwargs)
-            if res is None:
-                msg = "No result returned from function"
-                shared.log.warning(msg)
-                res = [None, '', '', f"<div class='error'>{html.escape(msg)}</div>"]
-            else:
-                res = list(res)
-            if shared.cmd_opts.profile:
-                errors.profile(pr, 'Wrap')
-        except Exception as e:
-            errors.display(e, 'gradio call')
-            if extra_outputs_array is None:
-                extra_outputs_array = [None, '']
-            res = extra_outputs_array + [f"<div class='error'>{html.escape(type(e).__name__+': '+str(e))}</div>"]
-        shared.state.end()
-        if not add_stats:
-            return tuple(res)
-        elapsed = time.perf_counter() - t
-        elapsed_m = int(elapsed // 60)
-        elapsed_s = elapsed % 60
-        elapsed_text = f"{elapsed_m}m {elapsed_s:.2f}s" if elapsed_m > 0 else f"{elapsed_s:.2f}s"
-        vram_html = ''
-        if not shared.mem_mon.disabled:
-            vram = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.read().items()}
-            if vram.get('active_peak', 0) > 0:
-                vram_html = " | <p class='vram'>"
-                vram_html += f"GPU active {max(vram['active_peak'], vram['reserved_peak'])} MB reserved {vram['reserved']} | used {vram['used']} MB free {vram['free']} MB total {vram['total']} MB"
-                vram_html += f" | retries {vram['retries']} oom {vram['oom']}" if vram.get('retries', 0) > 0 or vram.get('oom', 0) > 0 else ''
-                vram_html += "</p>"
-        if isinstance(res, list):
-            res[-1] += f"<div class='performance'><p class='time'>Time: {elapsed_text}</p>{vram_html}</div>"
-        return tuple(res)
-    return f
+import html
+import threading
+import time
+import cProfile
+from modules import shared, progress, errors
+
+queue_lock = threading.Lock()
+
+
+def wrap_queued_call(func):
+    def f(*args, **kwargs):
+        with queue_lock:
+            res = func(*args, **kwargs)
+        return res
+    return f
+
+
+def wrap_gradio_gpu_call(func, extra_outputs=None):
+    name = func.__name__
+    def f(*args, **kwargs):
+        # if the first argument is a string that says "task(...)", it is treated as a job id
+        if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
+            id_task = args[0]
+            progress.add_task_to_queue(id_task)
+        else:
+            id_task = None
+        with queue_lock:
+            progress.start_task(id_task)
+            res = [None, '', '', '']
+            try:
+                res = func(*args, **kwargs)
+                progress.record_results(id_task, res)
+            except Exception as e:
+                shared.log.error(f"Exception: {e}")
+                shared.log.error(f"Arguments: args={str(args)[:10240]} kwargs={str(kwargs)[:10240]}")
+                errors.display(e, 'gradio call')
+                res[-1] = f"<div class='error'>{html.escape(str(e))}</div>"
+            finally:
+                progress.finish_task(id_task)
+        return res
+    return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True, name=name)
+
+
+def wrap_gradio_call(func, extra_outputs=None, add_stats=False, name=None):
+    job_name = name if name is not None else func.__name__
+    def f(*args, extra_outputs_array=extra_outputs, **kwargs):
+        t = time.perf_counter()
+        shared.mem_mon.reset()
+        shared.state.begin(job_name)
+        try:
+            if shared.cmd_opts.profile:
+                pr = cProfile.Profile()
+                pr.enable()
+            res = func(*args, **kwargs)
+            if res is None:
+                msg = "No result returned from function"
+                shared.log.warning(msg)
+                res = [None, '', '', f"<div class='error'>{html.escape(msg)}</div>"]
+            else:
+                res = list(res)
+            if shared.cmd_opts.profile:
+                errors.profile(pr, 'Wrap')
+        except Exception as e:
+            errors.display(e, 'gradio call')
+            if extra_outputs_array is None:
+                extra_outputs_array = [None, '']
+            res = extra_outputs_array + [f"<div class='error'>{html.escape(type(e).__name__+': '+str(e))}</div>"]
+        shared.state.end()
+        if not add_stats:
+            return tuple(res)
+        elapsed = time.perf_counter() - t
+        elapsed_m = int(elapsed // 60)
+        elapsed_s = elapsed % 60
+        elapsed_text = f"{elapsed_m}m {elapsed_s:.2f}s" if elapsed_m > 0 else f"{elapsed_s:.2f}s"
+        vram_html = ''
+        if not shared.mem_mon.disabled:
+            vram = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.read().items()}
+            if vram.get('active_peak', 0) > 0:
+                vram_html = " | <p class='vram'>"
+                vram_html += f"GPU active {max(vram['active_peak'], vram['reserved_peak'])} MB reserved {vram['reserved']} | used {vram['used']} MB free {vram['free']} MB total {vram['total']} MB"
+                vram_html += f" | retries {vram['retries']} oom {vram['oom']}" if vram.get('retries', 0) > 0 or vram.get('oom', 0) > 0 else ''
+                vram_html += "</p>"
+        if isinstance(res, list):
+            res[-1] += f"<div class='performance'><p class='time'>Time: {elapsed_text}</p>{vram_html}</div>
" + return tuple(res) + return f diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 40aeeebce..b86dfd154 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,127 +1,127 @@ -import os -import argparse -from modules.paths import data_path - -parser = argparse.ArgumentParser(description="SD.Next", conflict_handler='resolve', epilog='For other options see UI Settings page', prog='', add_help=True, formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=55, indent_increment=2, width=200)) -parser._optionals = parser.add_argument_group('Other options') # pylint: disable=protected-access -group = parser.add_argument_group('Server options') - -# main server args -group.add_argument("--config", type=str, default=os.environ.get("SD_CONFIG", os.path.join(data_path, 'config.json')), help="Use specific server configuration file, default: %(default)s") -group.add_argument("--ui-config", type=str, default=os.environ.get("SD_UICONFIG", os.path.join(data_path, 'ui-config.json')), help="Use specific UI configuration file, default: %(default)s") -group.add_argument("--medvram", default=os.environ.get("SD_MEDVRAM", False), action='store_true', help="Split model stages and keep only active part in VRAM, default: %(default)s") -group.add_argument("--lowvram", default=os.environ.get("SD_LOWVRAM", False), action='store_true', help="Split model components and keep only active part in VRAM, default: %(default)s") -group.add_argument("--ckpt", type=str, default=os.environ.get("SD_MODEL", None), help="Path to model checkpoint to load immediately, default: %(default)s") -group.add_argument('--vae', type=str, default=os.environ.get("SD_VAE", None), help='Path to VAE checkpoint to load immediately, default: %(default)s') -group.add_argument("--data-dir", type=str, default=os.environ.get("SD_DATADIR", ''), help="Base path where all user data is stored, default: %(default)s") -group.add_argument("--models-dir", type=str, default=os.environ.get("SD_MODELSDIR", 'models'), help="Base path where all models are stored, default: %(default)s",) -group.add_argument("--allow-code", default=os.environ.get("SD_ALLOWCODE", False), action='store_true', help="Allow custom script execution, default: %(default)s") -group.add_argument("--share", default=os.environ.get("SD_SHARE", False), action='store_true', help="Enable UI accessible through Gradio site, default: %(default)s") -group.add_argument("--insecure", default=os.environ.get("SD_INSECURE", False), action='store_true', help="Enable extensions tab regardless of other options, default: %(default)s") -group.add_argument("--use-cpu", nargs='+', default=[], type=str.lower, help="Force use CPU for specified modules, default: %(default)s") -group.add_argument("--listen", default=os.environ.get("SD_LISTEN", False), action='store_true', help="Launch web server using public IP address, default: %(default)s") -group.add_argument("--port", type=int, default=os.environ.get("SD_PORT", 7860), help="Launch web server with given server port, default: %(default)s") -group.add_argument("--freeze", default=os.environ.get("SD_FREEZE", False), action='store_true', help="Disable editing settings") -group.add_argument("--auth", type=str, default=os.environ.get("SD_AUTH", None), help='Set access authentication like "user:pwd,user:pwd""') -group.add_argument("--auth-file", type=str, default=os.environ.get("SD_AUTHFILE", None), help='Set access authentication using file, default: %(default)s') -group.add_argument("--autolaunch", 
default=os.environ.get("SD_AUTOLAUNCH", False), action='store_true', help="Open the UI URL in the system's default browser upon launch") -group.add_argument('--docs', default=os.environ.get("SD_DOCS", False), action='store_true', help = "Mount Gradio docs at /docs, default: %(default)s") -group.add_argument('--api-only', default=os.environ.get("SD_APIONLY", False), action='store_true', help = "Run in API only mode without starting UI") -group.add_argument("--api-log", default=os.environ.get("SD_APILOG", False), action='store_true', help="Enable logging of all API requests, default: %(default)s") -group.add_argument("--device-id", type=str, default=os.environ.get("SD_DEVICEID", None), help="Select the default CUDA device to use, default: %(default)s") -group.add_argument("--cors-origins", type=str, default=os.environ.get("SD_CORSORIGINS", None), help="Allowed CORS origins as comma-separated list, default: %(default)s") -group.add_argument("--cors-regex", type=str, default=os.environ.get("SD_CORSREGEX", None), help="Allowed CORS origins as regular expression, default: %(default)s") -group.add_argument("--tls-keyfile", type=str, default=os.environ.get("SD_TLSKEYFILE", None), help="Enable TLS and specify key file, default: %(default)s") -group.add_argument("--tls-certfile", type=str, default=os.environ.get("SD_TLSCERTFILE", None), help="Enable TLS and specify cert file, default: %(default)s") -group.add_argument("--tls-selfsign", action="store_true", default=os.environ.get("SD_TLSSELFSIGN", False), help="Enable TLS with self-signed certificates, default: %(default)s") -group.add_argument("--server-name", type=str, default=os.environ.get("SD_SERVERNAME", None), help="Sets hostname of server, default: %(default)s") -group.add_argument("--no-hashing", default=os.environ.get("SD_NOHASHING", False), action='store_true', help="Disable hashing of checkpoints, default: %(default)s") -group.add_argument("--no-metadata", default=os.environ.get("SD_NOMETADATA", False), action='store_true', help="Disable reading of metadata from models, default: %(default)s") -group.add_argument("--no-download", default=os.environ.get("SD_DOWNLOAD", False), action='store_true', help="Disable download of default model, default: %(default)s") -group.add_argument("--profile", default=os.environ.get("SD_PROFILE", False), action='store_true', help="Run profiler, default: %(default)s") -group.add_argument("--disable-queue", default=os.environ.get("SD_DISABLEQUEUE", False), action='store_true', help="Disable queues, default: %(default)s") -group.add_argument('--debug', default=os.environ.get("SD_DEBUG", False), action='store_true', help = "Run installer with debug logging, default: %(default)s") -group.add_argument('--use-directml', default=os.environ.get("SD_USEDIRECTML", False), action='store_true', help = "Use DirectML if no compatible GPU is detected, default: %(default)s") -group.add_argument("--use-openvino", default=os.environ.get("SD_USEOPENVINO", False), action='store_true', help="Use Intel OpenVINO backend, default: %(default)s") -group.add_argument("--use-ipex", default=os.environ.get("SD_USEIPX", False), action='store_true', help="Force use Intel OneAPI XPU backend, default: %(default)s") -group.add_argument("--use-cuda", default=os.environ.get("SD_USECUDA", False), action='store_true', help="Force use nVidia CUDA backend, default: %(default)s") -group.add_argument("--use-rocm", default=os.environ.get("SD_USEROCM", False), action='store_true', help="Force use AMD ROCm backend, default: %(default)s") 
-group.add_argument('--subpath', type=str, default=os.environ.get("SD_SUBPATH", None), help='Customize the URL subpath for usage with reverse proxy') -group.add_argument('--backend', type=str, default=os.environ.get("SD_BACKEND", None), choices=['original', 'diffusers'], required=False, help='force model pipeline type') - - -# removed args are added here as hidden in fixed format for compatbility reasons -group.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui -group.add_argument("--ui-settings-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'config.json')) -group.add_argument("--ui-config-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'ui-config.json')) -group.add_argument("--hide-ui-dir-config", action='store_true', help=argparse.SUPPRESS, default=False) -group.add_argument("--theme", type=str, help=argparse.SUPPRESS, default=None) -group.add_argument("--disable-console-progressbars", action='store_true', help=argparse.SUPPRESS, default=True) -group.add_argument("--disable-safe-unpickle", action='store_true', help=argparse.SUPPRESS, default=True) -group.add_argument("--lowram", action='store_true', help=argparse.SUPPRESS) -group.add_argument("--disable-extension-access", default=False, action='store_true', help=argparse.SUPPRESS) -group.add_argument("--api", help=argparse.SUPPRESS, default=True) -group.add_argument("--api-auth", type=str, help=argparse.SUPPRESS, default=None) - - -def compatibility_args(opts, args): - # removed args that have been moved to opts are added here as hidden with default values as defined in opts - group.add_argument("--ckpt-dir", type=str, help=argparse.SUPPRESS, default=opts.ckpt_dir) - group.add_argument("--vae-dir", type=str, help=argparse.SUPPRESS, default=opts.vae_dir) - group.add_argument("--embeddings-dir", type=str, help=argparse.SUPPRESS, default=opts.embeddings_dir) - group.add_argument("--embeddings-templates-dir", type=str, help=argparse.SUPPRESS, default=opts.embeddings_templates_dir) - group.add_argument("--hypernetwork-dir", type=str, help=argparse.SUPPRESS, default=opts.hypernetwork_dir) - group.add_argument("--codeformer-models-path", type=str, help=argparse.SUPPRESS, default=opts.codeformer_models_path) - group.add_argument("--gfpgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.gfpgan_models_path) - group.add_argument("--esrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.esrgan_models_path) - group.add_argument("--bsrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.bsrgan_models_path) - group.add_argument("--realesrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.realesrgan_models_path) - group.add_argument("--scunet-models-path", help=argparse.SUPPRESS, default=opts.scunet_models_path) - group.add_argument("--swinir-models-path", help=argparse.SUPPRESS, default=opts.swinir_models_path) - group.add_argument("--ldsr-models-path", help=argparse.SUPPRESS, default=opts.ldsr_models_path) - group.add_argument("--clip-models-path", type=str, help=argparse.SUPPRESS, default=opts.clip_models_path) - group.add_argument("--opt-channelslast", help=argparse.SUPPRESS, action='store_true', default=opts.opt_channelslast) - group.add_argument("--xformers", default=(opts.cross_attention_optimization == "xFormers"), action='store_true', help=argparse.SUPPRESS) - group.add_argument("--disable-nan-check", help=argparse.SUPPRESS, action='store_true', default=opts.disable_nan_check) 
- group.add_argument("--rollback-vae", help=argparse.SUPPRESS, default=opts.rollback_vae) - group.add_argument("--no-half", help=argparse.SUPPRESS, action='store_true', default=opts.no_half) - group.add_argument("--no-half-vae", help=argparse.SUPPRESS, action='store_true', default=opts.no_half_vae) - group.add_argument("--precision", help=argparse.SUPPRESS, default=opts.precision) - group.add_argument("--sub-quad-q-chunk-size", help=argparse.SUPPRESS, default=opts.sub_quad_q_chunk_size) - group.add_argument("--sub-quad-kv-chunk-size", help=argparse.SUPPRESS, default=opts.sub_quad_kv_chunk_size) - group.add_argument("--sub-quad-chunk-threshold", help=argparse.SUPPRESS, default=opts.sub_quad_chunk_threshold) - group.add_argument("--lora-dir", help=argparse.SUPPRESS, default=opts.lora_dir) - group.add_argument("--lyco-dir", help=argparse.SUPPRESS, default=opts.lyco_dir) - group.add_argument("--embeddings-dir", help=argparse.SUPPRESS, default=opts.embeddings_dir) - group.add_argument("--hypernetwork-dir", help=argparse.SUPPRESS, default=opts.hypernetwork_dir) - group.add_argument("--lyco-patch-lora", help=argparse.SUPPRESS, action='store_true', default=False) - group.add_argument("--lyco-debug", help=argparse.SUPPRESS, action='store_true', default=False) - group.add_argument("--enable-console-prompts", help=argparse.SUPPRESS, action='store_true', default=False) - group.add_argument("--safe", help=argparse.SUPPRESS, action='store_true', default=False) - group.add_argument("--use-xformers", help=argparse.SUPPRESS, action='store_true', default=False) - - # removed opts are added here with fixed values for compatibility reasons - opts.use_old_emphasis_implementation = False - opts.use_old_karras_scheduler_sigmas = False - opts.no_dpmpp_sde_batch_determinism = False - opts.lora_apply_to_outputs = False - opts.do_not_show_images = False - opts.add_model_hash_to_info = True - opts.add_model_name_to_info = True - opts.js_modal_lightbox = True - opts.js_modal_lightbox_initially_zoomed = True - opts.show_progress_in_title = False - opts.sd_vae_as_default = True - opts.enable_emphasis = True - opts.enable_batch_seeds = True - # opts.multiple_tqdm = False - opts.print_hypernet_extra = False - opts.dimensions_and_batch_together = True - opts.enable_pnginfo = True - opts.data['clip_skip'] = 1 - - opts.onchange("lora_dir", lambda: setattr(args, "lora_dir", opts.lora_dir)) - opts.onchange("lyco_dir", lambda: setattr(args, "lyco_dir", opts.lyco_dir)) - - args = parser.parse_args() - return args +import os +import argparse +from modules.paths import data_path + +parser = argparse.ArgumentParser(description="SD.Next", conflict_handler='resolve', epilog='For other options see UI Settings page', prog='', add_help=True, formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=55, indent_increment=2, width=200)) +parser._optionals = parser.add_argument_group('Other options') # pylint: disable=protected-access +group = parser.add_argument_group('Server options') + +# main server args +group.add_argument("--config", type=str, default=os.environ.get("SD_CONFIG", os.path.join(data_path, 'config.json')), help="Use specific server configuration file, default: %(default)s") +group.add_argument("--ui-config", type=str, default=os.environ.get("SD_UICONFIG", os.path.join(data_path, 'ui-config.json')), help="Use specific UI configuration file, default: %(default)s") +group.add_argument("--medvram", default=os.environ.get("SD_MEDVRAM", False), action='store_true', help="Split model stages and keep only active 
part in VRAM, default: %(default)s") +group.add_argument("--lowvram", default=os.environ.get("SD_LOWVRAM", False), action='store_true', help="Split model components and keep only active part in VRAM, default: %(default)s") +group.add_argument("--ckpt", type=str, default=os.environ.get("SD_MODEL", None), help="Path to model checkpoint to load immediately, default: %(default)s") +group.add_argument('--vae', type=str, default=os.environ.get("SD_VAE", None), help='Path to VAE checkpoint to load immediately, default: %(default)s') +group.add_argument("--data-dir", type=str, default=os.environ.get("SD_DATADIR", ''), help="Base path where all user data is stored, default: %(default)s") +group.add_argument("--models-dir", type=str, default=os.environ.get("SD_MODELSDIR", 'models'), help="Base path where all models are stored, default: %(default)s",) +group.add_argument("--allow-code", default=os.environ.get("SD_ALLOWCODE", False), action='store_true', help="Allow custom script execution, default: %(default)s") +group.add_argument("--share", default=os.environ.get("SD_SHARE", False), action='store_true', help="Enable UI accessible through Gradio site, default: %(default)s") +group.add_argument("--insecure", default=os.environ.get("SD_INSECURE", False), action='store_true', help="Enable extensions tab regardless of other options, default: %(default)s") +group.add_argument("--use-cpu", nargs='+', default=[], type=str.lower, help="Force use CPU for specified modules, default: %(default)s") +group.add_argument("--listen", default=os.environ.get("SD_LISTEN", False), action='store_true', help="Launch web server using public IP address, default: %(default)s") +group.add_argument("--port", type=int, default=os.environ.get("SD_PORT", 7860), help="Launch web server with given server port, default: %(default)s") +group.add_argument("--freeze", default=os.environ.get("SD_FREEZE", False), action='store_true', help="Disable editing settings") +group.add_argument("--auth", type=str, default=os.environ.get("SD_AUTH", None), help='Set access authentication like "user:pwd,user:pwd""') +group.add_argument("--auth-file", type=str, default=os.environ.get("SD_AUTHFILE", None), help='Set access authentication using file, default: %(default)s') +group.add_argument("--autolaunch", default=os.environ.get("SD_AUTOLAUNCH", False), action='store_true', help="Open the UI URL in the system's default browser upon launch") +group.add_argument('--docs', default=os.environ.get("SD_DOCS", False), action='store_true', help = "Mount Gradio docs at /docs, default: %(default)s") +group.add_argument('--api-only', default=os.environ.get("SD_APIONLY", False), action='store_true', help = "Run in API only mode without starting UI") +group.add_argument("--api-log", default=os.environ.get("SD_APILOG", False), action='store_true', help="Enable logging of all API requests, default: %(default)s") +group.add_argument("--device-id", type=str, default=os.environ.get("SD_DEVICEID", None), help="Select the default CUDA device to use, default: %(default)s") +group.add_argument("--cors-origins", type=str, default=os.environ.get("SD_CORSORIGINS", None), help="Allowed CORS origins as comma-separated list, default: %(default)s") +group.add_argument("--cors-regex", type=str, default=os.environ.get("SD_CORSREGEX", None), help="Allowed CORS origins as regular expression, default: %(default)s") +group.add_argument("--tls-keyfile", type=str, default=os.environ.get("SD_TLSKEYFILE", None), help="Enable TLS and specify key file, default: %(default)s") 
+group.add_argument("--tls-certfile", type=str, default=os.environ.get("SD_TLSCERTFILE", None), help="Enable TLS and specify cert file, default: %(default)s") +group.add_argument("--tls-selfsign", action="store_true", default=os.environ.get("SD_TLSSELFSIGN", False), help="Enable TLS with self-signed certificates, default: %(default)s") +group.add_argument("--server-name", type=str, default=os.environ.get("SD_SERVERNAME", None), help="Sets hostname of server, default: %(default)s") +group.add_argument("--no-hashing", default=os.environ.get("SD_NOHASHING", False), action='store_true', help="Disable hashing of checkpoints, default: %(default)s") +group.add_argument("--no-metadata", default=os.environ.get("SD_NOMETADATA", False), action='store_true', help="Disable reading of metadata from models, default: %(default)s") +group.add_argument("--no-download", default=os.environ.get("SD_DOWNLOAD", False), action='store_true', help="Disable download of default model, default: %(default)s") +group.add_argument("--profile", default=os.environ.get("SD_PROFILE", False), action='store_true', help="Run profiler, default: %(default)s") +group.add_argument("--disable-queue", default=os.environ.get("SD_DISABLEQUEUE", False), action='store_true', help="Disable queues, default: %(default)s") +group.add_argument('--debug', default=os.environ.get("SD_DEBUG", False), action='store_true', help = "Run installer with debug logging, default: %(default)s") +group.add_argument('--use-directml', default=os.environ.get("SD_USEDIRECTML", False), action='store_true', help = "Use DirectML if no compatible GPU is detected, default: %(default)s") +group.add_argument("--use-openvino", default=os.environ.get("SD_USEOPENVINO", False), action='store_true', help="Use Intel OpenVINO backend, default: %(default)s") +group.add_argument("--use-ipex", default=os.environ.get("SD_USEIPX", False), action='store_true', help="Force use Intel OneAPI XPU backend, default: %(default)s") +group.add_argument("--use-cuda", default=os.environ.get("SD_USECUDA", False), action='store_true', help="Force use nVidia CUDA backend, default: %(default)s") +group.add_argument("--use-rocm", default=os.environ.get("SD_USEROCM", False), action='store_true', help="Force use AMD ROCm backend, default: %(default)s") +group.add_argument('--subpath', type=str, default=os.environ.get("SD_SUBPATH", None), help='Customize the URL subpath for usage with reverse proxy') +group.add_argument('--backend', type=str, default=os.environ.get("SD_BACKEND", None), choices=['original', 'diffusers'], required=False, help='force model pipeline type') + + +# removed args are added here as hidden in fixed format for compatbility reasons +group.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui +group.add_argument("--ui-settings-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'config.json')) +group.add_argument("--ui-config-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'ui-config.json')) +group.add_argument("--hide-ui-dir-config", action='store_true', help=argparse.SUPPRESS, default=False) +group.add_argument("--theme", type=str, help=argparse.SUPPRESS, default=None) +group.add_argument("--disable-console-progressbars", action='store_true', help=argparse.SUPPRESS, default=True) +group.add_argument("--disable-safe-unpickle", action='store_true', help=argparse.SUPPRESS, default=True) +group.add_argument("--lowram", action='store_true', help=argparse.SUPPRESS) 
+group.add_argument("--disable-extension-access", default=False, action='store_true', help=argparse.SUPPRESS) +group.add_argument("--api", help=argparse.SUPPRESS, default=True) +group.add_argument("--api-auth", type=str, help=argparse.SUPPRESS, default=None) + + +def compatibility_args(opts, args): + # removed args that have been moved to opts are added here as hidden with default values as defined in opts + group.add_argument("--ckpt-dir", type=str, help=argparse.SUPPRESS, default=opts.ckpt_dir) + group.add_argument("--vae-dir", type=str, help=argparse.SUPPRESS, default=opts.vae_dir) + group.add_argument("--embeddings-dir", type=str, help=argparse.SUPPRESS, default=opts.embeddings_dir) + group.add_argument("--embeddings-templates-dir", type=str, help=argparse.SUPPRESS, default=opts.embeddings_templates_dir) + group.add_argument("--hypernetwork-dir", type=str, help=argparse.SUPPRESS, default=opts.hypernetwork_dir) + group.add_argument("--codeformer-models-path", type=str, help=argparse.SUPPRESS, default=opts.codeformer_models_path) + group.add_argument("--gfpgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.gfpgan_models_path) + group.add_argument("--esrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.esrgan_models_path) + group.add_argument("--bsrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.bsrgan_models_path) + group.add_argument("--realesrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.realesrgan_models_path) + group.add_argument("--scunet-models-path", help=argparse.SUPPRESS, default=opts.scunet_models_path) + group.add_argument("--swinir-models-path", help=argparse.SUPPRESS, default=opts.swinir_models_path) + group.add_argument("--ldsr-models-path", help=argparse.SUPPRESS, default=opts.ldsr_models_path) + group.add_argument("--clip-models-path", type=str, help=argparse.SUPPRESS, default=opts.clip_models_path) + group.add_argument("--opt-channelslast", help=argparse.SUPPRESS, action='store_true', default=opts.opt_channelslast) + group.add_argument("--xformers", default=(opts.cross_attention_optimization == "xFormers"), action='store_true', help=argparse.SUPPRESS) + group.add_argument("--disable-nan-check", help=argparse.SUPPRESS, action='store_true', default=opts.disable_nan_check) + group.add_argument("--rollback-vae", help=argparse.SUPPRESS, default=opts.rollback_vae) + group.add_argument("--no-half", help=argparse.SUPPRESS, action='store_true', default=opts.no_half) + group.add_argument("--no-half-vae", help=argparse.SUPPRESS, action='store_true', default=opts.no_half_vae) + group.add_argument("--precision", help=argparse.SUPPRESS, default=opts.precision) + group.add_argument("--sub-quad-q-chunk-size", help=argparse.SUPPRESS, default=opts.sub_quad_q_chunk_size) + group.add_argument("--sub-quad-kv-chunk-size", help=argparse.SUPPRESS, default=opts.sub_quad_kv_chunk_size) + group.add_argument("--sub-quad-chunk-threshold", help=argparse.SUPPRESS, default=opts.sub_quad_chunk_threshold) + group.add_argument("--lora-dir", help=argparse.SUPPRESS, default=opts.lora_dir) + group.add_argument("--lyco-dir", help=argparse.SUPPRESS, default=opts.lyco_dir) + group.add_argument("--embeddings-dir", help=argparse.SUPPRESS, default=opts.embeddings_dir) + group.add_argument("--hypernetwork-dir", help=argparse.SUPPRESS, default=opts.hypernetwork_dir) + group.add_argument("--lyco-patch-lora", help=argparse.SUPPRESS, action='store_true', default=False) + group.add_argument("--lyco-debug", help=argparse.SUPPRESS, action='store_true', 
default=False) + group.add_argument("--enable-console-prompts", help=argparse.SUPPRESS, action='store_true', default=False) + group.add_argument("--safe", help=argparse.SUPPRESS, action='store_true', default=False) + group.add_argument("--use-xformers", help=argparse.SUPPRESS, action='store_true', default=False) + + # removed opts are added here with fixed values for compatibility reasons + opts.use_old_emphasis_implementation = False + opts.use_old_karras_scheduler_sigmas = False + opts.no_dpmpp_sde_batch_determinism = False + opts.lora_apply_to_outputs = False + opts.do_not_show_images = False + opts.add_model_hash_to_info = True + opts.add_model_name_to_info = True + opts.js_modal_lightbox = True + opts.js_modal_lightbox_initially_zoomed = True + opts.show_progress_in_title = False + opts.sd_vae_as_default = True + opts.enable_emphasis = True + opts.enable_batch_seeds = True + # opts.multiple_tqdm = False + opts.print_hypernet_extra = False + opts.dimensions_and_batch_together = True + opts.enable_pnginfo = True + opts.data['clip_skip'] = 1 + + opts.onchange("lora_dir", lambda: setattr(args, "lora_dir", opts.lora_dir)) + opts.onchange("lyco_dir", lambda: setattr(args, "lyco_dir", opts.lyco_dir)) + + args = parser.parse_args() + return args diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py index edeb81866..2963385c3 100644 --- a/modules/deepbooru_model.py +++ b/modules/deepbooru_model.py @@ -1,674 +1,674 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from modules import devices - -# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more - - -class DeepDanbooruModel(nn.Module): - def __init__(self): - super().__init__() - self.tags = [] - self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2)) - self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) - self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) - self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64) - self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) - self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) - self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) - self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) - self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) - self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) - self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) - self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) - self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2)) - self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128) - self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2)) - self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), 
in_channels=128, out_channels=512) - self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_26 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) - self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) - self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) - self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2)) - self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256) - self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) - self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_62 = 
nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) - self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2)) - self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, 
out_channels=1024) - self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_109 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_145 = 
nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_147 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) - self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) - self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) - self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2)) - self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512) - self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2)) - self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) - self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) - self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) - self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) - self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) - self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) - self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) - self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2)) - self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024) - self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2)) - self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) - self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) - self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) - self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) - self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) - self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) - self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) - self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False) - - def forward(self, *inputs): - t_358, = inputs - t_359 = t_358.permute(*[0, 3, 1, 2]) - t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0) - t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded) - t_361 = F.relu(t_360) - t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf')) - t_362 = self.n_MaxPool_0(t_361) - t_363 = self.n_Conv_1(t_362) - t_364 = self.n_Conv_2(t_362) - t_365 = F.relu(t_364) - t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0) - t_366 = self.n_Conv_3(t_365_padded) - t_367 = F.relu(t_366) 
- t_368 = self.n_Conv_4(t_367) - t_369 = torch.add(t_368, t_363) - t_370 = F.relu(t_369) - t_371 = self.n_Conv_5(t_370) - t_372 = F.relu(t_371) - t_372_padded = F.pad(t_372, [1, 1, 1, 1], value=0) - t_373 = self.n_Conv_6(t_372_padded) - t_374 = F.relu(t_373) - t_375 = self.n_Conv_7(t_374) - t_376 = torch.add(t_375, t_370) - t_377 = F.relu(t_376) - t_378 = self.n_Conv_8(t_377) - t_379 = F.relu(t_378) - t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0) - t_380 = self.n_Conv_9(t_379_padded) - t_381 = F.relu(t_380) - t_382 = self.n_Conv_10(t_381) - t_383 = torch.add(t_382, t_377) - t_384 = F.relu(t_383) - t_385 = self.n_Conv_11(t_384) - t_386 = self.n_Conv_12(t_384) - t_387 = F.relu(t_386) - t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0) - t_388 = self.n_Conv_13(t_387_padded) - t_389 = F.relu(t_388) - t_390 = self.n_Conv_14(t_389) - t_391 = torch.add(t_390, t_385) - t_392 = F.relu(t_391) - t_393 = self.n_Conv_15(t_392) - t_394 = F.relu(t_393) - t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0) - t_395 = self.n_Conv_16(t_394_padded) - t_396 = F.relu(t_395) - t_397 = self.n_Conv_17(t_396) - t_398 = torch.add(t_397, t_392) - t_399 = F.relu(t_398) - t_400 = self.n_Conv_18(t_399) - t_401 = F.relu(t_400) - t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0) - t_402 = self.n_Conv_19(t_401_padded) - t_403 = F.relu(t_402) - t_404 = self.n_Conv_20(t_403) - t_405 = torch.add(t_404, t_399) - t_406 = F.relu(t_405) - t_407 = self.n_Conv_21(t_406) - t_408 = F.relu(t_407) - t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0) - t_409 = self.n_Conv_22(t_408_padded) - t_410 = F.relu(t_409) - t_411 = self.n_Conv_23(t_410) - t_412 = torch.add(t_411, t_406) - t_413 = F.relu(t_412) - t_414 = self.n_Conv_24(t_413) - t_415 = F.relu(t_414) - t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0) - t_416 = self.n_Conv_25(t_415_padded) - t_417 = F.relu(t_416) - t_418 = self.n_Conv_26(t_417) - t_419 = torch.add(t_418, t_413) - t_420 = F.relu(t_419) - t_421 = self.n_Conv_27(t_420) - t_422 = F.relu(t_421) - t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0) - t_423 = self.n_Conv_28(t_422_padded) - t_424 = F.relu(t_423) - t_425 = self.n_Conv_29(t_424) - t_426 = torch.add(t_425, t_420) - t_427 = F.relu(t_426) - t_428 = self.n_Conv_30(t_427) - t_429 = F.relu(t_428) - t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0) - t_430 = self.n_Conv_31(t_429_padded) - t_431 = F.relu(t_430) - t_432 = self.n_Conv_32(t_431) - t_433 = torch.add(t_432, t_427) - t_434 = F.relu(t_433) - t_435 = self.n_Conv_33(t_434) - t_436 = F.relu(t_435) - t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0) - t_437 = self.n_Conv_34(t_436_padded) - t_438 = F.relu(t_437) - t_439 = self.n_Conv_35(t_438) - t_440 = torch.add(t_439, t_434) - t_441 = F.relu(t_440) - t_442 = self.n_Conv_36(t_441) - t_443 = self.n_Conv_37(t_441) - t_444 = F.relu(t_443) - t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0) - t_445 = self.n_Conv_38(t_444_padded) - t_446 = F.relu(t_445) - t_447 = self.n_Conv_39(t_446) - t_448 = torch.add(t_447, t_442) - t_449 = F.relu(t_448) - t_450 = self.n_Conv_40(t_449) - t_451 = F.relu(t_450) - t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0) - t_452 = self.n_Conv_41(t_451_padded) - t_453 = F.relu(t_452) - t_454 = self.n_Conv_42(t_453) - t_455 = torch.add(t_454, t_449) - t_456 = F.relu(t_455) - t_457 = self.n_Conv_43(t_456) - t_458 = F.relu(t_457) - t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0) - t_459 = self.n_Conv_44(t_458_padded) - t_460 = F.relu(t_459) - t_461 = self.n_Conv_45(t_460) - t_462 = torch.add(t_461, t_456) - t_463 = F.relu(t_462) 
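# Each repeating group in forward() is a ResNet bottleneck: 1x1 reduce, 3x3
# with explicit padding, 1x1 expand, then a residual add and ReLU (for
# example n_Conv_5/6/7 around t_370..t_377 above). A hand-written equivalent
# of one identity block; channel sizes are illustrative:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Bottleneck(nn.Module):
    def __init__(self, channels=256, mid=64):
        super().__init__()
        self.reduce = nn.Conv2d(channels, mid, kernel_size=1)
        self.conv = nn.Conv2d(mid, mid, kernel_size=3)
        self.expand = nn.Conv2d(mid, channels, kernel_size=1)

    def forward(self, x):
        t = F.relu(self.reduce(x))
        t = F.pad(t, [1, 1, 1, 1], value=0)   # explicit pad, as in the export
        t = F.relu(self.conv(t))
        return F.relu(torch.add(self.expand(t), x))

y = Bottleneck()(torch.rand(1, 256, 64, 64))  # shape preserved: (1, 256, 64, 64)
# Downsampling groups (e.g. n_Conv_11/12/13/14) follow the same shape but use
# stride-2 convolutions plus a strided 1x1 projection on the shortcut.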
- t_464 = self.n_Conv_46(t_463) - t_465 = F.relu(t_464) - t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0) - t_466 = self.n_Conv_47(t_465_padded) - t_467 = F.relu(t_466) - t_468 = self.n_Conv_48(t_467) - t_469 = torch.add(t_468, t_463) - t_470 = F.relu(t_469) - t_471 = self.n_Conv_49(t_470) - t_472 = F.relu(t_471) - t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0) - t_473 = self.n_Conv_50(t_472_padded) - t_474 = F.relu(t_473) - t_475 = self.n_Conv_51(t_474) - t_476 = torch.add(t_475, t_470) - t_477 = F.relu(t_476) - t_478 = self.n_Conv_52(t_477) - t_479 = F.relu(t_478) - t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0) - t_480 = self.n_Conv_53(t_479_padded) - t_481 = F.relu(t_480) - t_482 = self.n_Conv_54(t_481) - t_483 = torch.add(t_482, t_477) - t_484 = F.relu(t_483) - t_485 = self.n_Conv_55(t_484) - t_486 = F.relu(t_485) - t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0) - t_487 = self.n_Conv_56(t_486_padded) - t_488 = F.relu(t_487) - t_489 = self.n_Conv_57(t_488) - t_490 = torch.add(t_489, t_484) - t_491 = F.relu(t_490) - t_492 = self.n_Conv_58(t_491) - t_493 = F.relu(t_492) - t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0) - t_494 = self.n_Conv_59(t_493_padded) - t_495 = F.relu(t_494) - t_496 = self.n_Conv_60(t_495) - t_497 = torch.add(t_496, t_491) - t_498 = F.relu(t_497) - t_499 = self.n_Conv_61(t_498) - t_500 = F.relu(t_499) - t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0) - t_501 = self.n_Conv_62(t_500_padded) - t_502 = F.relu(t_501) - t_503 = self.n_Conv_63(t_502) - t_504 = torch.add(t_503, t_498) - t_505 = F.relu(t_504) - t_506 = self.n_Conv_64(t_505) - t_507 = F.relu(t_506) - t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0) - t_508 = self.n_Conv_65(t_507_padded) - t_509 = F.relu(t_508) - t_510 = self.n_Conv_66(t_509) - t_511 = torch.add(t_510, t_505) - t_512 = F.relu(t_511) - t_513 = self.n_Conv_67(t_512) - t_514 = F.relu(t_513) - t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0) - t_515 = self.n_Conv_68(t_514_padded) - t_516 = F.relu(t_515) - t_517 = self.n_Conv_69(t_516) - t_518 = torch.add(t_517, t_512) - t_519 = F.relu(t_518) - t_520 = self.n_Conv_70(t_519) - t_521 = F.relu(t_520) - t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0) - t_522 = self.n_Conv_71(t_521_padded) - t_523 = F.relu(t_522) - t_524 = self.n_Conv_72(t_523) - t_525 = torch.add(t_524, t_519) - t_526 = F.relu(t_525) - t_527 = self.n_Conv_73(t_526) - t_528 = F.relu(t_527) - t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0) - t_529 = self.n_Conv_74(t_528_padded) - t_530 = F.relu(t_529) - t_531 = self.n_Conv_75(t_530) - t_532 = torch.add(t_531, t_526) - t_533 = F.relu(t_532) - t_534 = self.n_Conv_76(t_533) - t_535 = F.relu(t_534) - t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0) - t_536 = self.n_Conv_77(t_535_padded) - t_537 = F.relu(t_536) - t_538 = self.n_Conv_78(t_537) - t_539 = torch.add(t_538, t_533) - t_540 = F.relu(t_539) - t_541 = self.n_Conv_79(t_540) - t_542 = F.relu(t_541) - t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0) - t_543 = self.n_Conv_80(t_542_padded) - t_544 = F.relu(t_543) - t_545 = self.n_Conv_81(t_544) - t_546 = torch.add(t_545, t_540) - t_547 = F.relu(t_546) - t_548 = self.n_Conv_82(t_547) - t_549 = F.relu(t_548) - t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0) - t_550 = self.n_Conv_83(t_549_padded) - t_551 = F.relu(t_550) - t_552 = self.n_Conv_84(t_551) - t_553 = torch.add(t_552, t_547) - t_554 = F.relu(t_553) - t_555 = self.n_Conv_85(t_554) - t_556 = F.relu(t_555) - t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0) - t_557 = self.n_Conv_86(t_556_padded) 
- t_558 = F.relu(t_557) - t_559 = self.n_Conv_87(t_558) - t_560 = torch.add(t_559, t_554) - t_561 = F.relu(t_560) - t_562 = self.n_Conv_88(t_561) - t_563 = F.relu(t_562) - t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0) - t_564 = self.n_Conv_89(t_563_padded) - t_565 = F.relu(t_564) - t_566 = self.n_Conv_90(t_565) - t_567 = torch.add(t_566, t_561) - t_568 = F.relu(t_567) - t_569 = self.n_Conv_91(t_568) - t_570 = F.relu(t_569) - t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0) - t_571 = self.n_Conv_92(t_570_padded) - t_572 = F.relu(t_571) - t_573 = self.n_Conv_93(t_572) - t_574 = torch.add(t_573, t_568) - t_575 = F.relu(t_574) - t_576 = self.n_Conv_94(t_575) - t_577 = F.relu(t_576) - t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0) - t_578 = self.n_Conv_95(t_577_padded) - t_579 = F.relu(t_578) - t_580 = self.n_Conv_96(t_579) - t_581 = torch.add(t_580, t_575) - t_582 = F.relu(t_581) - t_583 = self.n_Conv_97(t_582) - t_584 = F.relu(t_583) - t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0) - t_585 = self.n_Conv_98(t_584_padded) - t_586 = F.relu(t_585) - t_587 = self.n_Conv_99(t_586) - t_588 = self.n_Conv_100(t_582) - t_589 = torch.add(t_587, t_588) - t_590 = F.relu(t_589) - t_591 = self.n_Conv_101(t_590) - t_592 = F.relu(t_591) - t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0) - t_593 = self.n_Conv_102(t_592_padded) - t_594 = F.relu(t_593) - t_595 = self.n_Conv_103(t_594) - t_596 = torch.add(t_595, t_590) - t_597 = F.relu(t_596) - t_598 = self.n_Conv_104(t_597) - t_599 = F.relu(t_598) - t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0) - t_600 = self.n_Conv_105(t_599_padded) - t_601 = F.relu(t_600) - t_602 = self.n_Conv_106(t_601) - t_603 = torch.add(t_602, t_597) - t_604 = F.relu(t_603) - t_605 = self.n_Conv_107(t_604) - t_606 = F.relu(t_605) - t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0) - t_607 = self.n_Conv_108(t_606_padded) - t_608 = F.relu(t_607) - t_609 = self.n_Conv_109(t_608) - t_610 = torch.add(t_609, t_604) - t_611 = F.relu(t_610) - t_612 = self.n_Conv_110(t_611) - t_613 = F.relu(t_612) - t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0) - t_614 = self.n_Conv_111(t_613_padded) - t_615 = F.relu(t_614) - t_616 = self.n_Conv_112(t_615) - t_617 = torch.add(t_616, t_611) - t_618 = F.relu(t_617) - t_619 = self.n_Conv_113(t_618) - t_620 = F.relu(t_619) - t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0) - t_621 = self.n_Conv_114(t_620_padded) - t_622 = F.relu(t_621) - t_623 = self.n_Conv_115(t_622) - t_624 = torch.add(t_623, t_618) - t_625 = F.relu(t_624) - t_626 = self.n_Conv_116(t_625) - t_627 = F.relu(t_626) - t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0) - t_628 = self.n_Conv_117(t_627_padded) - t_629 = F.relu(t_628) - t_630 = self.n_Conv_118(t_629) - t_631 = torch.add(t_630, t_625) - t_632 = F.relu(t_631) - t_633 = self.n_Conv_119(t_632) - t_634 = F.relu(t_633) - t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0) - t_635 = self.n_Conv_120(t_634_padded) - t_636 = F.relu(t_635) - t_637 = self.n_Conv_121(t_636) - t_638 = torch.add(t_637, t_632) - t_639 = F.relu(t_638) - t_640 = self.n_Conv_122(t_639) - t_641 = F.relu(t_640) - t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0) - t_642 = self.n_Conv_123(t_641_padded) - t_643 = F.relu(t_642) - t_644 = self.n_Conv_124(t_643) - t_645 = torch.add(t_644, t_639) - t_646 = F.relu(t_645) - t_647 = self.n_Conv_125(t_646) - t_648 = F.relu(t_647) - t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0) - t_649 = self.n_Conv_126(t_648_padded) - t_650 = F.relu(t_649) - t_651 = self.n_Conv_127(t_650) - t_652 = torch.add(t_651, 
t_646) - t_653 = F.relu(t_652) - t_654 = self.n_Conv_128(t_653) - t_655 = F.relu(t_654) - t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0) - t_656 = self.n_Conv_129(t_655_padded) - t_657 = F.relu(t_656) - t_658 = self.n_Conv_130(t_657) - t_659 = torch.add(t_658, t_653) - t_660 = F.relu(t_659) - t_661 = self.n_Conv_131(t_660) - t_662 = F.relu(t_661) - t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0) - t_663 = self.n_Conv_132(t_662_padded) - t_664 = F.relu(t_663) - t_665 = self.n_Conv_133(t_664) - t_666 = torch.add(t_665, t_660) - t_667 = F.relu(t_666) - t_668 = self.n_Conv_134(t_667) - t_669 = F.relu(t_668) - t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0) - t_670 = self.n_Conv_135(t_669_padded) - t_671 = F.relu(t_670) - t_672 = self.n_Conv_136(t_671) - t_673 = torch.add(t_672, t_667) - t_674 = F.relu(t_673) - t_675 = self.n_Conv_137(t_674) - t_676 = F.relu(t_675) - t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0) - t_677 = self.n_Conv_138(t_676_padded) - t_678 = F.relu(t_677) - t_679 = self.n_Conv_139(t_678) - t_680 = torch.add(t_679, t_674) - t_681 = F.relu(t_680) - t_682 = self.n_Conv_140(t_681) - t_683 = F.relu(t_682) - t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0) - t_684 = self.n_Conv_141(t_683_padded) - t_685 = F.relu(t_684) - t_686 = self.n_Conv_142(t_685) - t_687 = torch.add(t_686, t_681) - t_688 = F.relu(t_687) - t_689 = self.n_Conv_143(t_688) - t_690 = F.relu(t_689) - t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0) - t_691 = self.n_Conv_144(t_690_padded) - t_692 = F.relu(t_691) - t_693 = self.n_Conv_145(t_692) - t_694 = torch.add(t_693, t_688) - t_695 = F.relu(t_694) - t_696 = self.n_Conv_146(t_695) - t_697 = F.relu(t_696) - t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0) - t_698 = self.n_Conv_147(t_697_padded) - t_699 = F.relu(t_698) - t_700 = self.n_Conv_148(t_699) - t_701 = torch.add(t_700, t_695) - t_702 = F.relu(t_701) - t_703 = self.n_Conv_149(t_702) - t_704 = F.relu(t_703) - t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0) - t_705 = self.n_Conv_150(t_704_padded) - t_706 = F.relu(t_705) - t_707 = self.n_Conv_151(t_706) - t_708 = torch.add(t_707, t_702) - t_709 = F.relu(t_708) - t_710 = self.n_Conv_152(t_709) - t_711 = F.relu(t_710) - t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0) - t_712 = self.n_Conv_153(t_711_padded) - t_713 = F.relu(t_712) - t_714 = self.n_Conv_154(t_713) - t_715 = torch.add(t_714, t_709) - t_716 = F.relu(t_715) - t_717 = self.n_Conv_155(t_716) - t_718 = F.relu(t_717) - t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0) - t_719 = self.n_Conv_156(t_718_padded) - t_720 = F.relu(t_719) - t_721 = self.n_Conv_157(t_720) - t_722 = torch.add(t_721, t_716) - t_723 = F.relu(t_722) - t_724 = self.n_Conv_158(t_723) - t_725 = self.n_Conv_159(t_723) - t_726 = F.relu(t_725) - t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0) - t_727 = self.n_Conv_160(t_726_padded) - t_728 = F.relu(t_727) - t_729 = self.n_Conv_161(t_728) - t_730 = torch.add(t_729, t_724) - t_731 = F.relu(t_730) - t_732 = self.n_Conv_162(t_731) - t_733 = F.relu(t_732) - t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0) - t_734 = self.n_Conv_163(t_733_padded) - t_735 = F.relu(t_734) - t_736 = self.n_Conv_164(t_735) - t_737 = torch.add(t_736, t_731) - t_738 = F.relu(t_737) - t_739 = self.n_Conv_165(t_738) - t_740 = F.relu(t_739) - t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0) - t_741 = self.n_Conv_166(t_740_padded) - t_742 = F.relu(t_741) - t_743 = self.n_Conv_167(t_742) - t_744 = torch.add(t_743, t_738) - t_745 = F.relu(t_744) - t_746 = self.n_Conv_168(t_745) - t_747 = 
self.n_Conv_169(t_745) - t_748 = F.relu(t_747) - t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0) - t_749 = self.n_Conv_170(t_748_padded) - t_750 = F.relu(t_749) - t_751 = self.n_Conv_171(t_750) - t_752 = torch.add(t_751, t_746) - t_753 = F.relu(t_752) - t_754 = self.n_Conv_172(t_753) - t_755 = F.relu(t_754) - t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0) - t_756 = self.n_Conv_173(t_755_padded) - t_757 = F.relu(t_756) - t_758 = self.n_Conv_174(t_757) - t_759 = torch.add(t_758, t_753) - t_760 = F.relu(t_759) - t_761 = self.n_Conv_175(t_760) - t_762 = F.relu(t_761) - t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0) - t_763 = self.n_Conv_176(t_762_padded) - t_764 = F.relu(t_763) - t_765 = self.n_Conv_177(t_764) - t_766 = torch.add(t_765, t_760) - t_767 = F.relu(t_766) - t_768 = self.n_Conv_178(t_767) - t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:]) - t_770 = torch.squeeze(t_769, 3) - t_770 = torch.squeeze(t_770, 2) - t_771 = torch.sigmoid(t_770) - return t_771 - - def load_state_dict(self, state_dict, **kwargs): # pylint: disable=arguments-differ,unused-argument - self.tags = state_dict.get('tags', []) - super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'}) # pylint: disable=R1725 +import torch +import torch.nn as nn +import torch.nn.functional as F + +from modules import devices + +# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more + + +class DeepDanbooruModel(nn.Module): + def __init__(self): + super().__init__() + self.tags = [] + self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2)) + self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) + self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64) + self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2)) + self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128) + self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2)) + self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) 
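# A hedged usage sketch for this model: the checkpoint is a plain state_dict
# that additionally carries a 'tags' list, which the custom load_state_dict
# in this file strips out and keeps on the module. The checkpoint name and
# threshold are illustrative assumptions, not taken from this diff:
import torch

model = DeepDanbooruModel()
model.load_state_dict(torch.load('model-resnet_custom_v3.pt', map_location='cpu'))
model.eval()
with torch.no_grad():
    image = torch.rand(1, 512, 512, 3)   # NHWC float in [0, 1]
    probs = model(image)[0]              # one sigmoid score per tag
labels = [tag for tag, p in zip(model.tags, probs) if p >= 0.5]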
+ self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_26 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2)) + self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256) + self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_62 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), 
in_channels=1024, out_channels=256) + self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + 
self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_109 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_145 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_147 = 
nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2)) + self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512) + self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2)) + self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) + self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) + self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2)) + self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024) + self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False) + + def forward(self, *inputs): + t_358, = inputs + t_359 = t_358.permute(*[0, 3, 1, 2]) + t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0) + t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded) + t_361 = F.relu(t_360) + t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf')) + t_362 = self.n_MaxPool_0(t_361) + t_363 = self.n_Conv_1(t_362) + t_364 = self.n_Conv_2(t_362) + t_365 = F.relu(t_364) + t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0) + t_366 = self.n_Conv_3(t_365_padded) + t_367 = F.relu(t_366) + t_368 = self.n_Conv_4(t_367) + t_369 = torch.add(t_368, t_363) + t_370 = F.relu(t_369) + t_371 = self.n_Conv_5(t_370) + t_372 = F.relu(t_371) + t_372_padded = 
F.pad(t_372, [1, 1, 1, 1], value=0) + t_373 = self.n_Conv_6(t_372_padded) + t_374 = F.relu(t_373) + t_375 = self.n_Conv_7(t_374) + t_376 = torch.add(t_375, t_370) + t_377 = F.relu(t_376) + t_378 = self.n_Conv_8(t_377) + t_379 = F.relu(t_378) + t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0) + t_380 = self.n_Conv_9(t_379_padded) + t_381 = F.relu(t_380) + t_382 = self.n_Conv_10(t_381) + t_383 = torch.add(t_382, t_377) + t_384 = F.relu(t_383) + t_385 = self.n_Conv_11(t_384) + t_386 = self.n_Conv_12(t_384) + t_387 = F.relu(t_386) + t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0) + t_388 = self.n_Conv_13(t_387_padded) + t_389 = F.relu(t_388) + t_390 = self.n_Conv_14(t_389) + t_391 = torch.add(t_390, t_385) + t_392 = F.relu(t_391) + t_393 = self.n_Conv_15(t_392) + t_394 = F.relu(t_393) + t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0) + t_395 = self.n_Conv_16(t_394_padded) + t_396 = F.relu(t_395) + t_397 = self.n_Conv_17(t_396) + t_398 = torch.add(t_397, t_392) + t_399 = F.relu(t_398) + t_400 = self.n_Conv_18(t_399) + t_401 = F.relu(t_400) + t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0) + t_402 = self.n_Conv_19(t_401_padded) + t_403 = F.relu(t_402) + t_404 = self.n_Conv_20(t_403) + t_405 = torch.add(t_404, t_399) + t_406 = F.relu(t_405) + t_407 = self.n_Conv_21(t_406) + t_408 = F.relu(t_407) + t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0) + t_409 = self.n_Conv_22(t_408_padded) + t_410 = F.relu(t_409) + t_411 = self.n_Conv_23(t_410) + t_412 = torch.add(t_411, t_406) + t_413 = F.relu(t_412) + t_414 = self.n_Conv_24(t_413) + t_415 = F.relu(t_414) + t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0) + t_416 = self.n_Conv_25(t_415_padded) + t_417 = F.relu(t_416) + t_418 = self.n_Conv_26(t_417) + t_419 = torch.add(t_418, t_413) + t_420 = F.relu(t_419) + t_421 = self.n_Conv_27(t_420) + t_422 = F.relu(t_421) + t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0) + t_423 = self.n_Conv_28(t_422_padded) + t_424 = F.relu(t_423) + t_425 = self.n_Conv_29(t_424) + t_426 = torch.add(t_425, t_420) + t_427 = F.relu(t_426) + t_428 = self.n_Conv_30(t_427) + t_429 = F.relu(t_428) + t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0) + t_430 = self.n_Conv_31(t_429_padded) + t_431 = F.relu(t_430) + t_432 = self.n_Conv_32(t_431) + t_433 = torch.add(t_432, t_427) + t_434 = F.relu(t_433) + t_435 = self.n_Conv_33(t_434) + t_436 = F.relu(t_435) + t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0) + t_437 = self.n_Conv_34(t_436_padded) + t_438 = F.relu(t_437) + t_439 = self.n_Conv_35(t_438) + t_440 = torch.add(t_439, t_434) + t_441 = F.relu(t_440) + t_442 = self.n_Conv_36(t_441) + t_443 = self.n_Conv_37(t_441) + t_444 = F.relu(t_443) + t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0) + t_445 = self.n_Conv_38(t_444_padded) + t_446 = F.relu(t_445) + t_447 = self.n_Conv_39(t_446) + t_448 = torch.add(t_447, t_442) + t_449 = F.relu(t_448) + t_450 = self.n_Conv_40(t_449) + t_451 = F.relu(t_450) + t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0) + t_452 = self.n_Conv_41(t_451_padded) + t_453 = F.relu(t_452) + t_454 = self.n_Conv_42(t_453) + t_455 = torch.add(t_454, t_449) + t_456 = F.relu(t_455) + t_457 = self.n_Conv_43(t_456) + t_458 = F.relu(t_457) + t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0) + t_459 = self.n_Conv_44(t_458_padded) + t_460 = F.relu(t_459) + t_461 = self.n_Conv_45(t_460) + t_462 = torch.add(t_461, t_456) + t_463 = F.relu(t_462) + t_464 = self.n_Conv_46(t_463) + t_465 = F.relu(t_464) + t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0) + t_466 = self.n_Conv_47(t_465_padded) + t_467 = 
F.relu(t_466) + t_468 = self.n_Conv_48(t_467) + t_469 = torch.add(t_468, t_463) + t_470 = F.relu(t_469) + t_471 = self.n_Conv_49(t_470) + t_472 = F.relu(t_471) + t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0) + t_473 = self.n_Conv_50(t_472_padded) + t_474 = F.relu(t_473) + t_475 = self.n_Conv_51(t_474) + t_476 = torch.add(t_475, t_470) + t_477 = F.relu(t_476) + t_478 = self.n_Conv_52(t_477) + t_479 = F.relu(t_478) + t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0) + t_480 = self.n_Conv_53(t_479_padded) + t_481 = F.relu(t_480) + t_482 = self.n_Conv_54(t_481) + t_483 = torch.add(t_482, t_477) + t_484 = F.relu(t_483) + t_485 = self.n_Conv_55(t_484) + t_486 = F.relu(t_485) + t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0) + t_487 = self.n_Conv_56(t_486_padded) + t_488 = F.relu(t_487) + t_489 = self.n_Conv_57(t_488) + t_490 = torch.add(t_489, t_484) + t_491 = F.relu(t_490) + t_492 = self.n_Conv_58(t_491) + t_493 = F.relu(t_492) + t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0) + t_494 = self.n_Conv_59(t_493_padded) + t_495 = F.relu(t_494) + t_496 = self.n_Conv_60(t_495) + t_497 = torch.add(t_496, t_491) + t_498 = F.relu(t_497) + t_499 = self.n_Conv_61(t_498) + t_500 = F.relu(t_499) + t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0) + t_501 = self.n_Conv_62(t_500_padded) + t_502 = F.relu(t_501) + t_503 = self.n_Conv_63(t_502) + t_504 = torch.add(t_503, t_498) + t_505 = F.relu(t_504) + t_506 = self.n_Conv_64(t_505) + t_507 = F.relu(t_506) + t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0) + t_508 = self.n_Conv_65(t_507_padded) + t_509 = F.relu(t_508) + t_510 = self.n_Conv_66(t_509) + t_511 = torch.add(t_510, t_505) + t_512 = F.relu(t_511) + t_513 = self.n_Conv_67(t_512) + t_514 = F.relu(t_513) + t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0) + t_515 = self.n_Conv_68(t_514_padded) + t_516 = F.relu(t_515) + t_517 = self.n_Conv_69(t_516) + t_518 = torch.add(t_517, t_512) + t_519 = F.relu(t_518) + t_520 = self.n_Conv_70(t_519) + t_521 = F.relu(t_520) + t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0) + t_522 = self.n_Conv_71(t_521_padded) + t_523 = F.relu(t_522) + t_524 = self.n_Conv_72(t_523) + t_525 = torch.add(t_524, t_519) + t_526 = F.relu(t_525) + t_527 = self.n_Conv_73(t_526) + t_528 = F.relu(t_527) + t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0) + t_529 = self.n_Conv_74(t_528_padded) + t_530 = F.relu(t_529) + t_531 = self.n_Conv_75(t_530) + t_532 = torch.add(t_531, t_526) + t_533 = F.relu(t_532) + t_534 = self.n_Conv_76(t_533) + t_535 = F.relu(t_534) + t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0) + t_536 = self.n_Conv_77(t_535_padded) + t_537 = F.relu(t_536) + t_538 = self.n_Conv_78(t_537) + t_539 = torch.add(t_538, t_533) + t_540 = F.relu(t_539) + t_541 = self.n_Conv_79(t_540) + t_542 = F.relu(t_541) + t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0) + t_543 = self.n_Conv_80(t_542_padded) + t_544 = F.relu(t_543) + t_545 = self.n_Conv_81(t_544) + t_546 = torch.add(t_545, t_540) + t_547 = F.relu(t_546) + t_548 = self.n_Conv_82(t_547) + t_549 = F.relu(t_548) + t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0) + t_550 = self.n_Conv_83(t_549_padded) + t_551 = F.relu(t_550) + t_552 = self.n_Conv_84(t_551) + t_553 = torch.add(t_552, t_547) + t_554 = F.relu(t_553) + t_555 = self.n_Conv_85(t_554) + t_556 = F.relu(t_555) + t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0) + t_557 = self.n_Conv_86(t_556_padded) + t_558 = F.relu(t_557) + t_559 = self.n_Conv_87(t_558) + t_560 = torch.add(t_559, t_554) + t_561 = F.relu(t_560) + t_562 = self.n_Conv_88(t_561) + t_563 = 
F.relu(t_562) + t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0) + t_564 = self.n_Conv_89(t_563_padded) + t_565 = F.relu(t_564) + t_566 = self.n_Conv_90(t_565) + t_567 = torch.add(t_566, t_561) + t_568 = F.relu(t_567) + t_569 = self.n_Conv_91(t_568) + t_570 = F.relu(t_569) + t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0) + t_571 = self.n_Conv_92(t_570_padded) + t_572 = F.relu(t_571) + t_573 = self.n_Conv_93(t_572) + t_574 = torch.add(t_573, t_568) + t_575 = F.relu(t_574) + t_576 = self.n_Conv_94(t_575) + t_577 = F.relu(t_576) + t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0) + t_578 = self.n_Conv_95(t_577_padded) + t_579 = F.relu(t_578) + t_580 = self.n_Conv_96(t_579) + t_581 = torch.add(t_580, t_575) + t_582 = F.relu(t_581) + t_583 = self.n_Conv_97(t_582) + t_584 = F.relu(t_583) + t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0) + t_585 = self.n_Conv_98(t_584_padded) + t_586 = F.relu(t_585) + t_587 = self.n_Conv_99(t_586) + t_588 = self.n_Conv_100(t_582) + t_589 = torch.add(t_587, t_588) + t_590 = F.relu(t_589) + t_591 = self.n_Conv_101(t_590) + t_592 = F.relu(t_591) + t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0) + t_593 = self.n_Conv_102(t_592_padded) + t_594 = F.relu(t_593) + t_595 = self.n_Conv_103(t_594) + t_596 = torch.add(t_595, t_590) + t_597 = F.relu(t_596) + t_598 = self.n_Conv_104(t_597) + t_599 = F.relu(t_598) + t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0) + t_600 = self.n_Conv_105(t_599_padded) + t_601 = F.relu(t_600) + t_602 = self.n_Conv_106(t_601) + t_603 = torch.add(t_602, t_597) + t_604 = F.relu(t_603) + t_605 = self.n_Conv_107(t_604) + t_606 = F.relu(t_605) + t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0) + t_607 = self.n_Conv_108(t_606_padded) + t_608 = F.relu(t_607) + t_609 = self.n_Conv_109(t_608) + t_610 = torch.add(t_609, t_604) + t_611 = F.relu(t_610) + t_612 = self.n_Conv_110(t_611) + t_613 = F.relu(t_612) + t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0) + t_614 = self.n_Conv_111(t_613_padded) + t_615 = F.relu(t_614) + t_616 = self.n_Conv_112(t_615) + t_617 = torch.add(t_616, t_611) + t_618 = F.relu(t_617) + t_619 = self.n_Conv_113(t_618) + t_620 = F.relu(t_619) + t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0) + t_621 = self.n_Conv_114(t_620_padded) + t_622 = F.relu(t_621) + t_623 = self.n_Conv_115(t_622) + t_624 = torch.add(t_623, t_618) + t_625 = F.relu(t_624) + t_626 = self.n_Conv_116(t_625) + t_627 = F.relu(t_626) + t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0) + t_628 = self.n_Conv_117(t_627_padded) + t_629 = F.relu(t_628) + t_630 = self.n_Conv_118(t_629) + t_631 = torch.add(t_630, t_625) + t_632 = F.relu(t_631) + t_633 = self.n_Conv_119(t_632) + t_634 = F.relu(t_633) + t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0) + t_635 = self.n_Conv_120(t_634_padded) + t_636 = F.relu(t_635) + t_637 = self.n_Conv_121(t_636) + t_638 = torch.add(t_637, t_632) + t_639 = F.relu(t_638) + t_640 = self.n_Conv_122(t_639) + t_641 = F.relu(t_640) + t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0) + t_642 = self.n_Conv_123(t_641_padded) + t_643 = F.relu(t_642) + t_644 = self.n_Conv_124(t_643) + t_645 = torch.add(t_644, t_639) + t_646 = F.relu(t_645) + t_647 = self.n_Conv_125(t_646) + t_648 = F.relu(t_647) + t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0) + t_649 = self.n_Conv_126(t_648_padded) + t_650 = F.relu(t_649) + t_651 = self.n_Conv_127(t_650) + t_652 = torch.add(t_651, t_646) + t_653 = F.relu(t_652) + t_654 = self.n_Conv_128(t_653) + t_655 = F.relu(t_654) + t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0) + t_656 = 
self.n_Conv_129(t_655_padded) + t_657 = F.relu(t_656) + t_658 = self.n_Conv_130(t_657) + t_659 = torch.add(t_658, t_653) + t_660 = F.relu(t_659) + t_661 = self.n_Conv_131(t_660) + t_662 = F.relu(t_661) + t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0) + t_663 = self.n_Conv_132(t_662_padded) + t_664 = F.relu(t_663) + t_665 = self.n_Conv_133(t_664) + t_666 = torch.add(t_665, t_660) + t_667 = F.relu(t_666) + t_668 = self.n_Conv_134(t_667) + t_669 = F.relu(t_668) + t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0) + t_670 = self.n_Conv_135(t_669_padded) + t_671 = F.relu(t_670) + t_672 = self.n_Conv_136(t_671) + t_673 = torch.add(t_672, t_667) + t_674 = F.relu(t_673) + t_675 = self.n_Conv_137(t_674) + t_676 = F.relu(t_675) + t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0) + t_677 = self.n_Conv_138(t_676_padded) + t_678 = F.relu(t_677) + t_679 = self.n_Conv_139(t_678) + t_680 = torch.add(t_679, t_674) + t_681 = F.relu(t_680) + t_682 = self.n_Conv_140(t_681) + t_683 = F.relu(t_682) + t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0) + t_684 = self.n_Conv_141(t_683_padded) + t_685 = F.relu(t_684) + t_686 = self.n_Conv_142(t_685) + t_687 = torch.add(t_686, t_681) + t_688 = F.relu(t_687) + t_689 = self.n_Conv_143(t_688) + t_690 = F.relu(t_689) + t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0) + t_691 = self.n_Conv_144(t_690_padded) + t_692 = F.relu(t_691) + t_693 = self.n_Conv_145(t_692) + t_694 = torch.add(t_693, t_688) + t_695 = F.relu(t_694) + t_696 = self.n_Conv_146(t_695) + t_697 = F.relu(t_696) + t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0) + t_698 = self.n_Conv_147(t_697_padded) + t_699 = F.relu(t_698) + t_700 = self.n_Conv_148(t_699) + t_701 = torch.add(t_700, t_695) + t_702 = F.relu(t_701) + t_703 = self.n_Conv_149(t_702) + t_704 = F.relu(t_703) + t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0) + t_705 = self.n_Conv_150(t_704_padded) + t_706 = F.relu(t_705) + t_707 = self.n_Conv_151(t_706) + t_708 = torch.add(t_707, t_702) + t_709 = F.relu(t_708) + t_710 = self.n_Conv_152(t_709) + t_711 = F.relu(t_710) + t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0) + t_712 = self.n_Conv_153(t_711_padded) + t_713 = F.relu(t_712) + t_714 = self.n_Conv_154(t_713) + t_715 = torch.add(t_714, t_709) + t_716 = F.relu(t_715) + t_717 = self.n_Conv_155(t_716) + t_718 = F.relu(t_717) + t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0) + t_719 = self.n_Conv_156(t_718_padded) + t_720 = F.relu(t_719) + t_721 = self.n_Conv_157(t_720) + t_722 = torch.add(t_721, t_716) + t_723 = F.relu(t_722) + t_724 = self.n_Conv_158(t_723) + t_725 = self.n_Conv_159(t_723) + t_726 = F.relu(t_725) + t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0) + t_727 = self.n_Conv_160(t_726_padded) + t_728 = F.relu(t_727) + t_729 = self.n_Conv_161(t_728) + t_730 = torch.add(t_729, t_724) + t_731 = F.relu(t_730) + t_732 = self.n_Conv_162(t_731) + t_733 = F.relu(t_732) + t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0) + t_734 = self.n_Conv_163(t_733_padded) + t_735 = F.relu(t_734) + t_736 = self.n_Conv_164(t_735) + t_737 = torch.add(t_736, t_731) + t_738 = F.relu(t_737) + t_739 = self.n_Conv_165(t_738) + t_740 = F.relu(t_739) + t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0) + t_741 = self.n_Conv_166(t_740_padded) + t_742 = F.relu(t_741) + t_743 = self.n_Conv_167(t_742) + t_744 = torch.add(t_743, t_738) + t_745 = F.relu(t_744) + t_746 = self.n_Conv_168(t_745) + t_747 = self.n_Conv_169(t_745) + t_748 = F.relu(t_747) + t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0) + t_749 = self.n_Conv_170(t_748_padded) + t_750 = 
F.relu(t_749) + t_751 = self.n_Conv_171(t_750) + t_752 = torch.add(t_751, t_746) + t_753 = F.relu(t_752) + t_754 = self.n_Conv_172(t_753) + t_755 = F.relu(t_754) + t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0) + t_756 = self.n_Conv_173(t_755_padded) + t_757 = F.relu(t_756) + t_758 = self.n_Conv_174(t_757) + t_759 = torch.add(t_758, t_753) + t_760 = F.relu(t_759) + t_761 = self.n_Conv_175(t_760) + t_762 = F.relu(t_761) + t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0) + t_763 = self.n_Conv_176(t_762_padded) + t_764 = F.relu(t_763) + t_765 = self.n_Conv_177(t_764) + t_766 = torch.add(t_765, t_760) + t_767 = F.relu(t_766) + t_768 = self.n_Conv_178(t_767) + t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:]) + t_770 = torch.squeeze(t_769, 3) + t_770 = torch.squeeze(t_770, 2) + t_771 = torch.sigmoid(t_770) + return t_771 + + def load_state_dict(self, state_dict, **kwargs): # pylint: disable=arguments-differ,unused-argument + self.tags = state_dict.get('tags', []) + super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'}) # pylint: disable=R1725 diff --git a/modules/errors.py b/modules/errors.py index 122628bff..8cb2eb599 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -1,96 +1,96 @@ -import logging -import warnings -from rich.console import Console -from rich.theme import Theme -from rich.pretty import install as pretty_install -from rich.traceback import install as traceback_install -from installer import log as installer_log, setup_logging - - -setup_logging() -log = installer_log -console = Console(log_time=True, log_time_format='%H:%M:%S-%f', theme=Theme({ - "traceback.border": "black", - "traceback.border.syntax_error": "black", - "inspect.value.border": "black", -})) - -pretty_install(console=console) -traceback_install(console=console, extra_lines=1, width=console.width, word_wrap=False, indent_guides=False) -already_displayed = {} - - -def install(suppress=[]): # noqa: B006 - warnings.filterwarnings("ignore", category=UserWarning) - pretty_install(console=console) - traceback_install(console=console, extra_lines=1, width=console.width, word_wrap=False, indent_guides=False, suppress=suppress) - logging.basicConfig(level=logging.ERROR, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s') - # for handler in logging.getLogger().handlers: - # handler.setLevel(logging.INFO) - - -def print_error_explanation(message): - lines = message.strip().split("\n") - for line in lines: - log.error(line) - - -def display(e: Exception, task, suppress=[]): # noqa: B006 - log.error(f"{task or 'error'}: {type(e).__name__}") - console.print_exception(show_locals=False, max_frames=10, extra_lines=1, suppress=suppress, theme="ansi_dark", word_wrap=False, width=min([console.width, 200])) - - -def display_once(e: Exception, task): - if task in already_displayed: - return - display(e, task) - already_displayed[task] = 1 - - -def run(code, task): - try: - code() - except Exception as e: - display(e, task) - - -def exception(suppress=[]): # noqa: B006 - console.print_exception(show_locals=False, max_frames=10, extra_lines=2, suppress=suppress, theme="ansi_dark", word_wrap=False, width=min([console.width, 200])) - - -def profile(profiler, msg: str): - profiler.disable() - import io - import pstats - stream = io.StringIO() # pylint: disable=abstract-class-instantiated - p = pstats.Stats(profiler, stream=stream) - p.sort_stats(pstats.SortKey.CUMULATIVE) - p.print_stats(100) - # p.print_title() - # p.print_call_heading(10, 'time') - # 
p.print_callees(10) - # p.print_callers(10) - profiler = None - lines = stream.getvalue().split('\n') - lines = [x for x in lines if '<frozen' not in x]
diff --git a/modules/extensions.py b/modules/extensions.py
- self.commit_hash = head.hexsha - self.version = f"{self.commit_hash[:8]} {datetime.fromtimestamp(self.commit_date).strftime('%a %b%d %Y %H:%M')}"
" - except Exception as ex: - shared.log.error(f"Extension: failed reading data from git repo={self.name}: {ex}") - self.remote = None - - def list_files(self, subdir, extension): - from modules import scripts - dirpath = os.path.join(self.path, subdir) - if not os.path.isdir(dirpath): - return [] - res = [] - for filename in sorted(os.listdir(dirpath)): - if not filename.endswith(".py") and not filename.endswith(".js") and not filename.endswith(".mjs"): - continue - priority = '50' - if os.path.isfile(os.path.join(dirpath, "..", ".priority")): - with open(os.path.join(dirpath, "..", ".priority"), "r", encoding="utf-8") as f: - priority = str(f.read().strip()) - res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename), priority)) - if priority != '50': - shared.log.debug(f'Extension priority override: {os.path.dirname(dirpath)}:{priority}') - res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] - return res - - def check_updates(self): - try: - repo = git.Repo(self.path) - except Exception: - self.can_update = False - return - for fetch in repo.remote().fetch(dry_run=True): - if fetch.flags != fetch.HEAD_UPTODATE: - self.can_update = True - self.status = "new commits" - return - try: - origin = repo.rev_parse('origin') - if repo.head.commit != origin: - self.can_update = True - self.status = "behind HEAD" - return - except Exception: - self.can_update = False - self.status = "unknown (remote error)" - return - self.can_update = False - self.status = "latest" - - def git_fetch(self, commit='origin'): - repo = git.Repo(self.path) - # Fix: `error: Your local changes to the following files would be overwritten by merge`, - # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 
- repo.git.fetch(all=True) - repo.git.reset('origin', hard=True) - repo.git.reset(commit, hard=True) - self.have_info_from_repo = False - - -def list_extensions(): - extensions.clear() - if not os.path.isdir(extensions_dir): - return - if shared.opts.disable_all_extensions == "all" or shared.opts.disable_all_extensions == "user": - shared.log.warning(f"Option set: Disable extensions: {shared.opts.disable_all_extensions}") - extension_paths = [] - extension_names = [] - extension_folders = [extensions_builtin_dir] if shared.cmd_opts.safe else [extensions_builtin_dir, extensions_dir] - for dirname in extension_folders: - if not os.path.isdir(dirname): - return - for extension_dirname in sorted(os.listdir(dirname)): - path = os.path.join(dirname, extension_dirname) - if not os.path.isdir(path): - continue - if extension_dirname in extension_names: - shared.log.info(f'Skipping conflicting extension: {path}') - continue - extension_names.append(extension_dirname) - extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir)) - disabled_extensions = shared.opts.disabled_extensions + shared.temp_disable_extensions() - for dirname, path, is_builtin in extension_paths: - extension = Extension(name=dirname, path=path, enabled=dirname not in disabled_extensions, is_builtin=is_builtin) - extensions.append(extension) - shared.log.info(f'Disabled extensions: {[e.name for e in extensions if not e.enabled]}') +import os +from datetime import datetime +import git +from modules import shared, errors +from modules.paths import extensions_dir, extensions_builtin_dir + + +extensions = [] + + +if not os.path.exists(extensions_dir): + os.makedirs(extensions_dir) + + +def active(): + if shared.opts.disable_all_extensions == "all": + return [] + elif shared.opts.disable_all_extensions == "user": + return [x for x in extensions if x.enabled and x.is_builtin] + else: + return [x for x in extensions if x.enabled] + + +class Extension: + def __init__(self, name, path, enabled=True, is_builtin=False): + self.name = name + self.git_name = '' + self.path = path + self.enabled = enabled + self.status = '' + self.can_update = False + self.is_builtin = is_builtin + self.commit_hash = '' + self.commit_date = None + self.version = '' + self.description = '' + self.branch = None + self.remote = None + self.have_info_from_repo = False + self.mtime = 0 + self.ctime = 0 + + def read_info(self, force=False): + if self.have_info_from_repo and not force: + return + self.have_info_from_repo = True + repo = None + self.mtime = datetime.fromtimestamp(os.path.getmtime(self.path)).isoformat() + 'Z' + self.ctime = datetime.fromtimestamp(os.path.getctime(self.path)).isoformat() + 'Z' + try: + if os.path.exists(os.path.join(self.path, ".git")): + repo = git.Repo(self.path) + except Exception as e: + errors.display(e, f'github info from {self.path}') + if repo is None or repo.bare: + self.remote = None + else: + try: + self.status = 'unknown' + if len(repo.remotes) == 0: + shared.log.debug(f"Extension: no remotes info repo={self.name}") + return + self.git_name = repo.remotes.origin.url.split('.git')[0].split('/')[-1] + self.description = repo.description + if self.description is None or self.description.startswith("Unnamed repository"): + self.description = "[No description]" + self.remote = next(repo.remote().urls, None) + head = repo.head.commit + self.commit_date = repo.head.commit.committed_date + try: + if repo.active_branch: + self.branch = repo.active_branch.name + except Exception: + pass + self.commit_hash = 
head.hexsha + self.version = f"{self.commit_hash[:8]} {datetime.fromtimestamp(self.commit_date).strftime('%a %b%d %Y %H:%M')}"
" + except Exception as ex: + shared.log.error(f"Extension: failed reading data from git repo={self.name}: {ex}") + self.remote = None + + def list_files(self, subdir, extension): + from modules import scripts + dirpath = os.path.join(self.path, subdir) + if not os.path.isdir(dirpath): + return [] + res = [] + for filename in sorted(os.listdir(dirpath)): + if not filename.endswith(".py") and not filename.endswith(".js") and not filename.endswith(".mjs"): + continue + priority = '50' + if os.path.isfile(os.path.join(dirpath, "..", ".priority")): + with open(os.path.join(dirpath, "..", ".priority"), "r", encoding="utf-8") as f: + priority = str(f.read().strip()) + res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename), priority)) + if priority != '50': + shared.log.debug(f'Extension priority override: {os.path.dirname(dirpath)}:{priority}') + res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] + return res + + def check_updates(self): + try: + repo = git.Repo(self.path) + except Exception: + self.can_update = False + return + for fetch in repo.remote().fetch(dry_run=True): + if fetch.flags != fetch.HEAD_UPTODATE: + self.can_update = True + self.status = "new commits" + return + try: + origin = repo.rev_parse('origin') + if repo.head.commit != origin: + self.can_update = True + self.status = "behind HEAD" + return + except Exception: + self.can_update = False + self.status = "unknown (remote error)" + return + self.can_update = False + self.status = "latest" + + def git_fetch(self, commit='origin'): + repo = git.Repo(self.path) + # Fix: `error: Your local changes to the following files would be overwritten by merge`, + # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 
+ repo.git.fetch(all=True) + repo.git.reset('origin', hard=True) + repo.git.reset(commit, hard=True) + self.have_info_from_repo = False + + +def list_extensions(): + extensions.clear() + if not os.path.isdir(extensions_dir): + return + if shared.opts.disable_all_extensions == "all" or shared.opts.disable_all_extensions == "user": + shared.log.warning(f"Option set: Disable extensions: {shared.opts.disable_all_extensions}") + extension_paths = [] + extension_names = [] + extension_folders = [extensions_builtin_dir] if shared.cmd_opts.safe else [extensions_builtin_dir, extensions_dir] + for dirname in extension_folders: + if not os.path.isdir(dirname): + return + for extension_dirname in sorted(os.listdir(dirname)): + path = os.path.join(dirname, extension_dirname) + if not os.path.isdir(path): + continue + if extension_dirname in extension_names: + shared.log.info(f'Skipping conflicting extension: {path}') + continue + extension_names.append(extension_dirname) + extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir)) + disabled_extensions = shared.opts.disabled_extensions + shared.temp_disable_extensions() + for dirname, path, is_builtin in extension_paths: + extension = Extension(name=dirname, path=path, enabled=dirname not in disabled_extensions, is_builtin=is_builtin) + extensions.append(extension) + shared.log.info(f'Disabled extensions: {[e.name for e in extensions if not e.enabled]}') diff --git a/modules/extra_networks.py b/modules/extra_networks.py index d3ca87dd2..b74dfe0e3 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -1,137 +1,137 @@ -import re -from collections import defaultdict - -from modules import errors - -extra_network_registry = {} - - -def initialize(): - extra_network_registry.clear() - - -def register_extra_network(extra_network): - extra_network_registry[extra_network.name] = extra_network - - -def register_default_extra_networks(): - from modules.extra_networks_hypernet import ExtraNetworkHypernet - register_extra_network(ExtraNetworkHypernet()) - from modules.ui_extra_networks_styles import ExtraNetworkStyles - register_extra_network(ExtraNetworkStyles()) - - -class ExtraNetworkParams: - def __init__(self, items=None): - self.items = items or [] - self.positional = [] - self.named = {} - for item in self.items: - parts = item.split('=', 2) if isinstance(item, str) else [item] - if len(parts) == 2: - self.named[parts[0]] = parts[1] - else: - self.positional.append(item) - - -class ExtraNetwork: - def __init__(self, name): - self.name = name - - def activate(self, p, params_list): - """ - Called by processing on every run. Whatever the extra network is meant to do should be activated here. Passes arguments related to this extra network in params_list. User passes arguments by specifying this in his prompt: - - Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments separated by colon. - Even if the user does not mention this ExtraNetwork in his prompt, the call will stil be made, with empty params_list - in this case, all effects of this extra networks should be disabled. - Can be called multiple times before deactivate() - each new call should override the previous call completely. 
- For example, if this ExtraNetwork's name is 'hypernet' and user's prompt is: - > "1girl, " - params_list will be: - [ - ExtraNetworkParams(items=["agm", "1.1"]), - ExtraNetworkParams(items=["ray"]) - ] - """ - raise NotImplementedError - - def deactivate(self, p): - """ - Called at the end of processing for housekeeping. No need to do anything here. - """ - raise NotImplementedError - - -def activate(p, extra_network_data): - """call activate for extra networks in extra_network_data in specified order, then call activate for all remaining registered networks with an empty argument list""" - if extra_network_data is None: - return - for extra_network_name, extra_network_args in extra_network_data.items(): - extra_network = extra_network_registry.get(extra_network_name, None) - if extra_network is None: - print(f"Skipping unknown extra network: {extra_network_name}") - continue - try: - extra_network.activate(p, extra_network_args) - except Exception as e: - errors.display(e, f"activating extra network: name={extra_network_name} args:{extra_network_args}") - - for extra_network_name, extra_network in extra_network_registry.items(): - args = extra_network_data.get(extra_network_name, None) - if args is not None: - continue - try: - extra_network.activate(p, []) - except Exception as e: - errors.display(e, f"activating extra network: name={extra_network_name}") - - -def deactivate(p, extra_network_data): - """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks""" - if extra_network_data is None: - return - for extra_network_name in extra_network_data: - extra_network = extra_network_registry.get(extra_network_name, None) - if extra_network is None: - continue - try: - extra_network.deactivate(p) - except Exception as e: - errors.display(e, f"deactivating extra network {extra_network_name}") - - for extra_network_name, extra_network in extra_network_registry.items(): - args = extra_network_data.get(extra_network_name, None) - if args is not None: - continue - try: - extra_network.deactivate(p) - except Exception as e: - errors.display(e, f"deactivating unmentioned extra network {extra_network_name}") - - -re_extra_net = re.compile(r"<(\w+):([^>]+)>") - - -def parse_prompt(prompt): - res = defaultdict(list) - - def found(m): - name = m.group(1) - args = m.group(2) - res[name].append(ExtraNetworkParams(items=args.split(":"))) - return "" - prompt = re.sub(re_extra_net, found, prompt) - return prompt, res - - -def parse_prompts(prompts): - res = [] - extra_data = None - - for prompt in prompts: - updated_prompt, parsed_extra_data = parse_prompt(prompt) - if extra_data is None: - extra_data = parsed_extra_data - res.append(updated_prompt) - - return res, extra_data +import re +from collections import defaultdict + +from modules import errors + +extra_network_registry = {} + + +def initialize(): + extra_network_registry.clear() + + +def register_extra_network(extra_network): + extra_network_registry[extra_network.name] = extra_network + + +def register_default_extra_networks(): + from modules.extra_networks_hypernet import ExtraNetworkHypernet + register_extra_network(ExtraNetworkHypernet()) + from modules.ui_extra_networks_styles import ExtraNetworkStyles + register_extra_network(ExtraNetworkStyles()) + + +class ExtraNetworkParams: + def __init__(self, items=None): + self.items = items or [] + self.positional = [] + self.named = {} + for item in self.items: + parts = item.split('=', 2) if isinstance(item, str) 
else [item]
+            if len(parts) == 2:
+                self.named[parts[0]] = parts[1]
+            else:
+                self.positional.append(item)
+
+
+class ExtraNetwork:
+    def __init__(self, name):
+        self.name = name
+
+    def activate(self, p, params_list):
+        """
+        Called by processing on every run. Whatever the extra network is meant to do should be activated here.
+        Passes arguments related to this extra network in params_list.
+        User passes arguments by specifying this in his prompt:
+
+        <name:arg1:arg2:arg3>
+
+        Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments separated by colon.
+        Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list -
+        in this case, all effects of this extra network should be disabled.
+        Can be called multiple times before deactivate() - each new call should override the previous call completely.
+        For example, if this ExtraNetwork's name is 'hypernet' and user's prompt is:
+        > "1girl, <hypernet:agm:1.1> <hypernet:ray>"
+        params_list will be:
+        [
+            ExtraNetworkParams(items=["agm", "1.1"]),
+            ExtraNetworkParams(items=["ray"])
+        ]
+        """
+        raise NotImplementedError
+
+    def deactivate(self, p):
+        """
+        Called at the end of processing for housekeeping. No need to do anything here.
+        """
+        raise NotImplementedError
+
+
+def activate(p, extra_network_data):
+    """call activate for extra networks in extra_network_data in specified order, then call activate for all remaining registered networks with an empty argument list"""
+    if extra_network_data is None:
+        return
+    for extra_network_name, extra_network_args in extra_network_data.items():
+        extra_network = extra_network_registry.get(extra_network_name, None)
+        if extra_network is None:
+            print(f"Skipping unknown extra network: {extra_network_name}")
+            continue
+        try:
+            extra_network.activate(p, extra_network_args)
+        except Exception as e:
+            errors.display(e, f"activating extra network: name={extra_network_name} args:{extra_network_args}")
+
+    for extra_network_name, extra_network in extra_network_registry.items():
+        args = extra_network_data.get(extra_network_name, None)
+        if args is not None:
+            continue
+        try:
+            extra_network.activate(p, [])
+        except Exception as e:
+            errors.display(e, f"activating extra network: name={extra_network_name}")
+
+
+def deactivate(p, extra_network_data):
+    """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks"""
+    if extra_network_data is None:
+        return
+    for extra_network_name in extra_network_data:
+        extra_network = extra_network_registry.get(extra_network_name, None)
+        if extra_network is None:
+            continue
+        try:
+            extra_network.deactivate(p)
+        except Exception as e:
+            errors.display(e, f"deactivating extra network {extra_network_name}")
+
+    for extra_network_name, extra_network in extra_network_registry.items():
+        args = extra_network_data.get(extra_network_name, None)
+        if args is not None:
+            continue
+        try:
+            extra_network.deactivate(p)
+        except Exception as e:
+            errors.display(e, f"deactivating unmentioned extra network {extra_network_name}")
+
+
+re_extra_net = re.compile(r"<(\w+):([^>]+)>")
+
+
+def parse_prompt(prompt):
+    res = defaultdict(list)
+
+    def found(m):
+        name = m.group(1)
+        args = m.group(2)
+        res[name].append(ExtraNetworkParams(items=args.split(":")))
+        return ""
+    prompt = re.sub(re_extra_net, found, prompt)
+    return prompt, res
+
+
+def parse_prompts(prompts):
+    res = []
+    extra_data = None
+
+    for prompt in prompts:
+        updated_prompt, parsed_extra_data = 
parse_prompt(prompt) + if extra_data is None: + extra_data = parsed_extra_data + res.append(updated_prompt) + + return res, extra_data diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index aa2a14efd..dce11b68a 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -1,28 +1,28 @@ -from modules import extra_networks, shared -from modules.hypernetworks import hypernetwork - - -class ExtraNetworkHypernet(extra_networks.ExtraNetwork): - def __init__(self): - super().__init__('hypernet') - - def activate(self, p, params_list): - additional = shared.opts.sd_hypernetwork - - if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: - hypernet_prompt_text = f"" - p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] - params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) - - names = [] - multipliers = [] - for params in params_list: - assert len(params.items) > 0 - - names.append(params.items[0]) - multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) - - hypernetwork.load_hypernetworks(names, multipliers) - - def deactivate(self, p): - pass +from modules import extra_networks, shared +from modules.hypernetworks import hypernetwork + + +class ExtraNetworkHypernet(extra_networks.ExtraNetwork): + def __init__(self): + super().__init__('hypernet') + + def activate(self, p, params_list): + additional = shared.opts.sd_hypernetwork + + if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: + hypernet_prompt_text = f"" + p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] + params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) + + names = [] + multipliers = [] + for params in params_list: + assert len(params.items) > 0 + + names.append(params.items[0]) + multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) + + hypernetwork.load_hypernetworks(names, multipliers) + + def deactivate(self, p): + pass diff --git a/modules/extras.py b/modules/extras.py index fa3e07378..8e24635ff 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -1,348 +1,348 @@ -import os -import html -import json -import time -import shutil - -import torch -import tqdm -import gradio as gr -import safetensors.torch -from modules.merging.merge import merge_models -from modules.merging.merge_utils import TRIPLE_METHODS - -from modules import shared, images, sd_models, sd_vae, sd_models_config, devices - - -def run_pnginfo(image): - if image is None: - return '', '', '' - geninfo, items = images.read_info_from_image(image) - items = {**{'parameters': geninfo}, **items} - info = '' - for key, text in items.items(): - if key != 'UserComment': - info += f"
{html.escape(str(key))}: {html.escape(str(text))}
" - return '', geninfo, info - - -def create_config(ckpt_result, config_source, a, b, c): - def config(x): - res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None - return res if res != shared.sd_default_config else None - - if config_source == 0: - cfg = config(a) or config(b) or config(c) - elif config_source == 1: - cfg = config(b) - elif config_source == 2: - cfg = config(c) - else: - cfg = None - if cfg is None: - return - filename, _ = os.path.splitext(ckpt_result) - checkpoint_filename = filename + ".yaml" - shared.log.info("Copying config: {cfg} -> {checkpoint_filename}") - shutil.copyfile(cfg, checkpoint_filename) - - -def to_half(tensor, enable): - if enable and tensor.dtype == torch.float: - return tensor.half() - return tensor - - -def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument - shared.state.begin('merge') - t0 = time.time() - - def fail(message): - shared.state.textinfo = message - shared.state.end() - return [*[gr.update() for _ in range(4)], message] - - kwargs["models"] = { - "model_a": sd_models.get_closet_checkpoint_match(kwargs.get("primary_model_name", None)).filename, - "model_b": sd_models.get_closet_checkpoint_match(kwargs.get("secondary_model_name", None)).filename, - } - - if kwargs.get("primary_model_name", None) in [None, 'None']: - return fail("Failed: Merging requires a primary model.") - primary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("primary_model_name", None)) - if kwargs.get("secondary_model_name", None) in [None, 'None']: - return fail("Failed: Merging requires a secondary model.") - secondary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("secondary_model_name", None)) - if kwargs.get("tertiary_model_name", None) in [None, 'None'] and kwargs.get("merge_mode", None) in TRIPLE_METHODS: - return fail(f"Failed: Interpolation method ({kwargs.get('merge_mode', None)}) requires a tertiary model.") - tertiary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("tertiary_model_name", None)) if kwargs.get("merge_mode", None) in TRIPLE_METHODS else None - - del kwargs["primary_model_name"] - del kwargs["secondary_model_name"] - if kwargs.get("tertiary_model_name", None) is not None: - kwargs["models"] |= {"model_c": sd_models.get_closet_checkpoint_match(kwargs.get("tertiary_model_name", None)).filename} - del kwargs["tertiary_model_name"] - - if hasattr(kwargs, "alpha_base") and hasattr(kwargs, "alpha_in_blocks") and hasattr(kwargs, "alpha_mid_block") and hasattr(kwargs, "alpha_out_blocks"): - try: - alpha = [float(x) for x in - [kwargs["alpha_base"]] + kwargs["alpha_in_blocks"].split(",") + [kwargs["alpha_mid_block"]] + kwargs["alpha_out_blocks"].split(",")] - assert len(alpha) == 26 or len(alpha) == 20, "Alpha Block Weights are wrong length (26 or 20 for SDXL) falling back" - kwargs["alpha"] = alpha - except KeyError as ke: - shared.log.warning(f"Merge: Malformed manual block weight: {ke}") - elif hasattr(kwargs, "alpha_preset") or hasattr(kwargs, "alpha"): - kwargs["alpha"] = kwargs.get("alpha_preset", kwargs["alpha"]) - - kwargs.pop("alpha_base", None) - kwargs.pop("alpha_in_blocks", None) - kwargs.pop("alpha_mid_block", None) - kwargs.pop("alpha_out_blocks", None) - kwargs.pop("alpha_preset", None) - - if hasattr(kwargs, "beta_base") and hasattr(kwargs, "beta_in_blocks") and hasattr(kwargs, "beta_mid_block") and hasattr(kwargs, "beta_out_blocks"): - try: - beta = [float(x) for x in - [kwargs["beta_base"]] + kwargs["beta_in_blocks"].split(",") + 
[kwargs["beta_mid_block"]] + kwargs["beta_out_blocks"].split(",")] - assert len(beta) == 26 or len(beta) == 20, "Beta Block Weights are wrong length (26 or 20 for SDXL) falling back" - kwargs["beta"] = beta - except KeyError as ke: - shared.log.warning(f"Merge: Malformed manual block weight: {ke}") - elif hasattr(kwargs, "beta_preset") or hasattr(kwargs, "beta"): - kwargs["beta"] = kwargs.get("beta_preset", kwargs["beta"]) - - kwargs.pop("beta_base", None) - kwargs.pop("beta_in_blocks", None) - kwargs.pop("beta_mid_block", None) - kwargs.pop("beta_out_blocks", None) - kwargs.pop("beta_preset", None) - - if kwargs["device"] == "gpu": - kwargs["device"] = devices.device - elif kwargs["device"] == "shuffle": - kwargs["device"] = torch.device("cpu") - kwargs["work_device"] = devices.device - else: - kwargs["device"] = torch.device("cpu") - if kwargs.pop("unload", False): - sd_models.unload_model_weights() - - try: - theta_0 = merge_models(**kwargs) - except Exception as e: - return fail(f"{e}") - - try: - theta_0 = theta_0.to_dict() #TensorDict -> Dict if necessary - except Exception: - pass - - bake_in_vae_filename = sd_vae.vae_dict.get(kwargs.get("bake_in_vae", None), None) - if bake_in_vae_filename is not None: - shared.log.info(f"Merge VAE='{bake_in_vae_filename}'") - shared.state.textinfo = 'Merge VAE' - vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename) - for key in vae_dict.keys(): - theta_0_key = 'first_stage_model.' + key - if theta_0_key in theta_0: - theta_0[theta_0_key] = to_half(vae_dict[key], kwargs.get("precision", "fp16") == "fp16") - del vae_dict - - ckpt_dir = shared.opts.ckpt_dir or sd_models.model_path - filename = kwargs.get("custom_name", "Unnamed_Merge") - filename += "." + kwargs.get("checkpoint_format", None) - output_modelname = os.path.join(ckpt_dir, filename) - shared.state.textinfo = "merge saving" - metadata = None - if kwargs.get("save_metadata", False): - metadata = {"format": "pt", "sd_merge_models": {}} - merge_recipe = { - "type": "SDNext", # indicate this model was merged with webui's built-in merger - "primary_model_hash": primary_model_info.sha256, - "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, - "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, - "merge_mode": kwargs.get('merge_mode', None), - "alpha": kwargs.get('alpha', None), - "beta": kwargs.get('beta', None), - "precision": kwargs.get('precision', None), - "custom_name": kwargs.get("custom_name", "Unamed_Merge"), - } - metadata["sd_merge_recipe"] = json.dumps(merge_recipe) - - def add_model_metadata(checkpoint_info): - checkpoint_info.calculate_shorthash() - metadata["sd_merge_models"][checkpoint_info.sha256] = { - "name": checkpoint_info.name, - "legacy_hash": checkpoint_info.hash, - "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) - } - metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {})) - - add_model_metadata(primary_model_info) - if secondary_model_info: - add_model_metadata(secondary_model_info) - if tertiary_model_info: - add_model_metadata(tertiary_model_info) - metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"]) - - _, extension = os.path.splitext(output_modelname) - - if os.path.exists(output_modelname) and not kwargs.get("overwrite", False): - return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Model alredy exists: {output_modelname}"] - if extension.lower() == ".safetensors": - 
safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) - else: - torch.save(theta_0, output_modelname) - - t1 = time.time() - shared.log.info(f"Merge complete: saved='{output_modelname}' time={t1-t0:.2f}") - sd_models.list_models() - created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) - if created_model: - created_model.calculate_shorthash() - devices.torch_gc(force=True) - shared.state.end() - return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Model saved to {output_modelname}"] - - -def run_modelconvert(model, checkpoint_formats, precision, conv_type, custom_name, unet_conv, text_encoder_conv, - vae_conv, others_conv, fix_clip): - # position_ids in clip is int64. model_ema.num_updates is int32 - dtypes_to_fp16 = {torch.float32, torch.float64, torch.bfloat16} - dtypes_to_bf16 = {torch.float32, torch.float64, torch.float16} - - def conv_fp16(t: torch.Tensor): - return t.half() if t.dtype in dtypes_to_fp16 else t - - def conv_bf16(t: torch.Tensor): - return t.bfloat16() if t.dtype in dtypes_to_bf16 else t - - def conv_full(t): - return t - - _g_precision_func = { - "full": conv_full, - "fp32": conv_full, - "fp16": conv_fp16, - "bf16": conv_bf16, - } - - def check_weight_type(k: str) -> str: - if k.startswith("model.diffusion_model"): - return "unet" - elif k.startswith("first_stage_model"): - return "vae" - elif k.startswith("cond_stage_model"): - return "clip" - return "other" - - def load_model(path): - if path.endswith(".safetensors"): - m = safetensors.torch.load_file(path, device="cpu") - else: - m = torch.load(path, map_location="cpu") - state_dict = m["state_dict"] if "state_dict" in m else m - return state_dict - - def fix_model(model, fix_clip=False): - # code from model-toolkit - nai_keys = { - 'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.', - 'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.', - 'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.' - } - for k in list(model.keys()): - for r in nai_keys: - if type(k) == str and k.startswith(r): - new_key = k.replace(r, nai_keys[r]) - model[new_key] = model[k] - del model[k] - shared.log.warning(f"Model convert: fixed NovelAI error key: {k}") - break - if fix_clip: - i = "cond_stage_model.transformer.text_model.embeddings.position_ids" - if i in model: - correct = torch.Tensor([list(range(77))]).to(torch.int64) - now = model[i].to(torch.int64) - - broken = correct.ne(now) - broken = [i for i in range(77) if broken[0][i]] - model[i] = correct - if len(broken) != 0: - shared.log.warning(f"Model convert: fixed broken CLiP: {broken}") - - return model - - if model == "": - return "Error: you must choose a model" - if len(checkpoint_formats) == 0: - return "Error: at least choose one model save format" - - extra_opt = { - "unet": unet_conv, - "clip": text_encoder_conv, - "vae": vae_conv, - "other": others_conv - } - shared.state.begin('convert') - model_info = sd_models.checkpoints_list[model] - shared.state.textinfo = f"Loading {model_info.filename}..." 
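# Note on the conversion table above: conv_fp16/conv_bf16 only down-cast floating-point
# tensors; integer tensors pass through unchanged, preserving e.g. the int64 CLIP
# position_ids mentioned at the top of this function. A minimal sanity check (sketch):
#   assert conv_fp16(torch.zeros(1, dtype=torch.float32)).dtype == torch.float16
#   assert conv_fp16(torch.zeros(1, dtype=torch.int64)).dtype == torch.int64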
- shared.log.info(f"Model convert loading: {model_info.filename}") - state_dict = load_model(model_info.filename) - - ok = {} # {"state_dict": {}} - - conv_func = _g_precision_func[precision] - - def _hf(wk: str, t: torch.Tensor): - if not isinstance(t, torch.Tensor): - return - w_t = check_weight_type(wk) - conv_t = extra_opt[w_t] - if conv_t == "convert": - ok[wk] = conv_func(t) - elif conv_t == "copy": - ok[wk] = t - elif conv_t == "delete": - return - - shared.log.info("Model convert: running") - if conv_type == "ema-only": - for k in tqdm.tqdm(state_dict): - ema_k = "___" - try: - ema_k = "model_ema." + k[6:].replace(".", "") - except Exception: - pass - if ema_k in state_dict: - _hf(k, state_dict[ema_k]) - elif not k.startswith("model_ema.") or k in ["model_ema.num_updates", "model_ema.decay"]: - _hf(k, state_dict[k]) - elif conv_type == "no-ema": - for k, v in tqdm.tqdm(state_dict.items()): - if "model_ema." not in k: - _hf(k, v) - else: - for k, v in tqdm.tqdm(state_dict.items()): - _hf(k, v) - - ok = fix_model(ok, fix_clip=fix_clip) - output = "" - ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path - save_name = f"{model_info.model_name}-{precision}" - if conv_type != "disabled": - save_name += f"-{conv_type}" - if custom_name != "": - save_name = custom_name - for fmt in checkpoint_formats: - ext = ".safetensors" if fmt == "safetensors" else ".ckpt" - _save_name = save_name + ext - save_path = os.path.join(ckpt_dir, _save_name) - shared.log.info(f"Model convert saving: {save_path}") - if fmt == "safetensors": - safetensors.torch.save_file(ok, save_path) - else: - torch.save({"state_dict": ok}, save_path) - output += f"Checkpoint saved to {save_path}
" - shared.state.end() - return output +import os +import html +import json +import time +import shutil + +import torch +import tqdm +import gradio as gr +import safetensors.torch +from modules.merging.merge import merge_models +from modules.merging.merge_utils import TRIPLE_METHODS + +from modules import shared, images, sd_models, sd_vae, sd_models_config, devices + + +def run_pnginfo(image): + if image is None: + return '', '', '' + geninfo, items = images.read_info_from_image(image) + items = {**{'parameters': geninfo}, **items} + info = '' + for key, text in items.items(): + if key != 'UserComment': + info += f"
{html.escape(str(key))}: {html.escape(str(text))}"
+    return '', geninfo, info
+
+
+def create_config(ckpt_result, config_source, a, b, c):
+    def config(x):
+        res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
+        return res if res != shared.sd_default_config else None
+
+    if config_source == 0:
+        cfg = config(a) or config(b) or config(c)
+    elif config_source == 1:
+        cfg = config(b)
+    elif config_source == 2:
+        cfg = config(c)
+    else:
+        cfg = None
+    if cfg is None:
+        return
+    filename, _ = os.path.splitext(ckpt_result)
+    checkpoint_filename = filename + ".yaml"
+    shared.log.info(f"Copying config: {cfg} -> {checkpoint_filename}")
+    shutil.copyfile(cfg, checkpoint_filename)
+
+
+def to_half(tensor, enable):
+    if enable and tensor.dtype == torch.float:
+        return tensor.half()
+    return tensor
+
+
+def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
+    shared.state.begin('merge')
+    t0 = time.time()
+
+    def fail(message):
+        shared.state.textinfo = message
+        shared.state.end()
+        return [*[gr.update() for _ in range(4)], message]
+
+    kwargs["models"] = {
+        "model_a": sd_models.get_closet_checkpoint_match(kwargs.get("primary_model_name", None)).filename,
+        "model_b": sd_models.get_closet_checkpoint_match(kwargs.get("secondary_model_name", None)).filename,
+    }
+
+    if kwargs.get("primary_model_name", None) in [None, 'None']:
+        return fail("Failed: Merging requires a primary model.")
+    primary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("primary_model_name", None))
+    if kwargs.get("secondary_model_name", None) in [None, 'None']:
+        return fail("Failed: Merging requires a secondary model.")
+    secondary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("secondary_model_name", None))
+    if kwargs.get("tertiary_model_name", None) in [None, 'None'] and kwargs.get("merge_mode", None) in TRIPLE_METHODS:
+        return fail(f"Failed: Interpolation method ({kwargs.get('merge_mode', None)}) requires a tertiary model.")
+    tertiary_model_info = sd_models.get_closet_checkpoint_match(kwargs.get("tertiary_model_name", None)) if kwargs.get("merge_mode", None) in TRIPLE_METHODS else None
+
+    del kwargs["primary_model_name"]
+    del kwargs["secondary_model_name"]
+    if kwargs.get("tertiary_model_name", None) is not None:
+        kwargs["models"] |= {"model_c": sd_models.get_closet_checkpoint_match(kwargs.get("tertiary_model_name", None)).filename}
+        del kwargs["tertiary_model_name"]
+
+    if "alpha_base" in kwargs and "alpha_in_blocks" in kwargs and "alpha_mid_block" in kwargs and "alpha_out_blocks" in kwargs: # kwargs is a dict, so membership is tested with `in`, not hasattr()
+        try:
+            alpha = [float(x) for x in
+                     [kwargs["alpha_base"]] + kwargs["alpha_in_blocks"].split(",") + [kwargs["alpha_mid_block"]] + kwargs["alpha_out_blocks"].split(",")]
+            assert len(alpha) == 26 or len(alpha) == 20, "Alpha Block Weights are wrong length (26 or 20 for SDXL) falling back"
+            kwargs["alpha"] = alpha
+        except KeyError as ke:
+            shared.log.warning(f"Merge: Malformed manual block weight: {ke}")
+    elif "alpha_preset" in kwargs or "alpha" in kwargs:
+        kwargs["alpha"] = kwargs.get("alpha_preset", kwargs.get("alpha"))
+
+    kwargs.pop("alpha_base", None)
+    kwargs.pop("alpha_in_blocks", None)
+    kwargs.pop("alpha_mid_block", None)
+    kwargs.pop("alpha_out_blocks", None)
+    kwargs.pop("alpha_preset", None)
+
+    if "beta_base" in kwargs and "beta_in_blocks" in kwargs and "beta_mid_block" in kwargs and "beta_out_blocks" in kwargs:
+        try:
+            beta = [float(x) for x in
+                    [kwargs["beta_base"]] + kwargs["beta_in_blocks"].split(",") + [kwargs["beta_mid_block"]] + kwargs["beta_out_blocks"].split(",")]
+            assert len(beta) == 26 or len(beta) == 20, "Beta Block Weights are wrong length (26 or 20 for SDXL) falling back"
+            kwargs["beta"] = beta
+        except KeyError as ke:
+            shared.log.warning(f"Merge: Malformed manual block weight: {ke}")
+    elif "beta_preset" in kwargs or "beta" in kwargs:
+        kwargs["beta"] = kwargs.get("beta_preset", kwargs.get("beta"))
+
+    kwargs.pop("beta_base", None)
+    kwargs.pop("beta_in_blocks", None)
+    kwargs.pop("beta_mid_block", None)
+    kwargs.pop("beta_out_blocks", None)
+    kwargs.pop("beta_preset", None)
+
+    if kwargs["device"] == "gpu":
+        kwargs["device"] = devices.device
+    elif kwargs["device"] == "shuffle":
+        kwargs["device"] = torch.device("cpu")
+        kwargs["work_device"] = devices.device
+    else:
+        kwargs["device"] = torch.device("cpu")
+    if kwargs.pop("unload", False):
+        sd_models.unload_model_weights()
+
+    try:
+        theta_0 = merge_models(**kwargs)
+    except Exception as e:
+        return fail(f"{e}")
+
+    try:
+        theta_0 = theta_0.to_dict() # TensorDict -> Dict if necessary
+    except Exception:
+        pass
+
+    bake_in_vae_filename = sd_vae.vae_dict.get(kwargs.get("bake_in_vae", None), None)
+    if bake_in_vae_filename is not None:
+        shared.log.info(f"Merge VAE='{bake_in_vae_filename}'")
+        shared.state.textinfo = 'Merge VAE'
+        vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename)
+        for key in vae_dict.keys():
+            theta_0_key = 'first_stage_model.' + key
+            if theta_0_key in theta_0:
+                theta_0[theta_0_key] = to_half(vae_dict[key], kwargs.get("precision", "fp16") == "fp16")
+        del vae_dict
+
+    ckpt_dir = shared.opts.ckpt_dir or sd_models.model_path
+    filename = kwargs.get("custom_name", "Unnamed_Merge")
+    filename += "." + kwargs.get("checkpoint_format", None)
+    output_modelname = os.path.join(ckpt_dir, filename)
+    shared.state.textinfo = "merge saving"
+    metadata = None
+    if kwargs.get("save_metadata", False):
+        metadata = {"format": "pt", "sd_merge_models": {}}
+        merge_recipe = {
+            "type": "SDNext", # indicate this model was merged with webui's built-in merger
+            "primary_model_hash": primary_model_info.sha256,
+            "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None,
+            "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None,
+            "merge_mode": kwargs.get('merge_mode', None),
+            "alpha": kwargs.get('alpha', None),
+            "beta": kwargs.get('beta', None),
+            "precision": kwargs.get('precision', None),
+            "custom_name": kwargs.get("custom_name", "Unnamed_Merge"),
+        }
+        metadata["sd_merge_recipe"] = json.dumps(merge_recipe)
+
+        def add_model_metadata(checkpoint_info):
+            checkpoint_info.calculate_shorthash()
+            metadata["sd_merge_models"][checkpoint_info.sha256] = {
+                "name": checkpoint_info.name,
+                "legacy_hash": checkpoint_info.hash,
+                "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None)
+            }
+            metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {}))
+
+        add_model_metadata(primary_model_info)
+        if secondary_model_info:
+            add_model_metadata(secondary_model_info)
+        if tertiary_model_info:
+            add_model_metadata(tertiary_model_info)
+        metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"])
+
+    _, extension = os.path.splitext(output_modelname)
+
+    if os.path.exists(output_modelname) and not kwargs.get("overwrite", False):
+        return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Model already exists: {output_modelname}"]
+    if extension.lower() == ".safetensors":
+ 
safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) + else: + torch.save(theta_0, output_modelname) + + t1 = time.time() + shared.log.info(f"Merge complete: saved='{output_modelname}' time={t1-t0:.2f}") + sd_models.list_models() + created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) + if created_model: + created_model.calculate_shorthash() + devices.torch_gc(force=True) + shared.state.end() + return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Model saved to {output_modelname}"] + + +def run_modelconvert(model, checkpoint_formats, precision, conv_type, custom_name, unet_conv, text_encoder_conv, + vae_conv, others_conv, fix_clip): + # position_ids in clip is int64. model_ema.num_updates is int32 + dtypes_to_fp16 = {torch.float32, torch.float64, torch.bfloat16} + dtypes_to_bf16 = {torch.float32, torch.float64, torch.float16} + + def conv_fp16(t: torch.Tensor): + return t.half() if t.dtype in dtypes_to_fp16 else t + + def conv_bf16(t: torch.Tensor): + return t.bfloat16() if t.dtype in dtypes_to_bf16 else t + + def conv_full(t): + return t + + _g_precision_func = { + "full": conv_full, + "fp32": conv_full, + "fp16": conv_fp16, + "bf16": conv_bf16, + } + + def check_weight_type(k: str) -> str: + if k.startswith("model.diffusion_model"): + return "unet" + elif k.startswith("first_stage_model"): + return "vae" + elif k.startswith("cond_stage_model"): + return "clip" + return "other" + + def load_model(path): + if path.endswith(".safetensors"): + m = safetensors.torch.load_file(path, device="cpu") + else: + m = torch.load(path, map_location="cpu") + state_dict = m["state_dict"] if "state_dict" in m else m + return state_dict + + def fix_model(model, fix_clip=False): + # code from model-toolkit + nai_keys = { + 'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.', + 'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.', + 'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.' + } + for k in list(model.keys()): + for r in nai_keys: + if type(k) == str and k.startswith(r): + new_key = k.replace(r, nai_keys[r]) + model[new_key] = model[k] + del model[k] + shared.log.warning(f"Model convert: fixed NovelAI error key: {k}") + break + if fix_clip: + i = "cond_stage_model.transformer.text_model.embeddings.position_ids" + if i in model: + correct = torch.Tensor([list(range(77))]).to(torch.int64) + now = model[i].to(torch.int64) + + broken = correct.ne(now) + broken = [i for i in range(77) if broken[0][i]] + model[i] = correct + if len(broken) != 0: + shared.log.warning(f"Model convert: fixed broken CLiP: {broken}") + + return model + + if model == "": + return "Error: you must choose a model" + if len(checkpoint_formats) == 0: + return "Error: at least choose one model save format" + + extra_opt = { + "unet": unet_conv, + "clip": text_encoder_conv, + "vae": vae_conv, + "other": others_conv + } + shared.state.begin('convert') + model_info = sd_models.checkpoints_list[model] + shared.state.textinfo = f"Loading {model_info.filename}..." 
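# Worked example of the ema-only key mapping used in the conversion loop below (key is illustrative):
#   k     = "model.diffusion_model.input_blocks.0.0.weight"
#   ema_k = "model_ema." + k[6:].replace(".", "")
#         = "model_ema.diffusion_modelinput_blocks00weight"
# i.e. EMA weights live under flattened names, with the "model." prefix swapped and the dots removed.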
+ shared.log.info(f"Model convert loading: {model_info.filename}") + state_dict = load_model(model_info.filename) + + ok = {} # {"state_dict": {}} + + conv_func = _g_precision_func[precision] + + def _hf(wk: str, t: torch.Tensor): + if not isinstance(t, torch.Tensor): + return + w_t = check_weight_type(wk) + conv_t = extra_opt[w_t] + if conv_t == "convert": + ok[wk] = conv_func(t) + elif conv_t == "copy": + ok[wk] = t + elif conv_t == "delete": + return + + shared.log.info("Model convert: running") + if conv_type == "ema-only": + for k in tqdm.tqdm(state_dict): + ema_k = "___" + try: + ema_k = "model_ema." + k[6:].replace(".", "") + except Exception: + pass + if ema_k in state_dict: + _hf(k, state_dict[ema_k]) + elif not k.startswith("model_ema.") or k in ["model_ema.num_updates", "model_ema.decay"]: + _hf(k, state_dict[k]) + elif conv_type == "no-ema": + for k, v in tqdm.tqdm(state_dict.items()): + if "model_ema." not in k: + _hf(k, v) + else: + for k, v in tqdm.tqdm(state_dict.items()): + _hf(k, v) + + ok = fix_model(ok, fix_clip=fix_clip) + output = "" + ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path + save_name = f"{model_info.model_name}-{precision}" + if conv_type != "disabled": + save_name += f"-{conv_type}" + if custom_name != "": + save_name = custom_name + for fmt in checkpoint_formats: + ext = ".safetensors" if fmt == "safetensors" else ".ckpt" + _save_name = save_name + ext + save_path = os.path.join(ckpt_dir, _save_name) + shared.log.info(f"Model convert saving: {save_path}") + if fmt == "safetensors": + safetensors.torch.save_file(ok, save_path) + else: + torch.save({"state_dict": ok}, save_path) + output += f"Checkpoint saved to {save_path}
" + shared.state.end() + return output diff --git a/modules/face_restoration.py b/modules/face_restoration.py index 55e1033c6..d7fc5d1e9 100644 --- a/modules/face_restoration.py +++ b/modules/face_restoration.py @@ -1,17 +1,17 @@ -from modules import shared - - -class FaceRestoration: - def name(self): - return "None" - - def restore(self, np_image): - return np_image - - -def restore_faces(np_image): - face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] - if len(face_restorers) == 0: - return np_image - face_restorer = face_restorers[0] - return face_restorer.restore(np_image) +from modules import shared + + +class FaceRestoration: + def name(self): + return "None" + + def restore(self, np_image): + return np_image + + +def restore_faces(np_image): + face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] + if len(face_restorers) == 0: + return np_image + face_restorer = face_restorers[0] + return face_restorer.restore(np_image) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 9c5e50063..6ee979a7c 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,383 +1,383 @@ -import base64 -import io -import os -import re -import json -from PIL import Image -import gradio as gr -from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, images - - -re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' -re_param = re.compile(re_param_code) -re_imagesize = re.compile(r"^(\d+)x(\d+)$") -re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") # pylint: disable=anomalous-backslash-in-string -type_of_gr_update = type(gr.update()) -paste_fields = {} -registered_param_bindings = [] -debug = shared.log.trace if os.environ.get('SD_PASTE_DEBUG', None) is not None else lambda *args, **kwargs: None -debug('Trace: PASTE') - - -class ParamBinding: - def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): - self.paste_button = paste_button - self.tabname = tabname - self.source_text_component = source_text_component - self.source_image_component = source_image_component - self.source_tabname = source_tabname - self.override_settings_component = override_settings_component - self.paste_field_names = paste_field_names or [] - debug(f'ParamBinding: {vars(self)}') - - -def reset(): - paste_fields.clear() - - -def quote(text): - if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): - return text - return json.dumps(text, ensure_ascii=False) - - -def unquote(text): - if len(text) == 0 or text[0] != '"' or text[-1] != '"': - return text - try: - return json.loads(text) - except Exception: - return text - - -def image_from_url_text(filedata): - if filedata is None: - return None - if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False): - filedata = filedata[0] - if type(filedata) == dict and filedata.get("is_file", False): - filename = filedata["name"] - is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) - if is_in_right_dir: - filename = filename.rsplit('?', 1)[0] - if not os.path.exists(filename): - shared.log.error(f'Image file not found: {filename}') - 
image = Image.new('RGB', (512, 512)) - image.info['parameters'] = f'Image file not found: {filename}' - return image - image = Image.open(filename) - geninfo, _items = images.read_info_from_image(image) - image.info['parameters'] = geninfo - return image - else: - shared.log.warning(f'File access denied: {filename}') - return None - if type(filedata) == list: - if len(filedata) == 0: - return None - filedata = filedata[0] - if type(filedata) == dict: - shared.log.warning('Incorrect filedata received') - return None - if filedata.startswith("data:image/png;base64,"): - filedata = filedata[len("data:image/png;base64,"):] - if filedata.startswith("data:image/webp;base64,"): - filedata = filedata[len("data:image/webp;base64,"):] - if filedata.startswith("data:image/jpeg;base64,"): - filedata = filedata[len("data:image/jpeg;base64,"):] - filedata = base64.decodebytes(filedata.encode('utf-8')) - image = Image.open(io.BytesIO(filedata)) - images.read_info_from_image(image) - return image - - -def add_paste_fields(tabname, init_img, fields, override_settings_component=None): - paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} - # backwards compatibility for existing extensions - import modules.ui - if tabname == 'txt2img': - modules.ui.txt2img_paste_fields = fields - elif tabname == 'img2img': - modules.ui.img2img_paste_fields = fields - - -def create_buttons(tabs_list): - buttons = {} - for tab in tabs_list: - name = tab - if name == 'txt2img': - name = 'Text' - elif name == 'img2img': - name = 'Image' - elif name == 'inpaint': - name = 'Inpaint' - elif name == 'extras': - name = 'Process' - elif name == 'control': - name = 'Control' - buttons[tab] = gr.Button(f"➠ {name}", elem_id=f"{tab}_tab") - return buttons - - -def bind_buttons(buttons, send_image, send_generate_info): - """old function for backwards compatibility; do not use this, use register_paste_params_button""" - for tabname, button in buttons.items(): - source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None - source_tabname = send_generate_info if isinstance(send_generate_info, str) else None - bindings = ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname) - register_paste_params_button(bindings) - - -def register_paste_params_button(binding: ParamBinding): - registered_param_bindings.append(binding) - - -def connect_paste_params_buttons(): - binding: ParamBinding - for binding in registered_param_bindings: - if binding.tabname not in paste_fields: - debug(f"Not not registered: tab={binding.tabname}") - continue - destination_image_component = paste_fields[binding.tabname]["init_img"] - fields = paste_fields[binding.tabname]["fields"] - override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] - destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) - destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) - - if binding.source_image_component and destination_image_component: - if isinstance(binding.source_image_component, gr.Gallery): - func = send_image_and_dimensions if destination_width_component else image_from_url_text - jsfunc = "extract_image_from_gallery" - else: - func = 
send_image_and_dimensions if destination_width_component else lambda x: x - jsfunc = None - binding.paste_button.click( - fn=func, - _js=jsfunc, - inputs=[binding.source_image_component], - outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], - show_progress=False, - ) - if binding.source_text_component is not None and fields is not None: - connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) - if binding.source_tabname is not None and fields is not None: - paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names - binding.paste_button.click( - fn=lambda *x: x, - inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], - outputs=[field for field, name in fields if name in paste_field_names], - ) - binding.paste_button.click( - fn=None, - _js=f"switch_to_{binding.tabname}", - inputs=[], - outputs=[], - show_progress=False, - ) - - -def send_image_and_dimensions(x): - img = x if isinstance(x, Image.Image) else image_from_url_text(x) - if shared.opts.send_size and isinstance(img, Image.Image): - w = img.width - h = img.height - else: - w = gr.update() - h = gr.update() - return img, w, h - - -def find_hypernetwork_key(hypernet_name, hypernet_hash=None): - """Determines the config parameter name to use for the hypernet based on the parameters in the infotext. - Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config - parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to. - If the infotext has no hash, then a hypernet with the same name will be selected instead. 
- """ - hypernet_name = hypernet_name.lower() - if hypernet_hash is not None: - # Try to match the hash in the name - for hypernet_key in shared.hypernetworks.keys(): - result = re_hypernet_hash.search(hypernet_key) - if result is not None and result[1] == hypernet_hash: - return hypernet_key - else: - # Fall back to a hypernet with the same name - for hypernet_key in shared.hypernetworks.keys(): - if hypernet_key.lower().startswith(hypernet_name): - return hypernet_key - - return None - - -def parse_generation_parameters(x: str): - res = {} - if x is None: - return res - remaining = x.replace('\n', ' ').strip() - if len(remaining) == 0: - return res - remaining = x[7:] if x.startswith('Prompt: ') else x - remaining = x[11:] if x.startswith('parameters: ') else x - if 'Steps: ' in remaining and 'Negative prompt: ' not in remaining: - remaining = remaining.replace('Steps: ', 'Negative prompt: Steps: ') - prompt, remaining = remaining.strip().split('Negative prompt: ', maxsplit=1) if 'Negative prompt: ' in remaining else (remaining, '') - res["Prompt"] = prompt.strip() - negative, remaining = remaining.strip().split('Steps: ', maxsplit=1) if 'Steps: ' in remaining else (remaining, None) - res["Negative prompt"] = negative.strip() - if remaining is None: - return res - remaining = f'Steps: {remaining}' - for k, v in re_param.findall(remaining.strip()): - try: - if v[0] == '"' and v[-1] == '"': - v = unquote(v) - m = re_imagesize.match(v) - if m is not None: - res[f"{k}-1"] = m.group(1) - res[f"{k}-2"] = m.group(2) - else: - res[k] = v - except Exception: - pass - if res.get('VAE', None) == 'TAESD': - res["Full quality"] = False - debug(f"Parse prompt: {res}") - return res - - -settings_map = {} - - -infotext_to_setting_name_mapping = [ - ('Backend', 'sd_backend'), - ('Model hash', 'sd_model_checkpoint'), - ('Refiner', 'sd_model_refiner'), - ('VAE', 'sd_vae'), - ('Parser', 'prompt_attention'), - ('Color correction', 'img2img_color_correction'), - # Samplers - ('Sampler Eta', 'scheduler_eta'), - ('Sampler ENSD', 'eta_noise_seed_delta'), - ('Sampler order', 'schedulers_solver_order'), - # Samplers diffusers - ('Sampler beta schedule', 'schedulers_beta_schedule'), - ('Sampler beta start', 'schedulers_beta_start'), - ('Sampler beta end', 'schedulers_beta_end'), - ('Sampler DPM solver', 'schedulers_dpm_solver'), - # Samplers original - ('Sampler brownian', 'schedulers_brownian_noise'), - ('Sampler discard', 'schedulers_discard_penultimate'), - ('Sampler dyn threshold', 'schedulers_use_thresholding'), - ('Sampler karras', 'schedulers_use_karras'), - ('Sampler low order', 'schedulers_use_loworder'), - ('Sampler quantization', 'enable_quantization'), - ('Sampler sigma', 'schedulers_sigma'), - ('Sampler sigma min', 's_min'), - ('Sampler sigma max', 's_max'), - ('Sampler sigma churn', 's_churn'), - ('Sampler sigma uncond', 's_min_uncond'), - ('Sampler sigma noise', 's_noise'), - ('Sampler sigma tmin', 's_tmin'), - ('Sampler ENSM', 'initial_noise_multiplier'), # img2img only - ('UniPC skip type', 'uni_pc_skip_type'), - ('UniPC variant', 'uni_pc_variant'), - # Token Merging - ('Mask weight', 'inpainting_mask_weight'), - ('Token merging ratio', 'token_merging_ratio'), - ('ToMe', 'token_merging_ratio'), - ('ToMe hires', 'token_merging_ratio_hr'), - ('ToMe img2img', 'token_merging_ratio_img2img'), -] - - -def create_override_settings_dict(text_pairs): - res = {} - params = {} - for pair in text_pairs: - k, v = pair.split(":", maxsplit=1) - params[k] = v.strip() - for param_name, setting_name in 
infotext_to_setting_name_mapping: - value = params.get(param_name, None) - if value is None: - continue - res[setting_name] = shared.opts.cast_value(setting_name, value) - return res - - -def connect_paste(button, local_paste_fields, input_comp, override_settings_component, tabname): - - def paste_func(prompt): - if prompt is None or len(prompt.strip()) == 0 and not shared.cmd_opts.hide_ui_dir_config: - filename = os.path.join(data_path, "params.txt") - if os.path.exists(filename): - with open(filename, "r", encoding="utf8") as file: - prompt = file.read() - shared.log.debug(f'Paste prompt: type="params" prompt="{prompt}"') - else: - prompt = '' - else: - shared.log.debug(f'Paste prompt: type="current" prompt="{prompt}"') - params = parse_generation_parameters(prompt) - script_callbacks.infotext_pasted_callback(prompt, params) - res = [] - applied = {} - for output, key in local_paste_fields: - if callable(key): - v = key(params) - else: - v = params.get(key, None) - if v is None: - res.append(gr.update()) - elif isinstance(v, type_of_gr_update): - res.append(v) - applied[key] = v - else: - try: - valtype = type(output.value) - if valtype == bool and v == "False": - val = False - else: - val = valtype(v) - res.append(gr.update(value=val)) - applied[key] = val - except Exception: - res.append(gr.update()) - debug(f"Parse apply: {applied}") - return res - - if override_settings_component is not None: - def paste_settings(params): - vals = {} - for param_name, setting_name in infotext_to_setting_name_mapping: - v = params.get(param_name, None) - if v is None: - continue - if shared.opts.disable_weights_auto_swap: - if setting_name == "sd_model_checkpoint" or setting_name == 'sd_model_refiner' or setting_name == 'sd_backend' or setting_name == 'sd_vae': - continue - v = shared.opts.cast_value(setting_name, v) - current_value = getattr(shared.opts, setting_name, None) - if v == current_value: - continue - if type(current_value) == str and v == os.path.splitext(current_value)[0]: - continue - vals[param_name] = v - vals_pairs = [f"{k}: {v}" for k, v in vals.items()] - shared.log.debug(f'Settings overrides: {vals_pairs}') - return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0) - local_paste_fields = local_paste_fields + [(override_settings_component, paste_settings)] - - button.click( - fn=paste_func, - inputs=[input_comp], - outputs=[x[0] for x in local_paste_fields], - show_progress=False, - ) - button.click( - fn=None, - _js=f"recalculate_prompts_{tabname}", - inputs=[], - outputs=[], - show_progress=False, - ) +import base64 +import io +import os +import re +import json +from PIL import Image +import gradio as gr +from modules.paths import data_path +from modules import shared, ui_tempdir, script_callbacks, images + + +re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") +re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") # pylint: disable=anomalous-backslash-in-string +type_of_gr_update = type(gr.update()) +paste_fields = {} +registered_param_bindings = [] +debug = shared.log.trace if os.environ.get('SD_PASTE_DEBUG', None) is not None else lambda *args, **kwargs: None +debug('Trace: PASTE') + + +class ParamBinding: + def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): + self.paste_button = paste_button + self.tabname = 
tabname + self.source_text_component = source_text_component + self.source_image_component = source_image_component + self.source_tabname = source_tabname + self.override_settings_component = override_settings_component + self.paste_field_names = paste_field_names or [] + debug(f'ParamBinding: {vars(self)}') + + +def reset(): + paste_fields.clear() + + +def quote(text): + if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): + return text + return json.dumps(text, ensure_ascii=False) + + +def unquote(text): + if len(text) == 0 or text[0] != '"' or text[-1] != '"': + return text + try: + return json.loads(text) + except Exception: + return text + + +def image_from_url_text(filedata): + if filedata is None: + return None + if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False): + filedata = filedata[0] + if type(filedata) == dict and filedata.get("is_file", False): + filename = filedata["name"] + is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) + if is_in_right_dir: + filename = filename.rsplit('?', 1)[0] + if not os.path.exists(filename): + shared.log.error(f'Image file not found: {filename}') + image = Image.new('RGB', (512, 512)) + image.info['parameters'] = f'Image file not found: {filename}' + return image + image = Image.open(filename) + geninfo, _items = images.read_info_from_image(image) + image.info['parameters'] = geninfo + return image + else: + shared.log.warning(f'File access denied: {filename}') + return None + if type(filedata) == list: + if len(filedata) == 0: + return None + filedata = filedata[0] + if type(filedata) == dict: + shared.log.warning('Incorrect filedata received') + return None + if filedata.startswith("data:image/png;base64,"): + filedata = filedata[len("data:image/png;base64,"):] + if filedata.startswith("data:image/webp;base64,"): + filedata = filedata[len("data:image/webp;base64,"):] + if filedata.startswith("data:image/jpeg;base64,"): + filedata = filedata[len("data:image/jpeg;base64,"):] + filedata = base64.decodebytes(filedata.encode('utf-8')) + image = Image.open(io.BytesIO(filedata)) + images.read_info_from_image(image) + return image + + +def add_paste_fields(tabname, init_img, fields, override_settings_component=None): + paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} + # backwards compatibility for existing extensions + import modules.ui + if tabname == 'txt2img': + modules.ui.txt2img_paste_fields = fields + elif tabname == 'img2img': + modules.ui.img2img_paste_fields = fields + + +def create_buttons(tabs_list): + buttons = {} + for tab in tabs_list: + name = tab + if name == 'txt2img': + name = 'Text' + elif name == 'img2img': + name = 'Image' + elif name == 'inpaint': + name = 'Inpaint' + elif name == 'extras': + name = 'Process' + elif name == 'control': + name = 'Control' + buttons[tab] = gr.Button(f"➠ {name}", elem_id=f"{tab}_tab") + return buttons + + +def bind_buttons(buttons, send_image, send_generate_info): + """old function for backwards compatibility; do not use this, use register_paste_params_button""" + for tabname, button in buttons.items(): + source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None + source_tabname = send_generate_info if isinstance(send_generate_info, str) else None + bindings = ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, 
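# send_generate_info is intentionally polymorphic: a gr.Component routes pasted text
# through source_text_component, while a tab-name string routes through source_tabname.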
source_image_component=send_image, source_tabname=source_tabname) + register_paste_params_button(bindings) + + +def register_paste_params_button(binding: ParamBinding): + registered_param_bindings.append(binding) + + +def connect_paste_params_buttons(): + binding: ParamBinding + for binding in registered_param_bindings: + if binding.tabname not in paste_fields: + debug(f"Not not registered: tab={binding.tabname}") + continue + destination_image_component = paste_fields[binding.tabname]["init_img"] + fields = paste_fields[binding.tabname]["fields"] + override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] + destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) + destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) + + if binding.source_image_component and destination_image_component: + if isinstance(binding.source_image_component, gr.Gallery): + func = send_image_and_dimensions if destination_width_component else image_from_url_text + jsfunc = "extract_image_from_gallery" + else: + func = send_image_and_dimensions if destination_width_component else lambda x: x + jsfunc = None + binding.paste_button.click( + fn=func, + _js=jsfunc, + inputs=[binding.source_image_component], + outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + show_progress=False, + ) + if binding.source_text_component is not None and fields is not None: + connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) + if binding.source_tabname is not None and fields is not None: + paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names + binding.paste_button.click( + fn=lambda *x: x, + inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], + ) + binding.paste_button.click( + fn=None, + _js=f"switch_to_{binding.tabname}", + inputs=[], + outputs=[], + show_progress=False, + ) + + +def send_image_and_dimensions(x): + img = x if isinstance(x, Image.Image) else image_from_url_text(x) + if shared.opts.send_size and isinstance(img, Image.Image): + w = img.width + h = img.height + else: + w = gr.update() + h = gr.update() + return img, w, h + + +def find_hypernetwork_key(hypernet_name, hypernet_hash=None): + """Determines the config parameter name to use for the hypernet based on the parameters in the infotext. + Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config + parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to. + If the infotext has no hash, then a hypernet with the same name will be selected instead. 
+ """ + hypernet_name = hypernet_name.lower() + if hypernet_hash is not None: + # Try to match the hash in the name + for hypernet_key in shared.hypernetworks.keys(): + result = re_hypernet_hash.search(hypernet_key) + if result is not None and result[1] == hypernet_hash: + return hypernet_key + else: + # Fall back to a hypernet with the same name + for hypernet_key in shared.hypernetworks.keys(): + if hypernet_key.lower().startswith(hypernet_name): + return hypernet_key + + return None + + +def parse_generation_parameters(x: str): + res = {} + if x is None: + return res + remaining = x.replace('\n', ' ').strip() + if len(remaining) == 0: + return res + remaining = x[7:] if x.startswith('Prompt: ') else x + remaining = x[11:] if x.startswith('parameters: ') else x + if 'Steps: ' in remaining and 'Negative prompt: ' not in remaining: + remaining = remaining.replace('Steps: ', 'Negative prompt: Steps: ') + prompt, remaining = remaining.strip().split('Negative prompt: ', maxsplit=1) if 'Negative prompt: ' in remaining else (remaining, '') + res["Prompt"] = prompt.strip() + negative, remaining = remaining.strip().split('Steps: ', maxsplit=1) if 'Steps: ' in remaining else (remaining, None) + res["Negative prompt"] = negative.strip() + if remaining is None: + return res + remaining = f'Steps: {remaining}' + for k, v in re_param.findall(remaining.strip()): + try: + if v[0] == '"' and v[-1] == '"': + v = unquote(v) + m = re_imagesize.match(v) + if m is not None: + res[f"{k}-1"] = m.group(1) + res[f"{k}-2"] = m.group(2) + else: + res[k] = v + except Exception: + pass + if res.get('VAE', None) == 'TAESD': + res["Full quality"] = False + debug(f"Parse prompt: {res}") + return res + + +settings_map = {} + + +infotext_to_setting_name_mapping = [ + ('Backend', 'sd_backend'), + ('Model hash', 'sd_model_checkpoint'), + ('Refiner', 'sd_model_refiner'), + ('VAE', 'sd_vae'), + ('Parser', 'prompt_attention'), + ('Color correction', 'img2img_color_correction'), + # Samplers + ('Sampler Eta', 'scheduler_eta'), + ('Sampler ENSD', 'eta_noise_seed_delta'), + ('Sampler order', 'schedulers_solver_order'), + # Samplers diffusers + ('Sampler beta schedule', 'schedulers_beta_schedule'), + ('Sampler beta start', 'schedulers_beta_start'), + ('Sampler beta end', 'schedulers_beta_end'), + ('Sampler DPM solver', 'schedulers_dpm_solver'), + # Samplers original + ('Sampler brownian', 'schedulers_brownian_noise'), + ('Sampler discard', 'schedulers_discard_penultimate'), + ('Sampler dyn threshold', 'schedulers_use_thresholding'), + ('Sampler karras', 'schedulers_use_karras'), + ('Sampler low order', 'schedulers_use_loworder'), + ('Sampler quantization', 'enable_quantization'), + ('Sampler sigma', 'schedulers_sigma'), + ('Sampler sigma min', 's_min'), + ('Sampler sigma max', 's_max'), + ('Sampler sigma churn', 's_churn'), + ('Sampler sigma uncond', 's_min_uncond'), + ('Sampler sigma noise', 's_noise'), + ('Sampler sigma tmin', 's_tmin'), + ('Sampler ENSM', 'initial_noise_multiplier'), # img2img only + ('UniPC skip type', 'uni_pc_skip_type'), + ('UniPC variant', 'uni_pc_variant'), + # Token Merging + ('Mask weight', 'inpainting_mask_weight'), + ('Token merging ratio', 'token_merging_ratio'), + ('ToMe', 'token_merging_ratio'), + ('ToMe hires', 'token_merging_ratio_hr'), + ('ToMe img2img', 'token_merging_ratio_img2img'), +] + + +def create_override_settings_dict(text_pairs): + res = {} + params = {} + for pair in text_pairs: + k, v = pair.split(":", maxsplit=1) + params[k] = v.strip() + for param_name, setting_name in 
infotext_to_setting_name_mapping: + value = params.get(param_name, None) + if value is None: + continue + res[setting_name] = shared.opts.cast_value(setting_name, value) + return res + + +def connect_paste(button, local_paste_fields, input_comp, override_settings_component, tabname): + + def paste_func(prompt): + if prompt is None or len(prompt.strip()) == 0 and not shared.cmd_opts.hide_ui_dir_config: + filename = os.path.join(data_path, "params.txt") + if os.path.exists(filename): + with open(filename, "r", encoding="utf8") as file: + prompt = file.read() + shared.log.debug(f'Paste prompt: type="params" prompt="{prompt}"') + else: + prompt = '' + else: + shared.log.debug(f'Paste prompt: type="current" prompt="{prompt}"') + params = parse_generation_parameters(prompt) + script_callbacks.infotext_pasted_callback(prompt, params) + res = [] + applied = {} + for output, key in local_paste_fields: + if callable(key): + v = key(params) + else: + v = params.get(key, None) + if v is None: + res.append(gr.update()) + elif isinstance(v, type_of_gr_update): + res.append(v) + applied[key] = v + else: + try: + valtype = type(output.value) + if valtype == bool and v == "False": + val = False + else: + val = valtype(v) + res.append(gr.update(value=val)) + applied[key] = val + except Exception: + res.append(gr.update()) + debug(f"Parse apply: {applied}") + return res + + if override_settings_component is not None: + def paste_settings(params): + vals = {} + for param_name, setting_name in infotext_to_setting_name_mapping: + v = params.get(param_name, None) + if v is None: + continue + if shared.opts.disable_weights_auto_swap: + if setting_name == "sd_model_checkpoint" or setting_name == 'sd_model_refiner' or setting_name == 'sd_backend' or setting_name == 'sd_vae': + continue + v = shared.opts.cast_value(setting_name, v) + current_value = getattr(shared.opts, setting_name, None) + if v == current_value: + continue + if type(current_value) == str and v == os.path.splitext(current_value)[0]: + continue + vals[param_name] = v + vals_pairs = [f"{k}: {v}" for k, v in vals.items()] + shared.log.debug(f'Settings overrides: {vals_pairs}') + return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0) + local_paste_fields = local_paste_fields + [(override_settings_component, paste_settings)] + + button.click( + fn=paste_func, + inputs=[input_comp], + outputs=[x[0] for x in local_paste_fields], + show_progress=False, + ) + button.click( + fn=None, + _js=f"recalculate_prompts_{tabname}", + inputs=[], + outputs=[], + show_progress=False, + ) diff --git a/modules/hashes.py b/modules/hashes.py index 892368d3c..607140fe6 100644 --- a/modules/hashes.py +++ b/modules/hashes.py @@ -1,107 +1,107 @@ -import copy -import hashlib -import os.path -from rich import progress, errors -from modules import shared -from modules.paths import data_path - -cache_filename = os.path.join(data_path, "cache.json") -cache_data = None -progress_ok = True - -def dump_cache(): - shared.writefile(cache_data, cache_filename) - - -def cache(subsection): - global cache_data # pylint: disable=global-statement - if cache_data is None: - cache_data = {} if not os.path.isfile(cache_filename) else shared.readfile(cache_filename, lock=True) - s = cache_data.get(subsection, {}) - cache_data[subsection] = s - return s - - -def calculate_sha256(filename, quiet=False): - global progress_ok # pylint: disable=global-statement - hash_sha256 = hashlib.sha256() - blksize = 1024 * 1024 - if not quiet: - if progress_ok: - try: - with 
progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', auto_refresh=True, console=shared.console) as f: - for chunk in iter(lambda: f.read(blksize), b""): - hash_sha256.update(chunk) - except errors.LiveError: - shared.log.warning('Hash: attempting to use function in a thread') - progress_ok = False - if not progress_ok: - with open(filename, 'rb') as f: - for chunk in iter(lambda: f.read(blksize), b""): - hash_sha256.update(chunk) - else: - with open(filename, 'rb') as f: - for chunk in iter(lambda: f.read(blksize), b""): - hash_sha256.update(chunk) - return hash_sha256.hexdigest() - - -def sha256_from_cache(filename, title, use_addnet_hash=False): - hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") - if title not in hashes: - return None - cached_sha256 = hashes[title].get("sha256", None) - cached_mtime = hashes[title].get("mtime", 0) - ondisk_mtime = os.path.getmtime(filename) if os.path.isfile(filename) else 0 - if ondisk_mtime > cached_mtime or cached_sha256 is None: - return None - return cached_sha256 - - -def sha256(filename, title, use_addnet_hash=False): - global progress_ok # pylint: disable=global-statement - hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") - sha256_value = sha256_from_cache(filename, title, use_addnet_hash) - if sha256_value is not None: - return sha256_value - if shared.cmd_opts.no_hashing: - return None - if not os.path.isfile(filename): - return None - orig_state = copy.deepcopy(shared.state) - shared.state.begin("hash") - if use_addnet_hash: - if progress_ok: - try: - with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', auto_refresh=True, console=shared.console) as f: - sha256_value = addnet_hash_safetensors(f) - except errors.LiveError: - shared.log.warning('Hash: attempting to use function in a thread') - progress_ok = False - if not progress_ok: - with open(filename, 'rb') as f: - sha256_value = addnet_hash_safetensors(f) - else: - sha256_value = calculate_sha256(filename) - hashes[title] = { - "mtime": os.path.getmtime(filename), - "sha256": sha256_value - } - shared.state.end() - shared.state = orig_state - dump_cache() - return sha256_value - - -def addnet_hash_safetensors(b): - """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" - hash_sha256 = hashlib.sha256() - blksize = 1024 * 1024 - b.seek(0) - header = b.read(8) - n = int.from_bytes(header, "little") - offset = n + 8 - b.seek(offset) - for chunk in iter(lambda: b.read(blksize), b""): - hash_sha256.update(chunk) - return hash_sha256.hexdigest() +import copy +import hashlib +import os.path +from rich import progress, errors +from modules import shared +from modules.paths import data_path + +cache_filename = os.path.join(data_path, "cache.json") +cache_data = None +progress_ok = True + +def dump_cache(): + shared.writefile(cache_data, cache_filename) + + +def cache(subsection): + global cache_data # pylint: disable=global-statement + if cache_data is None: + cache_data = {} if not os.path.isfile(cache_filename) else shared.readfile(cache_filename, lock=True) + s = cache_data.get(subsection, {}) + cache_data[subsection] = s + return s + + +def calculate_sha256(filename, quiet=False): + global progress_ok # pylint: disable=global-statement + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + if not quiet: + if progress_ok: + try: + with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: 
[yellow]{filename}', auto_refresh=True, console=shared.console) as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + except errors.LiveError: + shared.log.warning('Hash: attempting to use function in a thread') + progress_ok = False + if not progress_ok: + with open(filename, 'rb') as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + else: + with open(filename, 'rb') as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + return hash_sha256.hexdigest() + + +def sha256_from_cache(filename, title, use_addnet_hash=False): + hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") + if title not in hashes: + return None + cached_sha256 = hashes[title].get("sha256", None) + cached_mtime = hashes[title].get("mtime", 0) + ondisk_mtime = os.path.getmtime(filename) if os.path.isfile(filename) else 0 + if ondisk_mtime > cached_mtime or cached_sha256 is None: + return None + return cached_sha256 + + +def sha256(filename, title, use_addnet_hash=False): + global progress_ok # pylint: disable=global-statement + hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") + sha256_value = sha256_from_cache(filename, title, use_addnet_hash) + if sha256_value is not None: + return sha256_value + if shared.cmd_opts.no_hashing: + return None + if not os.path.isfile(filename): + return None + orig_state = copy.deepcopy(shared.state) + shared.state.begin("hash") + if use_addnet_hash: + if progress_ok: + try: + with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', auto_refresh=True, console=shared.console) as f: + sha256_value = addnet_hash_safetensors(f) + except errors.LiveError: + shared.log.warning('Hash: attempting to use function in a thread') + progress_ok = False + if not progress_ok: + with open(filename, 'rb') as f: + sha256_value = addnet_hash_safetensors(f) + else: + sha256_value = calculate_sha256(filename) + hashes[title] = { + "mtime": os.path.getmtime(filename), + "sha256": sha256_value + } + shared.state.end() + shared.state = orig_state + dump_cache() + return sha256_value + + +def addnet_hash_safetensors(b): + """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + b.seek(0) + header = b.read(8) + n = int.from_bytes(header, "little") + offset = n + 8 + b.seek(offset) + for chunk in iter(lambda: b.read(blksize), b""): + hash_sha256.update(chunk) + return hash_sha256.hexdigest() diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 54f82c36a..5afa572be 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,755 +1,755 @@ -import datetime -import html -import os -from collections import deque -import inspect -from statistics import stdev, mean -from rich import progress -import tqdm -import torch -from torch import einsum -from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from einops import rearrange, repeat -from ldm.util import default -from modules import devices, processing, sd_models, shared, hashes, errors -import modules.textual_inversion.dataset -from modules.textual_inversion import textual_inversion, ti_logging -from modules.textual_inversion.learn_schedule import LearnRateScheduler - - -optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in 
inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"} - -class HypernetworkModule(torch.nn.Module): - activation_dict = { - "linear": torch.nn.Identity, - "relu": torch.nn.ReLU, - "leakyrelu": torch.nn.LeakyReLU, - "elu": torch.nn.ELU, - "swish": torch.nn.Hardswish, - "tanh": torch.nn.Tanh, - "sigmoid": torch.nn.Sigmoid, - } - activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) - - def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', - add_layer_norm=False, activate_output=False, dropout_structure=None): - super().__init__() - self.multiplier = 1.0 - assert layer_structure is not None, "layer_structure must not be None" - assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" - assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" - linears = [] - for i in range(len(layer_structure) - 1): - # Add a fully-connected layer - linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) - # Add an activation func except last layer - if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output): - pass - elif activation_func in self.activation_dict: - linears.append(self.activation_dict[activation_func]()) - else: - raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') - # Add layer normalization - if add_layer_norm: - linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) - # Everything should be now parsed into dropout structure, and applied here. - # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0. - if dropout_structure is not None and dropout_structure[i+1] > 0: - assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!" - linears.append(torch.nn.Dropout(p=dropout_structure[i+1])) - # Code explanation : [1, 2, 1] -> dropout is missing when last_layer_dropout is false. [1, 2, 2, 1] -> [0, 0.3, 0, 0], when its True, [0, 0.3, 0.3, 0]. 
- self.linear = torch.nn.Sequential(*linears) - if state_dict is not None: - self.fix_old_state_dict(state_dict) - self.load_state_dict(state_dict) - else: - for layer in self.linear: - if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: - w, b = layer.weight.data, layer.bias.data - if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: - normal_(w, mean=0.0, std=0.01) - normal_(b, mean=0.0, std=0) - elif weight_init == 'XavierUniform': - xavier_uniform_(w) - zeros_(b) - elif weight_init == 'XavierNormal': - xavier_normal_(w) - zeros_(b) - elif weight_init == 'KaimingUniform': - kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') - zeros_(b) - elif weight_init == 'KaimingNormal': - kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') - zeros_(b) - else: - raise KeyError(f"Key {weight_init} is not defined as initialization!") - self.to(devices.device) - - def fix_old_state_dict(self, state_dict): - changes = { - 'linear1.bias': 'linear.0.bias', - 'linear1.weight': 'linear.0.weight', - 'linear2.bias': 'linear.1.bias', - 'linear2.weight': 'linear.1.weight', - } - for fr, to in changes.items(): - x = state_dict.get(fr, None) - if x is None: - continue - del state_dict[fr] - state_dict[to] = x - - def forward(self, x): - return x + self.linear(x) * (self.multiplier if not self.training else 1) - - def trainables(self): - layer_structure = [] - for layer in self.linear: - if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: - layer_structure += [layer.weight, layer.bias] - return layer_structure - - -#param layer_structure : sequence used for length, use_dropout : controlling boolean, last_layer_dropout : for compatibility check. 
-def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout): - if layer_structure is None: - layer_structure = [1, 2, 1] - if not use_dropout: - return [0] * len(layer_structure) - dropout_values = [0] - dropout_values.extend([0.3] * (len(layer_structure) - 3)) - if last_layer_dropout: - dropout_values.append(0.3) - else: - dropout_values.append(0) - dropout_values.append(0) - return dropout_values - - -class Hypernetwork: - filename = None - name = None - - def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): - self.filename = None - self.name = name - self.layers = {} - self.step = 0 - self.sd_checkpoint = None - self.sd_checkpoint_name = None - self.layer_structure = layer_structure - self.activation_func = activation_func - self.weight_init = weight_init - self.add_layer_norm = add_layer_norm - self.use_dropout = use_dropout - self.activate_output = activate_output - self.last_layer_dropout = kwargs.get('last_layer_dropout', True) - self.dropout_structure = kwargs.get('dropout_structure', None) - if self.dropout_structure is None: - self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) - self.optimizer_name = None - self.optimizer_state_dict = None - self.optional_info = None - for size in enable_sizes or []: - self.layers[size] = ( - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, - self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, - self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), - ) - self.eval() - - def weights(self): - res = [] - for layers in self.layers.values(): - for layer in layers: - res += layer.parameters() - return res - - def train(self, mode=True): - for layers in self.layers.values(): - for layer in layers: - layer.train(mode=mode) - for param in layer.parameters(): - param.requires_grad = mode - - def to(self, device): - for layers in self.layers.values(): - for layer in layers: - layer.to(device) - - return self - - def set_multiplier(self, multiplier): - for layers in self.layers.values(): - for layer in layers: - layer.multiplier = multiplier - - return self - - def eval(self): - for layers in self.layers.values(): - for layer in layers: - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def save(self, filename): - state_dict = {} - optimizer_saved_dict = {} - for k, v in self.layers.items(): - state_dict[k] = (v[0].state_dict(), v[1].state_dict()) - state_dict['step'] = self.step - state_dict['name'] = self.name - state_dict['layer_structure'] = self.layer_structure - state_dict['activation_func'] = self.activation_func - state_dict['is_layer_norm'] = self.add_layer_norm - state_dict['weight_initialization'] = self.weight_init - state_dict['sd_checkpoint'] = self.sd_checkpoint - state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name - state_dict['activate_output'] = self.activate_output - state_dict['use_dropout'] = self.use_dropout - state_dict['dropout_structure'] = self.dropout_structure - state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout - state_dict['optional_info'] = self.optional_info if self.optional_info else None - 
if self.optimizer_name is not None: - optimizer_saved_dict['optimizer_name'] = self.optimizer_name - torch.save(state_dict, filename) - if shared.opts.save_optimizer_state and self.optimizer_state_dict: - optimizer_saved_dict['hash'] = self.shorthash() - optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict - torch.save(optimizer_saved_dict, f"{filename}.optim") - - def load(self, filename): - self.filename = filename if os.path.exists(filename) else os.path.join(shared.opts.hypernetwork_dir, filename) - if self.name is None: - self.name = os.path.splitext(os.path.basename(self.filename))[0] - with progress.open(self.filename, 'rb', description=f'Load hypernetwork: [cyan]{self.filename}', auto_refresh=True, console=shared.console) as f: - state_dict = torch.load(f, map_location='cpu') - self.layer_structure = state_dict.get('layer_structure', [1, 2, 1]) - self.optional_info = state_dict.get('optional_info', None) - self.activation_func = state_dict.get('activation_func', None) - self.weight_init = state_dict.get('weight_initialization', 'Normal') - self.add_layer_norm = state_dict.get('is_layer_norm', False) - self.dropout_structure = state_dict.get('dropout_structure', None) - self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False) - self.activate_output = state_dict.get('activate_output', True) - self.last_layer_dropout = state_dict.get('last_layer_dropout', False) - # Dropout structure should have same length as layer structure, Every digits should be in [0,1), and last digit must be 0. - if self.dropout_structure is None: - self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) - if shared.opts.print_hypernet_extra: - if self.optional_info is not None: - print(f" INFO:\n {self.optional_info}\n") - print(f" Layer structure: {self.layer_structure}") - print(f" Activation function: {self.activation_func}") - print(f" Weight initialization: {self.weight_init}") - print(f" Layer norm: {self.add_layer_norm}") - print(f" Dropout usage: {self.use_dropout}" ) - print(f" Activate last layer: {self.activate_output}") - print(f" Dropout structure: {self.dropout_structure}") - optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {} - if self.shorthash() == optimizer_saved_dict.get('hash', None): - self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) - else: - self.optimizer_state_dict = None - if self.optimizer_state_dict: - self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW') - if shared.opts.print_hypernet_extra: - print("Load existing optimizer from checkpoint") - print(f"Optimizer name is {self.optimizer_name}") - else: - self.optimizer_name = "AdamW" - if shared.opts.print_hypernet_extra: - print("No saved optimizer exists in checkpoint") - for size, sd in state_dict.items(): - if type(size) == int: - self.layers[size] = ( - HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, - self.add_layer_norm, self.activate_output, self.dropout_structure), - HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, - self.add_layer_norm, self.activate_output, self.dropout_structure), - ) - self.name = state_dict.get('name', self.name) - self.step = state_dict.get('step', 0) - self.sd_checkpoint = state_dict.get('sd_checkpoint', None) - 
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None) - self.eval() - - def shorthash(self): - sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}') - return sha256[0:10] if sha256 else None - - -def list_hypernetworks(path): - res = {} - def list_folder(folder): - for filename in os.listdir(folder): - fn = os.path.join(folder, filename) - if os.path.isfile(fn) and fn.lower().endswith(".pt"): - name = os.path.splitext(os.path.basename(fn))[0] - res[name] = fn - elif os.path.isdir(fn) and not fn.startswith('.'): - list_folder(fn) - - list_folder(path) - return res - - -def load_hypernetwork(name): - path = shared.hypernetworks.get(name, None) - if path is None: - return None - hypernetwork = Hypernetwork() - try: - hypernetwork.load(path) - except Exception as e: - errors.display(e, f'hypernetwork load: {path}') - return None - return hypernetwork - - -def load_hypernetworks(names, multipliers=None): - already_loaded = {} - for hypernetwork in shared.loaded_hypernetworks: - if hypernetwork.name in names: - already_loaded[hypernetwork.name] = hypernetwork - shared.loaded_hypernetworks.clear() - for i, name in enumerate(names): - hypernetwork = already_loaded.get(name, None) - if hypernetwork is None: - hypernetwork = load_hypernetwork(name) - if hypernetwork is None: - continue - hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0) - shared.loaded_hypernetworks.append(hypernetwork) - - -def find_closest_hypernetwork_name(search: str): - if not search: - return None - search = search.lower() - applicable = [name for name in shared.hypernetworks if search in name.lower()] - if not applicable: - return None - applicable = sorted(applicable, key=lambda name: len(name)) - return applicable[0] - - -def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): - hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None) - if hypernetwork_layers is None: - return context_k, context_v - if layer is not None: - layer.hyper_k = hypernetwork_layers[0] - layer.hyper_v = hypernetwork_layers[1] - context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) - context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) - return context_k, context_v - - -def apply_hypernetworks(hypernetworks, context, layer=None): - context_k = context - context_v = context - for hypernetwork in hypernetworks: - context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer) - return context_k, context_v - - -def attention_CrossAttention_forward(self, x, context=None, mask=None): - h = self.heads - q = self.to_q(x) - context = default(context, x) - context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self) - k = self.to_k(context_k) - v = self.to_v(context_v) - q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - if mask is not None: - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -def stack_conds(conds): - if len(conds) == 1: - return torch.stack(conds) - # same as in reconstruct_multicond_batch - token_count = max([x.shape[0] for x in conds]) - for i in range(len(conds)): - if conds[i].shape[0] != token_count: - last_vector = conds[i][-1:] - last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1]) - conds[i] = torch.vstack([conds[i], last_vector_repeated]) - return torch.stack(conds) - - -def statistics(data): - if len(data) < 2: - std = 0 - else: - std = stdev(data) - total_information = f"loss:{mean(data):.3f}" + "\u00B1" + f"({std/ (len(data) ** 0.5):.3f})" - recent_data = data[-32:] - if len(recent_data) < 2: - std = 0 - else: - std = stdev(recent_data) - recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + "\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})" - return total_information, recent_information - - -def report_statistics(loss_info:dict): - keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x])) - for key in keys: - try: - print("Loss statistics for file " + key) - info, recent = statistics(list(loss_info[key])) - print(info) - print(recent) - except Exception as e: - print(e) - - -def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): - # Remove illegal characters from name. - name = "".join( x for x in name if (x.isalnum() or x in "._- ")) - assert name, "Name cannot be empty!" - fn = os.path.join(shared.opts.hypernetwork_dir, f"{name}.pt") - if not overwrite_old: - assert not os.path.exists(fn), f"file {fn} already exists" - if type(layer_structure) == str: - layer_structure = [float(x.strip()) for x in layer_structure.split(",")] - if use_dropout and dropout_structure and type(dropout_structure) == str: - dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")] - else: - dropout_structure = [0] * len(layer_structure) - hypernet = modules.hypernetworks.hypernetwork.Hypernetwork( - name=name, - enable_sizes=[int(x) for x in enable_sizes], - layer_structure=layer_structure, - activation_func=activation_func, - weight_init=weight_init, - add_layer_norm=add_layer_norm, - use_dropout=use_dropout, - dropout_structure=dropout_structure - ) - hypernet.save(fn) - shared.reload_hypernetworks() - return name - - -def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # pylint: disable=unused-argument - # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
- from modules import images, sd_hijack_checkpoint - - save_hypernetwork_every = save_hypernetwork_every or 0 - create_image_every = create_image_every or 0 - template_file = textual_inversion.textual_inversion_templates.get(template_filename, None) - textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, name="hypernetwork") - template_file = template_file.path - - path = shared.hypernetworks.get(hypernetwork_name, None) - hypernetwork = Hypernetwork() - hypernetwork.load(path) - shared.loaded_hypernetworks = [hypernetwork] - - shared.state.job = "train" - shared.state.textinfo = "Initializing hypernetwork training..." - shared.state.job_count = steps - - hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0] - filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt') - - log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name) - unload = shared.opts.unload_models_when_training - - if save_hypernetwork_every > 0: - hypernetwork_dir = os.path.join(log_directory, "hypernetworks") - os.makedirs(hypernetwork_dir, exist_ok=True) - else: - hypernetwork_dir = None - - if create_image_every > 0: - images_dir = os.path.join(log_directory, "images") - os.makedirs(images_dir, exist_ok=True) - else: - images_dir = None - - checkpoint = sd_models.select_checkpoint() - - initial_step = hypernetwork.step or 0 - if initial_step >= steps: - shared.state.textinfo = "Model has already been trained beyond specified max steps" - return hypernetwork, filename - - scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - - clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None - if clip_grad: - clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) - - if shared.opts.training_enable_tensorboard: - tensorboard_writer = textual_inversion.tensorboard_setup(log_directory) - - # dataset loading may take a while, so input validations and early returns should be done before this - shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
- - pin_memory = shared.opts.pin_memory - - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight) - - if shared.opts.save_training_settings_to_txt: - saved_params = dict( - model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), - **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]} - ) - ti_logging.save_settings_to_file(log_directory, {**saved_params, **locals()}) - - latent_sampling_method = ds.latent_sampling_method - - dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory) - - old_parallel_processing_allowed = shared.parallel_processing_allowed - - if unload: - shared.parallel_processing_allowed = False - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - - weights = hypernetwork.weights() - hypernetwork.train() - - # Here we use optimizer from saved HN, or we can specify as UI option. - if hypernetwork.optimizer_name in optimizer_dict: - optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate) - optimizer_name = hypernetwork.optimizer_name - else: - print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!") - optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate) - optimizer_name = 'AdamW' - - if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer. 
- try: - optimizer.load_state_dict(hypernetwork.optimizer_state_dict) - except RuntimeError as e: - print("Cannot resume from saved optimizer!") - print(e) - - scaler = torch.cuda.amp.GradScaler() - - batch_size = ds.batch_size - gradient_step = ds.gradient_step - # n steps = batch_size * gradient_step * n image processed - steps_per_epoch = len(ds) // batch_size // gradient_step - max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step - loss_step = 0 - _loss_step = 0 #internal - # size = len(ds.indexes) - # loss_dict = defaultdict(lambda : deque(maxlen = 1024)) - loss_logging = deque(maxlen=len(ds) * 3) # this should be configurable parameter, this is 3 * epoch(dataset size) - # losses = torch.zeros((size,)) - # previous_mean_losses = [0] - # previous_mean_loss = 0 - # print("Mean loss of {} elements".format(size)) - - _steps_without_grad = 0 - - last_saved_file = "" - last_saved_image = "" - forced_filename = "" - - pbar = tqdm.tqdm(total=steps - initial_step) - try: - sd_hijack_checkpoint.add() - - for _i in range((steps-initial_step) * gradient_step): - if scheduler.finished: - break - if shared.state.interrupted: - break - for j, batch in enumerate(dl): - # works as a drop_last=True for gradient accumulation - if j == max_steps_per_epoch: - break - scheduler.apply(optimizer, hypernetwork.step) - if scheduler.finished: - break - if shared.state.interrupted: - break - - if clip_grad: - clip_grad_sched.step(hypernetwork.step) - - with devices.autocast(): - x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) - if use_weight: - w = batch.weight.to(devices.device, non_blocking=pin_memory) - if tag_drop_out != 0 or shuffle_tags: - shared.sd_model.cond_stage_model.to(devices.device) - c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) - shared.sd_model.cond_stage_model.to(devices.cpu) - else: - c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) - if use_weight: - loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step - del w - else: - loss = shared.sd_model.forward(x, c)[0] / gradient_step - del x - del c - _loss_step += loss.item() - - scaler.scale(loss).backward() - # go back until we reach gradient accumulation steps - if (j + 1) % gradient_step != 0: - continue - loss_logging.append(_loss_step) - if clip_grad: - clip_grad(weights, clip_grad_sched.learn_rate) - - scaler.step(optimizer) - scaler.update() - hypernetwork.step += 1 - pbar.update() - optimizer.zero_grad(set_to_none=True) - loss_step = _loss_step - _loss_step = 0 - steps_done = hypernetwork.step + 1 - epoch_num = hypernetwork.step // steps_per_epoch - epoch_step = hypernetwork.step % steps_per_epoch - - description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}" - pbar.set_description(description) - if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: - # Before saving, change name to match current checkpoint. - hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') - hypernetwork.optimizer_name = optimizer_name - if shared.opts.save_optimizer_state: - hypernetwork.optimizer_state_dict = optimizer.state_dict() - save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) - hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. 
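The accumulation bookkeeping set up earlier in this function (steps_per_epoch, max_steps_per_epoch, and the (j + 1) % gradient_step check) is easiest to see with concrete numbers. A minimal sketch, assuming a 100-image dataset with batch_size=2 and gradient_step=4; the values are chosen only for illustration and are not taken from the diff:

dataset_len = 100      # assumed number of images in the training set
batch_size = 2         # assumed dataloader batch size
gradient_step = 4      # assumed gradient accumulation steps

batches_per_epoch = dataset_len // batch_size                                  # 50 dataloader batches per epoch
steps_per_epoch = dataset_len // batch_size // gradient_step                   # 12 optimizer steps per epoch
max_steps_per_epoch = batches_per_epoch - batches_per_epoch % gradient_step    # 48 batches used; the trailing 2 cannot fill an accumulation group and are dropped
images_per_step = batch_size * gradient_step                                   # 8 images contribute to each optimizer step

print(steps_per_epoch, max_steps_per_epoch, images_per_step)                   # 12 48 8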
- - - - if shared.opts.training_enable_tensorboard: - epoch_num = hypernetwork.step // len(ds) - epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1 - mean_loss = sum(loss_logging) / len(loss_logging) - textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num) - - textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, { - "loss": f"{loss_step:.7f}", - "learn_rate": scheduler.learn_rate - }) - - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{hypernetwork_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) - hypernetwork.eval() - rng_state = torch.get_rng_state() - cuda_rng_state = None - cuda_rng_state = torch.cuda.get_rng_state_all() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) - - p.disable_extra_networks = True - - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_name = processing.get_sampler_name(preview_sampler_index) - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = batch.cond_text[0] - p.steps = 20 - p.width = training_width - p.height = training_height - - preview_text = p.prompt - - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images) > 0 else None - - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - torch.set_rng_state(rng_state) - torch.cuda.set_rng_state_all(cuda_rng_state) - hypernetwork.train() - if image is not None: - shared.state.assign_current_image(image) - if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: - textual_inversion.tensorboard_add_image(tensorboard_writer, - f"Validation at epoch {epoch_num}", image, - hypernetwork.step) - last_saved_image, _last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" - - shared.state.job_no = hypernetwork.step - - shared.state.textinfo = f""" -

-Loss: {loss_step:.7f}<br/>
-Step: {steps_done}<br/>
-Last prompt: {html.escape(batch.cond_text[0])}<br/>
-Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
-Last saved image: {html.escape(last_saved_image)}<br/>
-</p>

-""" - except Exception as e: - errors.display(e, 'hypernetwork train') - finally: - pbar.leave = False - pbar.close() - hypernetwork.eval() - #report_statistics(loss_dict) - sd_hijack_checkpoint.remove() - - - - filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt') - hypernetwork.optimizer_name = optimizer_name - if shared.opts.save_optimizer_state: - hypernetwork.optimizer_state_dict = optimizer.state_dict() - save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) - - del optimizer - hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - shared.parallel_processing_allowed = old_parallel_processing_allowed - - return hypernetwork, filename - -def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): - old_hypernetwork_name = hypernetwork.name - old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None - old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None - try: - hypernetwork.sd_checkpoint = checkpoint.shorthash - hypernetwork.sd_checkpoint_name = checkpoint.model_name - hypernetwork.name = hypernetwork_name - hypernetwork.save(filename) - except Exception: - hypernetwork.sd_checkpoint = old_sd_checkpoint - hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name - hypernetwork.name = old_hypernetwork_name - raise +import datetime +import html +import os +from collections import deque +import inspect +from statistics import stdev, mean +from rich import progress +import tqdm +import torch +from torch import einsum +from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ +from einops import rearrange, repeat +from ldm.util import default +from modules import devices, processing, sd_models, shared, hashes, errors +import modules.textual_inversion.dataset +from modules.textual_inversion import textual_inversion, ti_logging +from modules.textual_inversion.learn_schedule import LearnRateScheduler + + +optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"} + +class HypernetworkModule(torch.nn.Module): + activation_dict = { + "linear": torch.nn.Identity, + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + "tanh": torch.nn.Tanh, + "sigmoid": torch.nn.Sigmoid, + } + activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) + + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', + add_layer_norm=False, activate_output=False, dropout_structure=None): + super().__init__() + self.multiplier = 1.0 + assert layer_structure is not None, "layer_structure must not be None" + assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" + assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" 
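The loop that follows sizes each fully-connected layer from dim and two consecutive entries of layer_structure. A minimal sketch of the resulting shapes, assuming dim=768 (the cross-attention context width of SD 1.x checkpoints, chosen only for illustration) and the default layer_structure of [1, 2, 1]:

import torch

dim = 768                    # assumed context dimension for illustration (SD 1.x text conditioning is 768-wide)
layer_structure = [1, 2, 1]  # default multiplier sequence

linears = [
    torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1]))
    for i in range(len(layer_structure) - 1)
]
for linear in linears:
    print(linear.in_features, '->', linear.out_features)  # 768 -> 1536, then 1536 -> 768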
+ linears = [] + for i in range(len(layer_structure) - 1): + # Add a fully-connected layer + linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) + # Add an activation func except last layer + if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output): + pass + elif activation_func in self.activation_dict: + linears.append(self.activation_dict[activation_func]()) + else: + raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') + # Add layer normalization + if add_layer_norm: + linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) + # Everything should be now parsed into dropout structure, and applied here. + # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0. + if dropout_structure is not None and dropout_structure[i+1] > 0: + assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!" + linears.append(torch.nn.Dropout(p=dropout_structure[i+1])) + # Code explanation : [1, 2, 1] -> dropout is missing when last_layer_dropout is false. [1, 2, 2, 1] -> [0, 0.3, 0, 0], when its True, [0, 0.3, 0.3, 0]. + self.linear = torch.nn.Sequential(*linears) + if state_dict is not None: + self.fix_old_state_dict(state_dict) + self.load_state_dict(state_dict) + else: + for layer in self.linear: + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + w, b = layer.weight.data, layer.bias.data + if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: + normal_(w, mean=0.0, std=0.01) + normal_(b, mean=0.0, std=0) + elif weight_init == 'XavierUniform': + xavier_uniform_(w) + zeros_(b) + elif weight_init == 'XavierNormal': + xavier_normal_(w) + zeros_(b) + elif weight_init == 'KaimingUniform': + kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + elif weight_init == 'KaimingNormal': + kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + else: + raise KeyError(f"Key {weight_init} is not defined as initialization!") + self.to(devices.device) + + def fix_old_state_dict(self, state_dict): + changes = { + 'linear1.bias': 'linear.0.bias', + 'linear1.weight': 'linear.0.weight', + 'linear2.bias': 'linear.1.bias', + 'linear2.weight': 'linear.1.weight', + } + for fr, to in changes.items(): + x = state_dict.get(fr, None) + if x is None: + continue + del state_dict[fr] + state_dict[to] = x + + def forward(self, x): + return x + self.linear(x) * (self.multiplier if not self.training else 1) + + def trainables(self): + layer_structure = [] + for layer in self.linear: + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + layer_structure += [layer.weight, layer.bias] + return layer_structure + + +#param layer_structure : sequence used for length, use_dropout : controlling boolean, last_layer_dropout : for compatibility check. 
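The one-line comment above compresses the dropout rules; the function defined next makes them concrete. A minimal, self-contained sketch of what it returns for the structures mentioned in the code's own comments; the helper below is a condensed copy under a different name so the checks run standalone:

def dropout_structure_sketch(layer_structure, use_dropout, last_layer_dropout):
    # condensed copy of parse_dropout_structure (defined below) so the expectations can be verified in isolation
    if layer_structure is None:
        layer_structure = [1, 2, 1]
    if not use_dropout:
        return [0] * len(layer_structure)
    values = [0]
    values.extend([0.3] * (len(layer_structure) - 3))
    values.append(0.3 if last_layer_dropout else 0)
    values.append(0)
    return values

# one entry per layer_structure entry; the input and output positions never get dropout
assert dropout_structure_sketch([1, 2, 1], use_dropout=False, last_layer_dropout=False) == [0, 0, 0]
assert dropout_structure_sketch([1, 2, 2, 1], use_dropout=True, last_layer_dropout=False) == [0, 0.3, 0, 0]
assert dropout_structure_sketch([1, 2, 2, 1], use_dropout=True, last_layer_dropout=True) == [0, 0.3, 0.3, 0]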
+def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout): + if layer_structure is None: + layer_structure = [1, 2, 1] + if not use_dropout: + return [0] * len(layer_structure) + dropout_values = [0] + dropout_values.extend([0.3] * (len(layer_structure) - 3)) + if last_layer_dropout: + dropout_values.append(0.3) + else: + dropout_values.append(0) + dropout_values.append(0) + return dropout_values + + +class Hypernetwork: + filename = None + name = None + + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): + self.filename = None + self.name = name + self.layers = {} + self.step = 0 + self.sd_checkpoint = None + self.sd_checkpoint_name = None + self.layer_structure = layer_structure + self.activation_func = activation_func + self.weight_init = weight_init + self.add_layer_norm = add_layer_norm + self.use_dropout = use_dropout + self.activate_output = activate_output + self.last_layer_dropout = kwargs.get('last_layer_dropout', True) + self.dropout_structure = kwargs.get('dropout_structure', None) + if self.dropout_structure is None: + self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) + self.optimizer_name = None + self.optimizer_state_dict = None + self.optional_info = None + for size in enable_sizes or []: + self.layers[size] = ( + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), + ) + self.eval() + + def weights(self): + res = [] + for layers in self.layers.values(): + for layer in layers: + res += layer.parameters() + return res + + def train(self, mode=True): + for layers in self.layers.values(): + for layer in layers: + layer.train(mode=mode) + for param in layer.parameters(): + param.requires_grad = mode + + def to(self, device): + for layers in self.layers.values(): + for layer in layers: + layer.to(device) + + return self + + def set_multiplier(self, multiplier): + for layers in self.layers.values(): + for layer in layers: + layer.multiplier = multiplier + + return self + + def eval(self): + for layers in self.layers.values(): + for layer in layers: + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def save(self, filename): + state_dict = {} + optimizer_saved_dict = {} + for k, v in self.layers.items(): + state_dict[k] = (v[0].state_dict(), v[1].state_dict()) + state_dict['step'] = self.step + state_dict['name'] = self.name + state_dict['layer_structure'] = self.layer_structure + state_dict['activation_func'] = self.activation_func + state_dict['is_layer_norm'] = self.add_layer_norm + state_dict['weight_initialization'] = self.weight_init + state_dict['sd_checkpoint'] = self.sd_checkpoint + state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + state_dict['activate_output'] = self.activate_output + state_dict['use_dropout'] = self.use_dropout + state_dict['dropout_structure'] = self.dropout_structure + state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout + state_dict['optional_info'] = self.optional_info if self.optional_info else None + 
+        if self.optimizer_name is not None:
+            optimizer_saved_dict['optimizer_name'] = self.optimizer_name
+        torch.save(state_dict, filename)
+        if shared.opts.save_optimizer_state and self.optimizer_state_dict:
+            optimizer_saved_dict['hash'] = self.shorthash()
+            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            torch.save(optimizer_saved_dict, f"{filename}.optim")
+
+    def load(self, filename):
+        self.filename = filename if os.path.exists(filename) else os.path.join(shared.opts.hypernetwork_dir, filename)
+        if self.name is None:
+            self.name = os.path.splitext(os.path.basename(self.filename))[0]
+        with progress.open(self.filename, 'rb', description=f'Load hypernetwork: [cyan]{self.filename}', auto_refresh=True, console=shared.console) as f:
+            state_dict = torch.load(f, map_location='cpu')
+        self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
+        self.optional_info = state_dict.get('optional_info', None)
+        self.activation_func = state_dict.get('activation_func', None)
+        self.weight_init = state_dict.get('weight_initialization', 'Normal')
+        self.add_layer_norm = state_dict.get('is_layer_norm', False)
+        self.dropout_structure = state_dict.get('dropout_structure', None)
+        self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False)
+        self.activate_output = state_dict.get('activate_output', True)
+        self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
+        # The dropout structure must have the same length as the layer structure; every value should be in [0, 1), and the last value must be 0.
+        if self.dropout_structure is None:
+            self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
+        if shared.opts.print_hypernet_extra:
+            if self.optional_info is not None:
+                print(f" INFO:\n {self.optional_info}\n")
+            print(f" Layer structure: {self.layer_structure}")
+            print(f" Activation function: {self.activation_func}")
+            print(f" Weight initialization: {self.weight_init}")
+            print(f" Layer norm: {self.add_layer_norm}")
+            print(f" Dropout usage: {self.use_dropout}")
+            print(f" Activate last layer: {self.activate_output}")
+            print(f" Dropout structure: {self.dropout_structure}")
+        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}
+        if self.shorthash() == optimizer_saved_dict.get('hash', None):
+            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+        else:
+            self.optimizer_state_dict = None
+        if self.optimizer_state_dict:
+            self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+            if shared.opts.print_hypernet_extra:
+                print("Loaded existing optimizer from checkpoint")
+                print(f"Optimizer name is {self.optimizer_name}")
+        else:
+            self.optimizer_name = "AdamW"
+            if shared.opts.print_hypernet_extra:
+                print("No saved optimizer exists in checkpoint")
+        for size, sd in state_dict.items():
+            if type(size) == int:
+                self.layers[size] = (
+                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+                                       self.add_layer_norm, self.activate_output, self.dropout_structure),
+                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+                                       self.add_layer_norm, self.activate_output, self.dropout_structure),
+                )
+        self.name = state_dict.get('name', self.name)
+        self.step = state_dict.get('step', 0)
+        self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
+        self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
+        self.eval()
+
+    def shorthash(self):
+        sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')
+        return sha256[0:10] if sha256 else None
+
+
+def list_hypernetworks(path):
+    res = {}
+    def list_folder(folder):
+        for filename in os.listdir(folder):
+            fn = os.path.join(folder, filename)
+            if os.path.isfile(fn) and fn.lower().endswith(".pt"):
+                name = os.path.splitext(os.path.basename(fn))[0]
+                res[name] = fn
+            elif os.path.isdir(fn) and not fn.startswith('.'):
+                list_folder(fn)
+
+    list_folder(path)
+    return res
+
+
+def load_hypernetwork(name):
+    path = shared.hypernetworks.get(name, None)
+    if path is None:
+        return None
+    hypernetwork = Hypernetwork()
+    try:
+        hypernetwork.load(path)
+    except Exception as e:
+        errors.display(e, f'hypernetwork load: {path}')
+        return None
+    return hypernetwork
+
+
+def load_hypernetworks(names, multipliers=None):
+    already_loaded = {}
+    for hypernetwork in shared.loaded_hypernetworks:
+        if hypernetwork.name in names:
+            already_loaded[hypernetwork.name] = hypernetwork
+    shared.loaded_hypernetworks.clear()
+    for i, name in enumerate(names):
+        hypernetwork = already_loaded.get(name, None)
+        if hypernetwork is None:
+            hypernetwork = load_hypernetwork(name)
+        if hypernetwork is None:
+            continue
+        hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0)
+        shared.loaded_hypernetworks.append(hypernetwork)
+
+
+def find_closest_hypernetwork_name(search: str):
+    if not search:
+        return None
+    search = search.lower()
+    applicable = [name for name in shared.hypernetworks if search in name.lower()]
+    if not applicable:
+        return None
+    applicable = sorted(applicable, key=lambda name: len(name))
+    return applicable[0]
+
+
+def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
+    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
+    if hypernetwork_layers is None:
+        return context_k, context_v
+    if layer is not None:
+        layer.hyper_k = hypernetwork_layers[0]
+        layer.hyper_v = hypernetwork_layers[1]
+    context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k)))
+    context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v)))
+    return context_k, context_v
+
+
+def apply_hypernetworks(hypernetworks, context, layer=None):
+    context_k = context
+    context_v = context
+    for hypernetwork in hypernetworks:
+        context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer)
+    return context_k, context_v
+
+
+def attention_CrossAttention_forward(self, x, context=None, mask=None):
+    h = self.heads
+    q = self.to_q(x)
+    context = default(context, x)
+    context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self)
+    k = self.to_k(context_k)
+    v = self.to_v(context_v)
+    q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
+    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+    if mask is not None:
+        mask = rearrange(mask, 'b ... -> b (...)')
+        max_neg_value = -torch.finfo(sim.dtype).max
+        mask = repeat(mask, 'b j -> (b h) () j', h=h)
+        sim.masked_fill_(~mask, max_neg_value)
+    # attention, what we cannot get enough of
+    attn = sim.softmax(dim=-1)
+    out = einsum('b i j, b j d -> b i d', attn, v)
+    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+    return self.to_out(out)
+
+
+def stack_conds(conds):
+    if len(conds) == 1:
+        return torch.stack(conds)
+    # same as in reconstruct_multicond_batch
+    token_count = max([x.shape[0] for x in conds])
+    for i in range(len(conds)):
+        if conds[i].shape[0] != token_count:
+            last_vector = conds[i][-1:]
+            last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
+            conds[i] = torch.vstack([conds[i], last_vector_repeated])
+    return torch.stack(conds)
+
+
+def statistics(data):
+    if len(data) < 2:
+        std = 0
+    else:
+        std = stdev(data)
+    total_information = f"loss:{mean(data):.3f}" + "\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
+    recent_data = data[-32:]
+    if len(recent_data) < 2:
+        std = 0
+    else:
+        std = stdev(recent_data)
+    recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + "\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
+    return total_information, recent_information
+
+
+def report_statistics(loss_info: dict):
+    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
+    for key in keys:
+        try:
+            print("Loss statistics for file " + key)
+            info, recent = statistics(list(loss_info[key]))
+            print(info)
+            print(recent)
+        except Exception as e:
+            print(e)
+
+
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
+    # Remove illegal characters from the name.
+    name = "".join(x for x in name if (x.isalnum() or x in "._- "))
+    assert name, "Name cannot be empty!"
+    fn = os.path.join(shared.opts.hypernetwork_dir, f"{name}.pt")
+    if not overwrite_old:
+        assert not os.path.exists(fn), f"file {fn} already exists"
+    if type(layer_structure) == str:
+        layer_structure = [float(x.strip()) for x in layer_structure.split(",")]
+    if use_dropout and dropout_structure and type(dropout_structure) == str:
+        dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")]
+    else:
+        dropout_structure = [0] * len(layer_structure)
+    hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(
+        name=name,
+        enable_sizes=[int(x) for x in enable_sizes],
+        layer_structure=layer_structure,
+        activation_func=activation_func,
+        weight_init=weight_init,
+        add_layer_norm=add_layer_norm,
+        use_dropout=use_dropout,
+        dropout_structure=dropout_structure
+    )
+    hypernet.save(fn)
+    shared.reload_hypernetworks()
+    return name
+
+
+def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):  # pylint: disable=unused-argument
+    # images allows training previews to have infotext; importing it at the top causes a circular import problem
+    from modules import images, sd_hijack_checkpoint
+
+    save_hypernetwork_every = save_hypernetwork_every or 0
+    create_image_every = create_image_every or 0
+    template_file = textual_inversion.textual_inversion_templates.get(template_filename, None)
+    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, name="hypernetwork")
+    template_file = template_file.path
+
+    path = shared.hypernetworks.get(hypernetwork_name, None)
+    hypernetwork = Hypernetwork()
+    hypernetwork.load(path)
+    shared.loaded_hypernetworks = [hypernetwork]
+
+    shared.state.job = "train"
+    shared.state.textinfo = "Initializing hypernetwork training..."
+    shared.state.job_count = steps
+
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
+    filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+
+    log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
+    unload = shared.opts.unload_models_when_training
+
+    if save_hypernetwork_every > 0:
+        hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
+        os.makedirs(hypernetwork_dir, exist_ok=True)
+    else:
+        hypernetwork_dir = None
+
+    if create_image_every > 0:
+        images_dir = os.path.join(log_directory, "images")
+        os.makedirs(images_dir, exist_ok=True)
+    else:
+        images_dir = None
+
+    checkpoint = sd_models.select_checkpoint()
+
+    initial_step = hypernetwork.step or 0
+    if initial_step >= steps:
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
+        return hypernetwork, filename
+
+    scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
+
+    clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
+    if clip_grad:
+        clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
+
+    if shared.opts.training_enable_tensorboard:
+        tensorboard_writer = textual_inversion.tensorboard_setup(log_directory)
+
+    # dataset loading may take a while, so input validations and early returns should be done before this
+    shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
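+    # The dataset pre-encodes latents and, with include_cond=True, the text conditioning;
+    # when shuffle_tags or tag_drop_out is set, conditioning is instead re-encoded on every
+    # step, which is why the training loop moves cond_stage_model between devices.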
+
+    pin_memory = shared.opts.pin_memory
+
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
+
+    if shared.opts.save_training_settings_to_txt:
+        saved_params = dict(
+            model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
+            **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
+        )
+        ti_logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
+
+    latent_sampling_method = ds.latent_sampling_method
+
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+    old_parallel_processing_allowed = shared.parallel_processing_allowed
+
+    if unload:
+        shared.parallel_processing_allowed = False
+        shared.sd_model.cond_stage_model.to(devices.cpu)
+        shared.sd_model.first_stage_model.to(devices.cpu)
+
+    weights = hypernetwork.weights()
+    hypernetwork.train()
+
+    # Use the optimizer type saved with the hypernetwork; this could also be exposed as a UI option.
+    if hypernetwork.optimizer_name in optimizer_dict:
+        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+        optimizer_name = hypernetwork.optimizer_name
+    else:
+        print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
+        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+        optimizer_name = 'AdamW'
+
+    if hypernetwork.optimizer_state_dict:  # this must change if the optimizer type can differ from the saved one
+        try:
+            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+        except RuntimeError as e:
+            print("Cannot resume from saved optimizer!")
+            print(e)
+
+    scaler = torch.cuda.amp.GradScaler()
+
+    batch_size = ds.batch_size
+    gradient_step = ds.gradient_step
+    # number of images processed = batch_size * gradient_step * number of optimizer steps
+    steps_per_epoch = len(ds) // batch_size // gradient_step
+    max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
+    loss_step = 0
+    _loss_step = 0  # internal
+    # size = len(ds.indexes)
+    # loss_dict = defaultdict(lambda : deque(maxlen = 1024))
+    loss_logging = deque(maxlen=len(ds) * 3)  # this could be a configurable parameter; it keeps 3 epochs' worth (3 * dataset size) of losses
+    # losses = torch.zeros((size,))
+    # previous_mean_losses = [0]
+    # previous_mean_loss = 0
+    # print("Mean loss of {} elements".format(size))
+
+    _steps_without_grad = 0
+
+    last_saved_file = ""
+    last_saved_image = ""
+    forced_filename = ""
+
+    pbar = tqdm.tqdm(total=steps - initial_step)
+    try:
+        sd_hijack_checkpoint.add()
+
+        for _i in range((steps - initial_step) * gradient_step):
+            if scheduler.finished:
+                break
+            if shared.state.interrupted:
+                break
+            for j, batch in enumerate(dl):
+                # works like drop_last=True for gradient accumulation
+                if j == max_steps_per_epoch:
+                    break
+                scheduler.apply(optimizer, hypernetwork.step)
+                if scheduler.finished:
+                    break
+                if shared.state.interrupted:
+                    break
+
+                if clip_grad:
+                    clip_grad_sched.step(hypernetwork.step)
+
+                with devices.autocast():
+                    x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
+                    if tag_drop_out != 0 or shuffle_tags:
+                        shared.sd_model.cond_stage_model.to(devices.device)
+                        c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
+                        shared.sd_model.cond_stage_model.to(devices.cpu)
+                    else:
+                        c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, c)[0] / gradient_step
+                    del x
+                    del c
+                    _loss_step += loss.item()
+
+                scaler.scale(loss).backward()
+                # keep accumulating gradients until gradient_step batches have been processed
+                if (j + 1) % gradient_step != 0:
+                    continue
+                loss_logging.append(_loss_step)
+                if clip_grad:
+                    clip_grad(weights, clip_grad_sched.learn_rate)
+
+                scaler.step(optimizer)
+                scaler.update()
+                hypernetwork.step += 1
+                pbar.update()
+                optimizer.zero_grad(set_to_none=True)
+                loss_step = _loss_step
+                _loss_step = 0
+                steps_done = hypernetwork.step + 1
+                epoch_num = hypernetwork.step // steps_per_epoch
+                epoch_step = hypernetwork.step % steps_per_epoch
+
+                description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}] loss: {loss_step:.7f}"
+                pbar.set_description(description)
+                if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
+                    # Before saving, change the name to match the current checkpoint.
+                    hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
+                    last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+                    hypernetwork.optimizer_name = optimizer_name
+                    if shared.opts.save_optimizer_state:
+                        hypernetwork.optimizer_state_dict = optimizer.state_dict()
+                    save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
+                    hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
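+                # Note: the tensorboard block below treats len(ds) optimizer steps as one
+                # epoch, ignoring batch_size and gradient_step, so its epoch numbers differ
+                # from the steps_per_epoch accounting used above.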
+
+
+
+                if shared.opts.training_enable_tensorboard:
+                    epoch_num = hypernetwork.step // len(ds)
+                    epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1
+                    mean_loss = sum(loss_logging) / len(loss_logging)
+                    textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)
+
+                textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
+                    "loss": f"{loss_step:.7f}",
+                    "learn_rate": scheduler.learn_rate
+                })
+
+                if images_dir is not None and steps_done % create_image_every == 0:
+                    forced_filename = f'{hypernetwork_name}-{steps_done}'
+                    last_saved_image = os.path.join(images_dir, forced_filename)
+                    hypernetwork.eval()
+                    rng_state = torch.get_rng_state()
+                    cuda_rng_state = None
+                    if torch.cuda.is_available():
+                        cuda_rng_state = torch.cuda.get_rng_state_all()
+                    shared.sd_model.cond_stage_model.to(devices.device)
+                    shared.sd_model.first_stage_model.to(devices.device)
+
+                    p = processing.StableDiffusionProcessingTxt2Img(
+                        sd_model=shared.sd_model,
+                        do_not_save_grid=True,
+                        do_not_save_samples=True,
+                    )
+
+                    p.disable_extra_networks = True
+
+                    if preview_from_txt2img:
+                        p.prompt = preview_prompt
+                        p.negative_prompt = preview_negative_prompt
+                        p.steps = preview_steps
+                        p.sampler_name = processing.get_sampler_name(preview_sampler_index)
+                        p.cfg_scale = preview_cfg_scale
+                        p.seed = preview_seed
+                        p.width = preview_width
+                        p.height = preview_height
+                    else:
+                        p.prompt = batch.cond_text[0]
+                        p.steps = 20
+                        p.width = training_width
+                        p.height = training_height
+
+                    preview_text = p.prompt
+
+                    processed = processing.process_images(p)
+                    image = processed.images[0] if len(processed.images) > 0 else None
+
+                    if unload:
+                        shared.sd_model.cond_stage_model.to(devices.cpu)
+                        shared.sd_model.first_stage_model.to(devices.cpu)
+                    torch.set_rng_state(rng_state)
+                    if cuda_rng_state is not None:
+                        torch.cuda.set_rng_state_all(cuda_rng_state)
+                    hypernetwork.train()
+                    if image is not None:
+                        shared.state.assign_current_image(image)
+                        if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+                            textual_inversion.tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, hypernetwork.step)
+                        last_saved_image, _last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+                        last_saved_image += f", prompt: {preview_text}"
+
+                shared.state.job_no = hypernetwork.step
+
+                shared.state.textinfo = f"""
+<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+""" + except Exception as e: + errors.display(e, 'hypernetwork train') + finally: + pbar.leave = False + pbar.close() + hypernetwork.eval() + #report_statistics(loss_dict) + sd_hijack_checkpoint.remove() + + + + filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) + + del optimizer + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + shared.parallel_processing_allowed = old_parallel_processing_allowed + + return hypernetwork, filename + +def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): + old_hypernetwork_name = hypernetwork.name + old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None + old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None + try: + hypernetwork.sd_checkpoint = checkpoint.shorthash + hypernetwork.sd_checkpoint_name = checkpoint.model_name + hypernetwork.name = hypernetwork_name + hypernetwork.save(filename) + except Exception: + hypernetwork.sd_checkpoint = old_sd_checkpoint + hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name + hypernetwork.name = old_hypernetwork_name + raise diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index c8b6cd5c3..42ca9b5b6 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,31 +1,31 @@ -import html -import gradio as gr -import modules.hypernetworks.hypernetwork -from modules import devices, sd_hijack, shared - -not_available = ["hardswish", "multiheadattention"] -keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] - - -def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): - filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" - - -def train_hypernetwork(*args): - shared.loaded_hypernetworks = [] - assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' - try: - sd_hijack.undo_optimizations() - hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args) - res = f""" -Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. 
-Hypernetwork saved to {html.escape(filename)} -""" - return res, "" - except Exception as e: - raise RuntimeError("Hypernetwork error") from e - finally: - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - sd_hijack.apply_optimizations() +import html +import gradio as gr +import modules.hypernetworks.hypernetwork +from modules import devices, sd_hijack, shared + +not_available = ["hardswish", "multiheadattention"] +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] + + +def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): + filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) + return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" + + +def train_hypernetwork(*args): + shared.loaded_hypernetworks = [] + assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' + try: + sd_hijack.undo_optimizations() + hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args) + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. +Hypernetwork saved to {html.escape(filename)} +""" + return res, "" + except Exception as e: + raise RuntimeError("Hypernetwork error") from e + finally: + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + sd_hijack.apply_optimizations() diff --git a/modules/images.py b/modules/images.py index e117b6860..083b2e5ad 100644 --- a/modules/images.py +++ b/modules/images.py @@ -1,867 +1,867 @@ -import io -import re -import os -import sys -import math -import json -import uuid -import queue -import string -import hashlib -import datetime -import threading -from pathlib import Path -from collections import namedtuple -import numpy as np -import piexif -import piexif.helper -from PIL import Image, ImageFont, ImageDraw, PngImagePlugin, ExifTags -from modules import sd_samplers, shared, script_callbacks, errors, paths - - -debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None -try: - from pi_heif import register_heif_opener - register_heif_opener() -except Exception: - pass - - -def check_grid_size(imgs): - mp = 0 - for img in imgs: - mp += img.width * img.height - mp = round(mp / 1000000) - ok = mp <= shared.opts.img_max_size_mp - if not ok: - shared.log.warning(f'Maximum image size exceded: size={mp} maximum={shared.opts.img_max_size_mp} MPixels') - return ok - - -def image_grid(imgs, batch_size=1, rows=None): - if rows is None: - if shared.opts.n_rows > 0: - rows = shared.opts.n_rows - elif shared.opts.n_rows == 0: - rows = batch_size - else: - rows = math.floor(math.sqrt(len(imgs))) - while len(imgs) % rows != 0: - rows -= 1 - if rows > len(imgs): - rows = len(imgs) - cols = math.ceil(len(imgs) / rows) - params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) - script_callbacks.image_grid_callback(params) - w, h = imgs[0].size - grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color=shared.opts.grid_background) - for i, img in enumerate(params.imgs): - grid.paste(img, box=(i % params.cols * w, i // 
params.cols * h)) - return grid - - -Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"]) - - -def split_grid(image, tile_w=512, tile_h=512, overlap=64): - w = image.width - h = image.height - non_overlap_width = tile_w - overlap - non_overlap_height = tile_h - overlap - cols = math.ceil((w - overlap) / non_overlap_width) - rows = math.ceil((h - overlap) / non_overlap_height) - dx = (w - tile_w) / (cols - 1) if cols > 1 else 0 - dy = (h - tile_h) / (rows - 1) if rows > 1 else 0 - grid = Grid([], tile_w, tile_h, w, h, overlap) - for row in range(rows): - row_images = [] - y = int(row * dy) - if y + tile_h >= h: - y = h - tile_h - for col in range(cols): - x = int(col * dx) - if x + tile_w >= w: - x = w - tile_w - tile = image.crop((x, y, x + tile_w, y + tile_h)) - row_images.append([x, tile_w, tile]) - grid.tiles.append([y, tile_h, row_images]) - return grid - - -def combine_grid(grid): - def make_mask_image(r): - r = r * 255 / grid.overlap - r = r.astype(np.uint8) - return Image.fromarray(r, 'L') - - mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0)) - mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1)) - combined_image = Image.new("RGB", (grid.image_w, grid.image_h)) - for y, h, row in grid.tiles: - combined_row = Image.new("RGB", (grid.image_w, h)) - for x, w, tile in row: - if x == 0: - combined_row.paste(tile, (0, 0)) - continue - combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w) - combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0)) - if y == 0: - combined_image.paste(combined_row, (0, 0)) - continue - combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h) - combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap)) - return combined_image - - -class GridAnnotation: - def __init__(self, text='', is_active=True): - self.text = text - self.is_active = is_active - self.size = None - - -def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0, title=None): - def wrap(drawing, text, font, line_length): - lines = [''] - for word in text.split(): - line = f'{lines[-1]} {word}'.strip() - if drawing.textlength(line, font=font) <= line_length: - lines[-1] = line - else: - lines.append(word) - return lines - - def get_font(fontsize): - try: - return ImageFont.truetype(shared.opts.font or 'javascript/roboto.ttf', fontsize) - except Exception: - return ImageFont.truetype('javascript/roboto.ttf', fontsize) - - def draw_texts(drawing: ImageDraw, draw_x, draw_y, lines, initial_fnt, initial_fontsize): - for line in lines: - font = initial_fnt - fontsize = initial_fontsize - while drawing.multiline_textbbox((0,0), text=line.text, font=font)[0] > line.allowed_width and fontsize > 0: - fontsize -= 1 - font = get_font(fontsize) - drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=font, fill=shared.opts.font_color if line.is_active else color_inactive, anchor="mm", align="center") - if not line.is_active: - drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4) - draw_y += line.size[1] + line_spacing - - fontsize = (width + height) // 25 - line_spacing = fontsize // 2 - font = get_font(fontsize) - color_inactive = (127, 127, 127) - pad_left = 
0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4 - cols = im.width // width - rows = im.height // height - assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}' - assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}' - calc_img = Image.new("RGB", (1, 1), shared.opts.grid_background) - calc_d = ImageDraw.Draw(calc_img) - title_texts = [title] if title else [[GridAnnotation()]] - for texts, allowed_width in zip(hor_texts + ver_texts + title_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts) + [(width+margin)*cols]): - items = [] + texts - texts.clear() - for line in items: - wrapped = wrap(calc_d, line.text, font, allowed_width) - texts += [GridAnnotation(x, line.is_active) for x in wrapped] - for line in texts: - bbox = calc_d.multiline_textbbox((0, 0), line.text, font=font) - line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1]) - line.allowed_width = allowed_width - hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts] - ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts] - pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2 - title_pad = 0 - if title: - title_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in title_texts] # pylint: disable=unsubscriptable-object - title_pad = 0 if sum(title_text_heights) == 0 else max(title_text_heights) + line_spacing * 2 - result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + title_pad + margin * (rows-1)), shared.opts.grid_background) - for row in range(rows): - for col in range(cols): - cell = im.crop((width * col, height * row, width * (col+1), height * (row+1))) - result.paste(cell, (pad_left + (width + margin) * col, pad_top + title_pad + (height + margin) * row)) - d = ImageDraw.Draw(result) - if title: - x = pad_left + ((width+margin)*cols) / 2 - y = title_pad / 2 - title_text_heights[0] / 2 - draw_texts(d, x, y, title_texts[0], font, fontsize) - for col in range(cols): - x = pad_left + (width + margin) * col + width / 2 - y = (pad_top / 2 - hor_text_heights[col] / 2) + title_pad - draw_texts(d, x, y, hor_texts[col], font, fontsize) - for row in range(rows): - x = pad_left / 2 - y = (pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2) + title_pad - draw_texts(d, x, y, ver_texts[row], font, fontsize) - return result - - -def draw_prompt_matrix(im, width, height, all_prompts, margin=0): - prompts = all_prompts[1:] - boundary = math.ceil(len(prompts) / 2) - prompts_horiz = prompts[:boundary] - prompts_vert = prompts[boundary:] - hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))] - ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))] - return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin) - - -def resize_image(resize_mode, im, width, height, upscaler_name=None, output_type='image'): - shared.log.debug(f'Image resize: mode={resize_mode} resolution={width}x{height} upscaler={upscaler_name} function={sys._getframe(1).f_code.co_name}') # pylint: disable=protected-access - """ - Resizes an image with the specified resize_mode, width, and 
height. - Args: - resize_mode: The mode to use when resizing the image. - 0: No resize - 1: Resize the image to the specified width and height. - 2: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. - 3: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image. - im: The image to resize. - width: The width to resize the image to. - height: The height to resize the image to. - upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img. - """ - upscaler_name = upscaler_name or shared.opts.upscaler_for_img2img - - def latent(im, w, h, upscaler): - from modules.processing_vae import vae_encode, vae_decode - import torch - latents = vae_encode(im, shared.sd_model, full_quality=False) # TODO enable full VAE mode - latents = torch.nn.functional.interpolate(latents, size=(h // 8, w // 8), mode=upscaler["mode"], antialias=upscaler["antialias"]) - im = vae_decode(latents, shared.sd_model, output_type='pil', full_quality=False)[0] - return im - - def resize(im, w, h): - if upscaler_name is None or upscaler_name == "None" or im.mode == 'L': - return im.resize((w, h), resample=Image.Resampling.LANCZOS) - scale = max(w / im.width, h / im.height) - if scale > 1.0: - upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name] - if len(upscalers) > 0: - upscaler = upscalers[0] - im = upscaler.scaler.upscale(im, scale, upscaler.data_path) - else: - upscaler = shared.latent_upscale_modes.get(upscaler_name, None) - if upscaler is not None: - im = latent(im, w, h, upscaler) - else: - shared.log.warning(f"Could not find upscaler: {upscaler_name or ''} using fallback: {upscaler.name}") - if im.width != w or im.height != h: - im = im.resize((w, h), resample=Image.Resampling.LANCZOS) - return im - - if resize_mode == 0 or (im.width == width and im.height == height): - res = im.copy() - elif resize_mode == 1: - res = resize(im, width, height) - elif resize_mode == 2: - ratio = width / height - src_ratio = im.width / im.height - src_w = width if ratio > src_ratio else im.width * height // im.height - src_h = height if ratio <= src_ratio else im.height * width // im.width - resized = resize(im, src_w, src_h) - res = Image.new(im.mode, (width, height)) - res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) - else: - ratio = width / height - src_ratio = im.width / im.height - src_w = width if ratio < src_ratio else im.width * height // im.height - src_h = height if ratio >= src_ratio else im.height * width // im.width - resized = resize(im, src_w, src_h) - res = Image.new(im.mode, (width, height)) - res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) - if ratio < src_ratio: - fill_height = height // 2 - src_h // 2 - res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) - res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h)) - elif ratio > src_ratio: - fill_width = width // 2 - src_w // 2 - res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) - res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0)) - if output_type == 'np': - return np.array(res) - return res - - -re_nonletters = re.compile(r'[\s' + 
string.punctuation + ']+') -re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") -re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") -re_attention = re.compile(r'[\(*\[*](\w+)(:\d+(\.\d+))?[\)*\]*]|') -re_network = re.compile(r'\<\w+:(\w+)(:\d+(\.\d+))?\>|') -re_brackets = re.compile(r'[\([{})\]]') - -NOTHING = object() - - -class FilenameGenerator: - replacements = { - 'width': lambda self: self.image.width, - 'height': lambda self: self.image.height, - 'batch_number': lambda self: self.batch_number, - 'iter_number': lambda self: self.iter_number, - 'num': lambda self: NOTHING if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1, - 'generation_number': lambda self: NOTHING if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1, - 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'), - 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime], [datetime