From c894f04f3176485e73faa08690151f332362d8bb Mon Sep 17 00:00:00 2001 From: Roger Condori <114810545+R3gm@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:07:11 +0000 Subject: [PATCH] rename kdpm2 to dpm2 --- README.md | 24 +++++++++++++++++------- stablepy/diffusers_vanilla/constants.py | 8 ++++---- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index f77c3d1..5a88179 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The goal of this project is to make Stable Diffusion more accessible, simple and **Installation:** ``` -pip install stablepy==0.3.0 +pip install stablepy==0.4.0 ``` **Usage:** @@ -15,7 +15,7 @@ To use the project, simply create a new instance of the `Model_Diffusers` class. Once you have created a new instance of the `Model_Diffusers` class, you can call the `model()` method to generate an image. The `model()` method takes several arguments, including the prompt, the number of steps, the guidance scale, the sampler, the image width, the image height, the path to the upscaler model (if using), etc. 
-**Demo:** +**Interactive tutorial:** See [stablepy_demo.ipynb](https://github.com/R3gm/stablepy/blob/main/stablepy_demo.ipynb) @@ -26,7 +26,7 @@ See [stablepy_demo.ipynb](https://github.com/R3gm/stablepy/blob/main/stablepy_de **Examples:** -The following code examples show how to use the project to generate a text-to-image and an ControlNet diffusion: +The following code examples show how to use the project to generate a text-to-image and a ControlNet diffusion: ```python from stablepy import Model_Diffusers @@ -65,6 +65,7 @@ images, path_images = model( prompt='highly detailed portrait of an underwater city, with towering spires and domes rising up from the ocean floor', num_steps = 30, image_resolution = 768, + preprocessor_name = "Canny", guidance_scale = 7.5, seed = 567, FreeU = True, @@ -77,10 +78,19 @@ images, path_images = model( images[1] ``` - -**Documentation:** - -In process +**📖 News:** + +🔥 Version 0.4.0: New Update Details + +- IP Adapter with the variants FaceID and Instant-Style +- New samplers +- Appropriate support for SDXL safetensors models +- ControlNet for SDXL: OpenPose, Canny, Scribble, SoftEdge, Depth, LineArt, and SDXL_Tile_Realistic +- New variant prompt weight with emphasis +- ControlNet pattern for SD1.5 and SDXL +- ControlNet Canny now needs the `preprocessor_name="Canny"` +- Similarly, ControlNet MLSD requires the `preprocessor_name="MLSD"` +- Task names like "sdxl_canny" have been changed to "sdxl_canny_t2i" to refer to the T2I adapters they use. 
**Contributing:** diff --git a/stablepy/diffusers_vanilla/constants.py b/stablepy/diffusers_vanilla/constants.py index a393c11..1f2788d 100644 --- a/stablepy/diffusers_vanilla/constants.py +++ b/stablepy/diffusers_vanilla/constants.py @@ -101,10 +101,10 @@ "DPM++ 3M Karras": (DPMSolverMultistepScheduler, {"solver_order": 3, "use_karras_sigmas": True}), "DPM++ SDE": (DPMSolverSDEScheduler, {"use_karras_sigmas": False}), "DPM++ SDE Karras": (DPMSolverSDEScheduler, {"use_karras_sigmas": True}), - "KDPM2": (KDPM2DiscreteScheduler, {}), - "KDPM2 Karras": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}), - "KDPM2 a": (KDPM2AncestralDiscreteScheduler, {}), - "KDPM2 a Karras": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}), + "DPM2": (KDPM2DiscreteScheduler, {}), + "DPM2 Karras": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}), + "DPM2 a": (KDPM2AncestralDiscreteScheduler, {}), + "DPM2 a Karras": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}), "Euler": (EulerDiscreteScheduler, {}), "Euler a": (EulerAncestralDiscreteScheduler, {}), "Euler trailing": (EulerDiscreteScheduler, {"timestep_spacing": "trailing", "prediction_type": "sample"}),