Skip to content

Commit

Permalink
xformers disabled
Browse files Browse the repository at this point in the history
  • Loading branch information
R3gm committed Oct 19, 2023
1 parent 4148a66 commit 7f9e76f
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 28 deletions.
24 changes: 1 addition & 23 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ controlnet-aux = "^0.0.6"
mediapipe = "0.10.1"
pytorch-lightning = "^2.0.9.post0"
asdff = {git = "https://github.com/R3gm/asdff.git"}
xformers = "^0.0.22.post4"


[[tool.poetry.source]]
Expand Down
9 changes: 5 additions & 4 deletions stablepy/diffusers_vanilla/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -348,8 +348,8 @@ def load_pipe(

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

if self.device.type == "cuda":
pipe.enable_xformers_memory_efficient_attention()
# if self.device.type == "cuda":
# pipe.enable_xformers_memory_efficient_attention()

pipe.to(self.device)
torch.cuda.empty_cache()
Expand Down Expand Up @@ -1117,6 +1117,7 @@ def __call__(
# self.pipe = self.process_lora(lora_D, lora_scale_D)
# self.pipe = self.process_lora(lora_E, lora_scale_E)

xformers_memory_efficient_attention=False # disabled
if xformers_memory_efficient_attention and torch.cuda.is_available():
self.pipe.disable_xformers_memory_efficient_attention()
self.pipe.to(self.device)
Expand Down Expand Up @@ -1308,8 +1309,8 @@ def __call__(

compel = None
del compel

if torch.cuda.is_available():
if torch.cuda.is_available() and xformers_memory_efficient_attention:
if xformers_memory_efficient_attention:
self.pipe.enable_xformers_memory_efficient_attention()
else:
Expand Down

0 comments on commit 7f9e76f

Please sign in to comment.