Skip to content

Commit

Permalink
Merge branch 'develop'
Browse files Browse the repository at this point in the history
  • Loading branch information
mashb1t committed Jul 13, 2024
2 parents 236766b + f4d21e6 commit 376d69c
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 23 deletions.
4 changes: 2 additions & 2 deletions args_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@
args_parser.parser.add_argument("--always-download-new-model", action='store_true',
help="Always download newer models", default=False)

args_parser.parser.add_argument("--rebuild-hash-cache", action='store_true',
help="Generates missing model and LoRA hashes.", default=False)
args_parser.parser.add_argument("--rebuild-hash-cache", help="Generates missing model and LoRA hashes.",
type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

args_parser.parser.set_defaults(
disable_cuda_malloc=True,
Expand Down
2 changes: 2 additions & 0 deletions extras/GroundingDINO/util/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ def __init__(self):
self.load_device = torch.device('cpu')
self.offload_device = torch.device('cpu')

@torch.no_grad()
@torch.inference_mode()
def predict_with_caption(
self,
image: np.ndarray,
Expand Down
2 changes: 1 addition & 1 deletion fooocus_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
version = '2.6.0-rc1 (mashb1t)'
version = '2.6.0-rc2 (mashb1t)'
20 changes: 2 additions & 18 deletions modules/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import tempfile
import modules.flags
import modules.sdxl_styles
from modules.hash_cache import load_cache_from_file, save_cache_to_file
from modules.hash_cache import init_cache

from modules.model_loader import load_file_from_url
from modules.extra_utils import makedirs_with_log, get_files_from_folder, try_eval_env_var
Expand Down Expand Up @@ -892,20 +892,4 @@ def downloading_sam_vit_h():


update_files()
load_cache_from_file()

if args_manager.args.rebuild_hash_cache:
from modules.hash_cache import sha256_from_cache
from modules.util import get_file_from_folder_list

print('[Cache] Rebuilding hash cache')
for filename in model_filenames:
filepath = get_file_from_folder_list(filename, paths_checkpoints)
sha256_from_cache(filepath)
for filename in lora_filenames:
filepath = get_file_from_folder_list(filename, paths_loras)
sha256_from_cache(filepath)
print('[Cache] Done')

# write cache to file again for sorting and cleanup of invalid cache entries
save_cache_to_file()
init_cache(model_filenames, paths_checkpoints, lora_filenames, paths_loras)
31 changes: 31 additions & 0 deletions modules/hash_cache.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
import json
import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count

import args_manager
from modules.util import get_file_from_folder_list
from modules.util import sha256, HASH_SHA256_LENGTH

hash_cache_filename = 'hash_cache.txt'
Expand All @@ -10,7 +14,9 @@
def sha256_from_cache(filepath):
global hash_cache
if filepath not in hash_cache:
print(f"[Cache] Calculating sha256 for {filepath}")
hash_value = sha256(filepath)
print(f"[Cache] sha256 for {filepath}: {hash_value}")
hash_cache[filepath] = hash_value
save_cache_to_file(filepath, hash_value)

Expand Down Expand Up @@ -51,3 +57,28 @@ def save_cache_to_file(filename=None, hash_value=None):
fp.write('\n')
except Exception as e:
print(f'[Cache] Saving failed: {e}')


def init_cache(model_filenames, paths_checkpoints, lora_filenames, paths_loras):
    """Load the persisted hash cache, optionally rebuild it, then write it back.

    The rebuild runs only when --rebuild-hash-cache was given; its value is the
    requested thread count, where a non-positive value means "use all CPUs".
    The final save re-sorts the file and drops invalid cache entries.
    """
    load_cache_from_file()

    requested_threads = args_manager.args.rebuild_hash_cache
    if requested_threads:
        # const=-1 (bare flag) and other non-positive values fall back to cpu_count().
        worker_count = requested_threads if requested_threads > 0 else cpu_count()
        rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, worker_count)

    # write cache to file again for sorting and cleanup of invalid cache entries
    save_cache_to_file()


def rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, max_workers=cpu_count()):
    """Compute (and cache) sha256 hashes for all models and LoRAs in parallel.

    Args:
        lora_filenames: LoRA file names to hash.
        model_filenames: checkpoint file names to hash.
        paths_checkpoints: folders searched for checkpoint files.
        paths_loras: folders searched for LoRA files.
        max_workers: number of hashing threads (defaults to the CPU count).
    """
    def hash_one(filename, paths):
        # Resolve the file in the configured folders, then hash via the cache
        # (sha256_from_cache skips files whose hash is already known).
        filepath = get_file_from_folder_list(filename, paths)
        sha256_from_cache(filepath)

    print('[Cache] Rebuilding hash cache')
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(hash_one, filename, paths_checkpoints) for filename in model_filenames]
        futures += [executor.submit(hash_one, filename, paths_loras) for filename in lora_filenames]
        # Await every future so hashing errors are reported instead of being
        # silently discarded (a bare submit() drops the exception with the future).
        for future in futures:
            try:
                future.result()
            except Exception as e:
                print(f'[Cache] Hashing failed: {e}')
    print('[Cache] Done')
2 changes: 0 additions & 2 deletions modules/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,13 +176,11 @@ def generate_temp_filename(folder='./outputs/', extension='png'):


def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
    """Return the sha256 hex digest of *filename*, optionally truncated.

    Args:
        filename: path of the file to hash.
        use_addnet_hash: if True, hash in the addnet/safetensors style
            (header skipped) via addnet_hash_safetensors; otherwise hash the
            whole file with calculate_sha256.
        length: truncate the digest to this many hex chars; None keeps it full.
    """
    # Debug prints removed: callers (e.g. the hash-cache layer) are
    # responsible for progress logging, so this stays silent.
    if use_addnet_hash:
        with open(filename, "rb") as file:
            sha256_value = addnet_hash_safetensors(file)
    else:
        sha256_value = calculate_sha256(filename)

    return sha256_value[:length] if length is not None else sha256_value

Expand Down
6 changes: 6 additions & 0 deletions update_log.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
# [2.6.0-rc2](https://github.com/mashb1t/Fooocus/releases/tag/v2.6.0-rc2)

* Add hash generation multi-threading support, change `--rebuild-hash-cache` from a boolean flag to an optional int (number of hashing threads; omit the value or pass a non-positive number to use all CPU cores)
* Fix inference tensor version counter tracking issue for GroundingDINO after using Enhance (see [discussion](https://github.com/lllyasviel/Fooocus/discussions/3213))


# [2.6.0-rc1](https://github.com/mashb1t/Fooocus/releases/tag/v2.6.0-rc1)

* Update default models to latest versions
Expand Down

0 comments on commit 376d69c

Please sign in to comment.