Skip to content

Commit dfbf99a

Browse files
authored
model_management: make dynamic --disable-smart-memory work (Comfy-Org#12724)
This was previously considering the pool of dynamic models as one giant entity for the sake of smart memory, but that isn't really useful or what a user would reasonably expect. Make Dynamic VRAM properly purge its models just like the old --disable-smart-memory, while conditioning the dynamic-for-dynamic bypass on smart memory. Re-enable dynamic smart memory.
1 parent 602f6bd commit dfbf99a

2 files changed

Lines changed: 6 additions & 7 deletions

File tree

comfy/cli_args.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -260,4 +260,4 @@ def is_valid_directory(path: str) -> str:
260260
args.fast = set(args.fast)
261261

262262
def enables_dynamic_vram():
263-
return not args.disable_dynamic_vram and not args.highvram and not args.gpu_only and not args.novram and not args.cpu and not args.disable_smart_memory
263+
return not args.disable_dynamic_vram and not args.highvram and not args.gpu_only and not args.novram and not args.cpu

comfy/model_management.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -639,12 +639,11 @@ def free_memory(memory_required, device, keep_loaded=[], for_dynamic=False, ram_
639639
if not DISABLE_SMART_MEMORY:
640640
memory_to_free = memory_required - get_free_memory(device)
641641
ram_to_free = ram_required - get_free_ram()
642-
643-
if current_loaded_models[i].model.is_dynamic() and for_dynamic:
644-
#don't actually unload dynamic models for the sake of other dynamic models
645-
#as that works on-demand.
646-
memory_required -= current_loaded_models[i].model.loaded_size()
647-
memory_to_free = 0
642+
if current_loaded_models[i].model.is_dynamic() and for_dynamic:
643+
#don't actually unload dynamic models for the sake of other dynamic models
644+
#as that works on-demand.
645+
memory_required -= current_loaded_models[i].model.loaded_size()
646+
memory_to_free = 0
648647
if memory_to_free > 0 and current_loaded_models[i].model_unload(memory_to_free):
649648
logging.debug(f"Unloading {current_loaded_models[i].model.model.__class__.__name__}")
650649
unloaded_model.append(i)

0 commit comments

Comments
 (0)