Skip to content

Commit eeb020b

Browse files
Better VRAM estimation for Chroma Radiance and other models. (Comfy-Org#11278)
1 parent ae65433 commit eeb020b

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

comfy/supported_models.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -965,7 +965,7 @@ class CosmosT2IPredict2(supported_models_base.BASE):
965965

966966
def __init__(self, unet_config):
967967
super().__init__(unet_config)
968-
self.memory_usage_factor = (unet_config.get("model_channels", 2048) / 2048) * 0.9
968+
self.memory_usage_factor = (unet_config.get("model_channels", 2048) / 2048) * 0.95
969969

970970
def get_model(self, state_dict, prefix="", device=None):
971971
out = model_base.CosmosPredict2(self, device=device)
@@ -1289,7 +1289,7 @@ class ChromaRadiance(Chroma):
12891289
latent_format = comfy.latent_formats.ChromaRadiance
12901290

12911291
# Pixel-space model, no spatial compression for model input.
1292-
memory_usage_factor = 0.038
1292+
memory_usage_factor = 0.044
12931293

12941294
def get_model(self, state_dict, prefix="", device=None):
12951295
return model_base.ChromaRadiance(self, device=device)
@@ -1332,7 +1332,7 @@ class Omnigen2(supported_models_base.BASE):
13321332
"shift": 2.6,
13331333
}
13341334

1335-
memory_usage_factor = 1.65 #TODO
1335+
memory_usage_factor = 1.95 #TODO
13361336

13371337
unet_extra_config = {}
13381338
latent_format = latent_formats.Flux
@@ -1397,7 +1397,7 @@ class HunyuanImage21(HunyuanVideo):
13971397

13981398
latent_format = latent_formats.HunyuanImage21
13991399

1400-
memory_usage_factor = 7.7
1400+
memory_usage_factor = 8.7
14011401

14021402
supported_inference_dtypes = [torch.bfloat16, torch.float32]
14031403

@@ -1488,7 +1488,7 @@ class Kandinsky5(supported_models_base.BASE):
14881488
unet_extra_config = {}
14891489
latent_format = latent_formats.HunyuanVideo
14901490

1491-
memory_usage_factor = 1.1 #TODO
1491+
memory_usage_factor = 1.25 #TODO
14921492

14931493
supported_inference_dtypes = [torch.bfloat16, torch.float32]
14941494

@@ -1517,7 +1517,7 @@ class Kandinsky5Image(Kandinsky5):
15171517
}
15181518

15191519
latent_format = latent_formats.Flux
1520-
memory_usage_factor = 1.1 #TODO
1520+
memory_usage_factor = 1.25 #TODO
15211521

15221522
def get_model(self, state_dict, prefix="", device=None):
15231523
out = model_base.Kandinsky5Image(self, device=device)

0 commit comments

Comments (0)