Skip to content

Commit 2a1f402

Browse files
Make Qwen 8B work with TextGenerate node. (Comfy-Org#13160)
1 parent 3eba2dc commit 2a1f402

2 files changed

Lines changed: 8 additions & 1 deletion

File tree

comfy/ops.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -928,6 +928,7 @@ def _load_from_state_dict(self, state_dict, prefix, local_metadata,
928928
weight = state_dict.pop(weight_key, None)
929929
if weight is None:
930930
logging.warning(f"Missing weight for layer {layer_name}")
931+
self.weight = None
931932
return
932933

933934
manually_loaded_keys = [weight_key]
@@ -1034,6 +1035,9 @@ def state_dict(self, *args, destination=None, prefix="", **kwargs):
10341035
if self.bias is not None:
10351036
sd["{}bias".format(prefix)] = self.bias
10361037

1038+
if self.weight is None:
1039+
return sd
1040+
10371041
if isinstance(self.weight, QuantizedTensor):
10381042
sd_out = self.weight.state_dict("{}weight".format(prefix))
10391043
for k in sd_out:

comfy/text_encoders/llama.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ class Qwen3_8BConfig:
224224
k_norm = "gemma3"
225225
rope_scale = None
226226
final_norm: bool = True
227-
lm_head: bool = False
227+
lm_head: bool = True
228228
stop_tokens = [151643, 151645]
229229

230230
@dataclass
@@ -912,6 +912,9 @@ def sample_token(self, logits, temperature, top_k, top_p, min_p, repetition_pena
912912
class BaseQwen3:
913913
def logits(self, x):
914914
input = x[:, -1:]
915+
if self.model.config.lm_head:
916+
return self.model.lm_head(input)
917+
915918
module = self.model.embed_tokens
916919

917920
offload_stream = None

0 commit comments

Comments (0)