Skip to content

Commit 3814bf4

Browse files
authored
Enable Pytorch Attention for gfx1150 (Comfy-Org#12973)
1 parent d062bec commit 3814bf4

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

comfy/model_management.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -400,7 +400,7 @@ def aotriton_supported(gpu_arch):
400400
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
401401
if aotriton_supported(arch): # AMD efficient attention implementation depends on aotriton.
402402
if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
403-
if any((a in arch) for a in ["gfx90a", "gfx942", "gfx950", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950
403+
if any((a in arch) for a in ["gfx90a", "gfx942", "gfx950", "gfx1100", "gfx1101", "gfx1150", "gfx1151"]): # TODO: more arches, TODO: gfx950
404404
ENABLE_PYTORCH_ATTENTION = True
405405
if rocm_version >= (7, 0):
406406
if any((a in arch) for a in ["gfx1200", "gfx1201"]):

0 commit comments

Comments (0)