Triton flash followup (#556)
* run on sm75

* remove op with Triton bwds for now
dianaml0 authored Dec 6, 2022
1 parent f2f3424 commit 189828c
Showing 1 changed file with 1 addition and 3 deletions.
xformers/ops/memory_efficient_attention.py
@@ -775,8 +775,7 @@ def supports(cls, d: "AttentionOpDispatch") -> bool:
         if not has_triton_flashattention:
             return False
         device_capability = torch.cuda.get_device_capability(d.device)
-        is_sm80 = device_capability[0] >= 8
-        if not is_sm80:
+        if not device_capability >= (7, 5):
             return False
         return super(TritonFlashAttentionOp, cls).supports(d)
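The new check works because torch.cuda.get_device_capability returns a (major, minor) tuple and Python compares tuples lexicographically, so sm75 (Turing) and newer pass while sm70 (Volta) does not; the old major-version-only check rejected sm75. A minimal sketch of that comparison, using a hypothetical helper name that is not part of the xformers source:

import torch

def meets_min_capability(device=None, minimum=(7, 5)):
    # torch.cuda.get_device_capability returns a (major, minor) tuple,
    # e.g. (7, 5) for a T4 (Turing) or (8, 0) for an A100 (Ampere).
    # Tuple comparison is lexicographic: (8, 0) >= (7, 5) and
    # (7, 5) >= (7, 5) are True, while (7, 0) >= (7, 5) is False,
    # whereas the old `device_capability[0] >= 8` check rejected sm75.
    return torch.cuda.get_device_capability(device) >= minimum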

@@ -1003,7 +1002,6 @@ def op(self) -> AttentionOp:
             # TODO: remove once triton_faster_than_cutlass method complete
             MemoryEfficientAttentionTritonFwdFlashBwOp,
             MemoryEfficientAttentionCutlassOp,
-            TritonFlashAttentionOp,
             MemoryEfficientAttentionOp,
         ]
         if self.requires_grad and self._is_cutlass_fwd_faster_than_flash():
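For context, a priority list like the one above is typically consumed by a dispatch routine that returns the first operator whose supports() check passes for the given inputs; with TritonFlashAttentionOp dropped from the list, dispatch falls through to the remaining operators even on sm80 hardware. A rough sketch of that first-match pattern (not the exact xformers implementation; pick_op is a hypothetical name):

def pick_op(priority_list, dispatch):
    # Walk the list in priority order and return the first operator
    # whose classmethod supports(dispatch) reports compatibility,
    # mirroring how TritonFlashAttentionOp.supports is used in the diff above.
    for op in priority_list:
        if op.supports(dispatch):
            return op
    raise NotImplementedError(f"No attention operator supports {dispatch}")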
