[Fix] Remove ring_flash_attention warning (#9119)
* fix
DrownFish19 committed Sep 13, 2024
1 parent 399490b commit 11252bc
Showing 1 changed file with 11 additions and 11 deletions.
22 changes: 11 additions & 11 deletions paddlenlp/transformers/ring_flash_attention.py
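Note: despite the commit title, the warning is moved rather than deleted. The try/except import of flash_attn_bwd moves from module scope into balanced_ring_flash_attention_bwd_func, so the missing-paddlenlp_ops warning fires only when ring flash attention is actually executed, not on every import of the module.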
@@ -20,17 +20,6 @@
 from paddle import _C_ops
 from paddle.autograd.py_layer import PyLayer
 
-try:
-    from paddlenlp_ops import flash_attn_bwd
-except (ImportError, ModuleNotFoundError):
-    from paddlenlp.utils.log import logger
-
-    logger.warning(
-        "if you run ring_flash_attention.py, please ensure you install "
-        "the paddlenlp_ops by following the instructions "
-        "provided at https://github.com/PaddlePaddle/PaddleNLP/blob/develop/csrc/README.md"
-    )
-
 
 
 class RingCommunicator:
     def __init__(self, group, local_key, local_value):

@@ -233,6 +222,17 @@ def balanced_ring_flash_attention_bwd_func(
     if attn_mask is not None:
         attn_masks_list = paddle.split(attn_mask, num_or_sections=cp_size * 2, axis=3)
 
+    try:
+        from paddlenlp_ops import flash_attn_bwd
+    except (ImportError, ModuleNotFoundError):
+        from paddlenlp.utils.log import logger
+
+        logger.warning(
+            "if you run ring_flash_attention.py, please ensure you install "
+            "the paddlenlp_ops by following the instructions "
+            "provided at https://github.com/PaddlePaddle/PaddleNLP/blob/develop/csrc/README.md"
+        )
+
     for step in range(cp_size):
         block_k, block_v = kv_comm_buffer.get_buffers()
