Commit 9d698fa

Format and test changes
Signed-off-by: Muralidhar Andoorveedu <[email protected]>
andoorve committed May 2, 2024
1 parent 16a5aac commit 9d698fa
Showing 5 changed files with 22 additions and 14 deletions.
3 changes: 2 additions & 1 deletion tests/core/test_scheduler.py
@@ -5,7 +5,8 @@
 
 import pytest  # noqa
 
-from vllm.config import CacheConfig, LoRAConfig, ParallelConfig, SchedulerConfig
+from vllm.config import (CacheConfig, LoRAConfig, ParallelConfig,
+                         SchedulerConfig)
 from vllm.core.interfaces import AllocStatus
 from vllm.core.policy import PolicyFactory
 from vllm.core.scheduler import Scheduler, SchedulingBudget
5 changes: 3 additions & 2 deletions vllm/engine/async_llm_engine.py
@@ -513,8 +513,9 @@ async def run_engine_loop(self):
                 result = task.result()
                 virtual_engine = requests_in_progress.index(task)
                 if self.engine_use_ray:
-                    has_unfinished_requests = await self.engine.has_unfinished_requests_for_virtual_engine.remote(
-                        virtual_engine)  # type: ignore
+                    has_unfinished_requests = \
+                        await self.engine.\
+                        has_unfinished_requests_for_virtual_engine.remote(virtual_engine)  # type: ignore
                 else:
                     has_unfinished_requests = self.engine.scheduler[
                         virtual_engine].has_unfinished_seqs()
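
The hunk above only changes line wrapping; the underlying control flow is that the async engine loop keeps one in-flight step task per virtual engine (one per pipeline stage) and, when a task completes, asks that specific virtual engine whether it still has unfinished requests, either via a Ray remote call or directly through that engine's scheduler. Below is a minimal, self-contained asyncio sketch of that pattern; engine_step, has_unfinished, and NUM_VIRTUAL_ENGINES are illustrative stand-ins, not vLLM API.

import asyncio

NUM_VIRTUAL_ENGINES = 2  # hypothetical: one virtual engine per pipeline stage


async def engine_step(virtual_engine: int) -> None:
    """Stand-in for one scheduling/execution step of a virtual engine."""
    await asyncio.sleep(0.01)


def has_unfinished(virtual_engine: int) -> bool:
    """Stand-in for scheduler[virtual_engine].has_unfinished_seqs()."""
    return False


async def run_engine_loop() -> None:
    # One in-flight step task per virtual engine.
    requests_in_progress = [
        asyncio.create_task(engine_step(ve))
        for ve in range(NUM_VIRTUAL_ENGINES)
    ]
    done, _ = await asyncio.wait(requests_in_progress,
                                 return_when=asyncio.FIRST_COMPLETED)
    for task in done:
        # Map the finished task back to its virtual engine, then ask that
        # virtual engine (not the engine as a whole) whether it has more work.
        virtual_engine = requests_in_progress.index(task)
        if has_unfinished(virtual_engine):
            requests_in_progress[virtual_engine] = asyncio.create_task(
                engine_step(virtual_engine))
    # Drain whatever is still in flight before exiting this sketch.
    await asyncio.gather(*requests_in_progress)


asyncio.run(run_engine_loop())
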
4 changes: 3 additions & 1 deletion vllm/engine/llm_engine.py
@@ -491,7 +491,9 @@ def has_unfinished_requests(self) -> bool:
 
     def has_unfinished_requests_for_virtual_engine(
             self, virtual_engine: int) -> bool:
-        """Returns True if there are unfinished requests for the virtual engine."""
+        """
+        Returns True if there are unfinished requests for the virtual engine.
+        """
         return self.scheduler[virtual_engine].has_unfinished_seqs()
 
     def _process_model_outputs(
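
The method touched above is the per-stage counterpart of has_unfinished_requests(): with pipeline parallelism the engine keeps one scheduler per virtual engine, and the new method polls only the scheduler at the given index. A rough sketch of that relationship follows, using stub classes rather than the real vLLM Scheduler and LLMEngine.

from typing import List


class _SchedulerStub:
    """Minimal stand-in for vllm.core.scheduler.Scheduler."""

    def __init__(self, unfinished: bool) -> None:
        self._unfinished = unfinished

    def has_unfinished_seqs(self) -> bool:
        return self._unfinished


class _EngineSketch:
    """Illustrative sketch: one scheduler per virtual engine (pipeline stage)."""

    def __init__(self, schedulers: List[_SchedulerStub]) -> None:
        self.scheduler = schedulers

    def has_unfinished_requests(self) -> bool:
        # Any virtual engine still has work.
        return any(s.has_unfinished_seqs() for s in self.scheduler)

    def has_unfinished_requests_for_virtual_engine(
            self, virtual_engine: int) -> bool:
        # Only the given virtual engine, mirroring the method in the diff.
        return self.scheduler[virtual_engine].has_unfinished_seqs()


engine = _EngineSketch([_SchedulerStub(False), _SchedulerStub(True)])
assert engine.has_unfinished_requests() is True
assert engine.has_unfinished_requests_for_virtual_engine(0) is False
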
12 changes: 7 additions & 5 deletions vllm/lora/layers.py
@@ -9,11 +9,13 @@
 from transformers import PretrainedConfig
 
 from vllm.config import LoRAConfig
-from vllm.distributed import (
-    get_tensor_model_parallel_rank, get_tensor_model_parallel_src_rank,
-    get_tensor_model_parallel_world_size, split_tensor_along_last_dim,
-    tensor_model_parallel_all_gather, tensor_model_parallel_all_reduce,
-    tensor_model_parallel_gather)
+from vllm.distributed import (get_tensor_model_parallel_rank,
+                              get_tensor_model_parallel_src_rank,
+                              get_tensor_model_parallel_world_size,
+                              split_tensor_along_last_dim,
+                              tensor_model_parallel_all_gather,
+                              tensor_model_parallel_all_reduce,
+                              tensor_model_parallel_gather)
 from vllm.distributed.utils import divide
 from vllm.lora.punica import add_lora, add_lora_slice, bgmv
 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
12 changes: 7 additions & 5 deletions vllm/model_executor/models/gpt2.py
@@ -24,11 +24,13 @@
 from transformers import GPT2Config
 
 from vllm.attention import Attention, AttentionMetadata
-from vllm.distributed import (
-    get_pipeline_model_parallel_rank, get_pipeline_model_parallel_world_size,
-    get_pp_indices, get_tensor_model_parallel_world_size,
-    is_pipeline_model_parallel_first_rank,
-    is_pipeline_model_parallel_last_rank, recv_prev_rank, send_next_rank)
+from vllm.distributed import (get_pipeline_model_parallel_rank,
+                              get_pipeline_model_parallel_world_size,
+                              get_pp_indices,
+                              get_tensor_model_parallel_world_size,
+                              is_pipeline_model_parallel_first_rank,
+                              is_pipeline_model_parallel_last_rank,
+                              recv_prev_rank, send_next_rank)
 from vllm.model_executor.layers.activation import get_act_fn
 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                                QKVParallelLinear,
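
The gpt2.py change above is again only line wrapping, but the imported names (first/last-rank checks, get_pp_indices, recv_prev_rank, send_next_rank) point at the usual pipeline-parallel forward pattern: each rank runs its own slice of the layers, receiving hidden states from the previous stage unless it is the first rank and sending them onward unless it is the last. The sketch below shows that control flow with hypothetical helpers only; the real vLLM signatures are not shown in this diff.

from typing import Callable, Sequence


def pipeline_forward(layers: Sequence[Callable[[list], list]],
                     is_first_rank: bool,
                     is_last_rank: bool,
                     recv_prev: Callable[[], list],
                     send_next: Callable[[list], None],
                     input_embeds: list) -> list:
    """Conceptual pipeline-parallel forward; hypothetical helpers, not vLLM API."""
    # First stage computes embeddings; later stages receive activations.
    hidden = input_embeds if is_first_rank else recv_prev()
    # Each rank owns only a contiguous slice of the layers (cf. get_pp_indices).
    for layer in layers:
        hidden = layer(hidden)
    if not is_last_rank:
        send_next(hidden)  # hand off to the next stage
    return hidden          # last stage returns the final hidden states


# Toy two-stage run in one process: stage 0 "sends" by returning its output,
# stage 1 "receives" it through a closure.
stage0_out = pipeline_forward([lambda h: [x + 1 for x in h]],
                              True, False, lambda: [], lambda h: None, [0, 1])
stage1_out = pipeline_forward([lambda h: [x * 2 for x in h]],
                              False, True, lambda: stage0_out, lambda h: None, [])
assert stage1_out == [2, 4]
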
