update cli autoresolve #240

Merged · 2 commits · May 31, 2024

This PR wires the CLI's defaults through the MANAGER settings object instead of hardcoding them: the enum defaults InferenceEngine.torch, Device.auto, Dtype.auto, PoolingMethod.auto, and EmbeddingDtype.float32, plus the .default_value() placeholders in the CLI entrypoints, become MANAGER.engine[0], MANAGER.device[0], and so on. EngineArgs, the v1 CLI, and the v2 CLI thereby resolve their defaults from a single source of truth.
libs/infinity_emb/infinity_emb/args.py (10 changes: 5 additions & 5 deletions)

@@ -45,16 +45,16 @@ class EngineArgs:
     batch_size: int = MANAGER.batch_size[0]
     revision: Optional[str] = MANAGER.revision[0]
     trust_remote_code: bool = MANAGER.trust_remote_code[0]
-    engine: InferenceEngine = InferenceEngine.torch
+    engine: InferenceEngine = MANAGER.engine[0]
     model_warmup: bool = MANAGER.model_warmup[0]
     vector_disk_cache_path: str = ""
-    device: Device = Device.auto
+    device: Device = MANAGER.device[0]
     compile: bool = MANAGER.compile[0]
     bettertransformer: bool = MANAGER.bettertransformer[0]
-    dtype: Dtype = Dtype.auto
-    pooling_method: PoolingMethod = PoolingMethod.auto
+    dtype: Dtype = MANAGER.dtype[0]
+    pooling_method: PoolingMethod = MANAGER.pooling_method[0]
     lengths_via_tokenize: bool = MANAGER.lengths_via_tokenize[0]
-    embedding_dtype: EmbeddingDtype = EmbeddingDtype.float32
+    embedding_dtype: EmbeddingDtype = MANAGER.embedding_dtype[0]
     served_model_name: str = MANAGER.served_model_name[0]

     def __post_init__(self):
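The common thread in this hunk: dataclass defaults stop being hardcoded enum values and are instead resolved through the MANAGER settings object, so environment variables and the CLI stay in sync. Below is a minimal, self-contained sketch of that pattern — EnvManager, the INFINITY_* variable names, and the ";" separator are illustrative assumptions, not infinity_emb's actual implementation:

    import os
    from dataclasses import dataclass
    from enum import Enum


    class InferenceEngine(Enum):
        torch = "torch"
        optimum = "optimum"


    class EnvManager:
        """Toy stand-in for infinity_emb's MANAGER: each setting is a list
        (one entry per served model), read from an INFINITY_* environment
        variable, falling back to a default when the variable is unset."""

        def _list(self, name: str, default: str) -> list[str]:
            # e.g. INFINITY_ENGINE="torch;optimum" -> ["torch", "optimum"]
            return os.environ.get(f"INFINITY_{name}", default).split(";")

        @property
        def engine(self) -> list[InferenceEngine]:
            return [InferenceEngine(v) for v in self._list("ENGINE", "torch")]


    MANAGER = EnvManager()


    @dataclass
    class EngineArgs:
        # Evaluated once, at class-definition time: the environment
        # variable must be set before this module is imported.
        engine: InferenceEngine = MANAGER.engine[0]


    print(EngineArgs().engine)  # InferenceEngine.torch unless INFINITY_ENGINE is set

Because such defaults are read at import time, changing the environment variable afterwards has no effect — which is also why the CLI signatures below reuse the very same MANAGER values rather than re-reading the environment.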
libs/infinity_emb/infinity_emb/infinity_server.py (16 changes: 8 additions & 8 deletions)

@@ -361,13 +361,13 @@ def v1(
     revision: str = MANAGER.revision[0],
     trust_remote_code: bool = MANAGER.trust_remote_code[0],
     redirect_slash: str = MANAGER.redirect_slash,
-    engine: InferenceEngine = InferenceEngine.default_value(), # type: ignore # noqa
+    engine: InferenceEngine = MANAGER.engine[0], # type: ignore # noqa
     model_warmup: bool = MANAGER.model_warmup[0],
     vector_disk_cache: bool = MANAGER.vector_disk_cache[0],
-    device: Device = Device.default_value(), # type: ignore
+    device: Device = MANAGER.device[0], # type: ignore
     lengths_via_tokenize: bool = MANAGER.lengths_via_tokenize[0],
-    dtype: Dtype = Dtype.default_value(), # type: ignore
-    pooling_method: PoolingMethod = PoolingMethod.default_value(), # type: ignore
+    dtype: Dtype = MANAGER.dtype[0], # type: ignore
+    pooling_method: PoolingMethod = MANAGER.pooling_method[0], # type: ignore
     compile: bool = MANAGER.compile[0],
     bettertransformer: bool = MANAGER.bettertransformer[0],
     preload_only: bool = MANAGER.preload_only,
@@ -449,13 +449,13 @@ def v2(
     batch_size: list[int] = MANAGER.batch_size,
     revision: list[str] = MANAGER.revision,
     trust_remote_code: list[bool] = MANAGER.trust_remote_code,
-    engine: list[InferenceEngine] = [InferenceEngine.default_value()], # type: ignore # noqa
+    engine: list[InferenceEngine] = MANAGER.engine, # type: ignore # noqa
     model_warmup: list[bool] = MANAGER.model_warmup,
     vector_disk_cache: list[bool] = MANAGER.vector_disk_cache,
-    device: list[Device] = [Device.default_value()], # type: ignore
+    device: list[Device] = MANAGER.device, # type: ignore
     lengths_via_tokenize: list[bool] = MANAGER.lengths_via_tokenize,
-    dtype: list[Dtype] = [Dtype.default_value()], # type: ignore
-    pooling_method: list[PoolingMethod] = [PoolingMethod.default_value()], # type: ignore
+    dtype: list[Dtype] = MANAGER.dtype, # type: ignore
+    pooling_method: list[PoolingMethod] = MANAGER.pooling_method, # type: ignore
     compile: list[bool] = MANAGER.compile,
     bettertransformer: list[bool] = MANAGER.bettertransformer,
     # arguments for uvicorn / server
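Note the difference from v1: v2 takes list-valued arguments, one entry per served model, which is why its defaults can now be MANAGER's lists directly rather than single-element literal lists. A hedged sketch of how such lists might fan out into per-model configurations — build_args and its broadcasting rule are assumptions for illustration, not infinity_emb's actual logic:

    # Hypothetical helper: shows one plausible way list-valued settings
    # map onto several served models.
    def build_args(
        model_id: list[str],
        engine: list[str],
        device: list[str],
    ) -> list[dict]:
        n = len(model_id)

        def fan_out(values: list[str]) -> list[str]:
            # Broadcast a single entry across all models; otherwise
            # require exactly one entry per model.
            if len(values) == 1:
                return values * n
            if len(values) != n:
                raise ValueError("expected 1 value or one per model")
            return values

        return [
            {"model_id": m, "engine": e, "device": d}
            for m, e, d in zip(model_id, fan_out(engine), fan_out(device))
        ]


    # Two models, one shared engine setting, per-model devices:
    print(build_args(["bge-small", "bge-large"], ["torch"], ["cpu", "cuda"]))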