Support service-list for metrics collection in benchmark.py (#105)
* Support service-list for metrics collection in benchmark.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
joshuayao and pre-commit-ci[bot] committed Sep 5, 2024
1 parent 02b60b5 commit 58502c5
Showing 2 changed files with 18 additions and 10 deletions.
24 changes: 14 additions & 10 deletions evals/benchmark/benchmark.py
@@ -56,7 +56,7 @@ def extract_test_case_data(content):
     }
 
 
-def create_run_yaml_content(service_name, base_url, bench_target, concurrency, user_queries, test_suite_config):
+def create_run_yaml_content(service, base_url, bench_target, concurrency, user_queries, test_suite_config):
     """Create content for the run.yaml file."""
     return {
         "profile": {
@@ -71,6 +71,7 @@ def create_run_yaml_content(service_name, base_url, bench_target, concurrency, u
                 "bench-target": bench_target,
                 "run-time": test_suite_config["run_time"],
                 "service-metric-collect": test_suite_config["collect_service_metric"],
+                "service-list": service.get("service_list", []),
                 "llm-model": test_suite_config["llm_model"],
                 "deployment-type": test_suite_config["deployment_type"],
             },
@@ -79,10 +80,11 @@ def create_run_yaml_content(service_name, base_url, bench_target, concurrency, u
     }
 
 
-def create_and_save_run_yaml(example, deployment_type, service_type, service_name, base_url, test_suite_config, index):
+def create_and_save_run_yaml(example, deployment_type, service_type, service, base_url, test_suite_config, index):
     """Create and save the run.yaml file for the service being tested."""
     os.makedirs(test_suite_config["test_output_dir"], exist_ok=True)
 
+    service_name = service.get("service_name")
     run_yaml_paths = []
     for user_queries in test_suite_config["user_queries"]:
         concurrency = max(1, user_queries // test_suite_config["concurrent_level"])
@@ -92,7 +94,7 @@ def create_and_save_run_yaml(example, deployment_type, service_type, service_nam
         else:
             bench_target = f"{service_type}{'bench' if test_suite_config['random_prompt'] else 'fixed'}"
         run_yaml_content = create_run_yaml_content(
-            service_name, base_url, bench_target, concurrency, user_queries, test_suite_config
+            service, base_url, bench_target, concurrency, user_queries, test_suite_config
         )
 
         run_yaml_path = os.path.join(
@@ -135,7 +137,11 @@ def get_service_ip(service_name, deployment_type="k8s", service_ip=None, service
     return svc_ip, port
 
 
-def run_service_test(example, service_type, service_name, parameters, test_suite_config):
+def run_service_test(example, service_type, service, test_suite_config):
+
+    # Get the service name
+    service_name = service.get("service_name")
+
     # Get the deployment type from the test suite configuration
     deployment_type = test_suite_config.get("deployment_type", "k8s")
 
@@ -154,7 +160,7 @@ def run_service_test(example, service_type, service_name, parameters, test_suite
 
     # Create the run.yaml for the service
     run_yaml_paths = create_and_save_run_yaml(
-        example, deployment_type, service_type, service_name, base_url, test_suite_config, timestamp
+        example, deployment_type, service_type, service, base_url, test_suite_config, timestamp
     )
 
     # Run the test using locust_runtests function
@@ -165,13 +171,11 @@ def run_service_test(example, service_type, service_name, parameters, test_suite
     print(f"[OPEA BENCHMARK] 🚀 Test completed for {service_name} at {url}")
 
 
-def process_service(example, service_name, case_data, test_suite_config):
-    service = case_data.get(service_name)
+def process_service(example, service_type, case_data, test_suite_config):
+    service = case_data.get(service_type)
     if service and service.get("run_test"):
         print(f"[OPEA BENCHMARK] 🚀 Example: {example} Service: {service.get('service_name')}, Running test...")
-        run_service_test(
-            example, service_name, service.get("service_name"), service.get("parameters", {}), test_suite_config
-        )
+        run_service_test(example, service_type, service, test_suite_config)
 
 
 if __name__ == "__main__":
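In short, the test-case entry is now threaded through as a whole dict, so its optional service_list reaches the generated run.yaml as a "service-list" field. Below is a minimal sketch of that flow; it is illustrative only, and the surrounding profile layout produced by the real create_run_yaml_content is not reproduced here.

# Illustrative sketch only: shows how a test-case entry's service_list ends up
# in the generated run.yaml content (field names outside the diff are assumptions).
service = {
    "service_name": "chatqna-backend-server-svc",
    "service_list": ["chatqna-tei", "chatqna-teirerank"],
}

global_settings = {
    "service-metric-collect": True,                    # test_suite_config["collect_service_metric"]
    "service-list": service.get("service_list", []),   # new field added by this commit
}

print(global_settings["service-list"])  # ['chatqna-tei', 'chatqna-teirerank']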
4 changes: 4 additions & 0 deletions evals/benchmark/benchmark.yaml
@@ -56,6 +56,10 @@ test_cases:
     e2e:
       run_test: true
       service_name: "chatqna-backend-server-svc"  # Replace with your service name
+      service_list:  # Replace with your k8s service names for metrics collection,
+                     # activate if deployment_type is k8s and collect_service_metric is true
+        - "chatqna-tei"
+        - "chatqna-teirerank"
 
   codegen:
     llm:
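For reference, a small sketch of how this e2e block is consumed once benchmark.yaml is parsed: process_service now looks the entry up by service type and forwards the whole dict, so service_list travels with it. The hard-coded data below stands in for the parsed file; it is not the repository's loader code.

# Illustrative only: hard-coded stand-in for the parsed benchmark.yaml test case.
case_data = {
    "e2e": {
        "run_test": True,
        "service_name": "chatqna-backend-server-svc",
        "service_list": ["chatqna-tei", "chatqna-teirerank"],
    }
}

service_type = "e2e"
service = case_data.get(service_type)  # the whole dict, not just the service name
if service and service.get("run_test"):
    # In the real script: run_service_test(example, service_type, service, test_suite_config)
    print(service["service_name"], service.get("service_list", []))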
