Skip to content
This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

fix: resolve CI tests #398

Merged
merged 6 commits into from
May 20, 2022
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on: [push]

env:
HOST: ${{ secrets.FINETUNER_HOST }}
HUBBLE_STAGING_TOKEN: ${{ secrets.HUBBLE_STAGING_TOKEN }}
JINA_AUTH_TOKEN: ${{ secrets.JINA_AUTH_TOKEN }}

jobs:
check-codestyle:
Expand Down
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed

- Change logic behind artifact-id and return jsonified `dicts` instead of `requests.Response` objects. ([#390](https://github.com/jina-ai/finetuner/pull/390))

### Fixed

- Resolve CI tests. ([#398](https://github.com/jina-ai/finetuner/pull/398))
jupyterjazz marked this conversation as resolved.
Show resolved Hide resolved
4 changes: 4 additions & 0 deletions finetuner/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ def fit(
learning_rate: float = 0.001,
epochs: int = 20,
batch_size: int = 8,
scheduler_step: str = 'batch',
freeze: bool = False,
output_dim: Optional[int] = None,
multi_modal: bool = False,
Expand All @@ -61,6 +62,8 @@ def fit(
:param learning_rate: learning rate for the optimizer.
:param epochs: Number of epochs for fine-tuning.
:param batch_size: Number of items to include in a batch.
:param scheduler_step: At which interval should the learning rate scheduler's
step function be called. Valid options are "batch" and "epoch".
:param freeze: If set to True, will freeze all layers except the last one.
:param output_dim: The expected output dimension.
If set, will attach a projection head.
Expand All @@ -84,6 +87,7 @@ def fit(
learning_rate=learning_rate,
epochs=epochs,
batch_size=batch_size,
scheduler_step=scheduler_step,
freeze=freeze,
output_dim=output_dim,
multi_modal=multi_modal,
Expand Down
2 changes: 1 addition & 1 deletion finetuner/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def list_runs(self, experiment_name: Optional[str] = None) -> List[dict]:
response = []
for experiment_name in target_experiments:
url = self._base_url / API_VERSION / EXPERIMENTS / experiment_name / RUNS
response.append(self._handle_request(url=url, method=GET))
response.extend(self._handle_request(url=url, method=GET))
return response

def delete_run(self, experiment_name: str, run_name: str) -> dict:
Expand Down
1 change: 1 addition & 0 deletions finetuner/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,4 @@
EXPERIMENT_NAME = 'experiment_name'
RUN_NAME = 'run_name'
OPTIMIZER_OPTIONS = 'optimizer_options'
SCHEDULER_STEP = 'scheduler_step'
6 changes: 6 additions & 0 deletions finetuner/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
OPTIMIZER_OPTIONS,
OUTPUT_DIM,
RUN_NAME,
SCHEDULER_STEP,
TEXT_MODALITY,
TRAIN_DATA,
)
Expand Down Expand Up @@ -61,6 +62,10 @@ def __init__(
def name(self) -> str:
return self._name

@property
def status(self) -> str:
return self._status

def get_run(self, name: str) -> Run:
"""Get a run by its name.

Expand Down Expand Up @@ -201,6 +206,7 @@ def _create_config_for_run(
BATCH_SIZE: kwargs.get(BATCH_SIZE),
LEARNING_RATE: kwargs.get(LEARNING_RATE),
EPOCHS: kwargs.get(EPOCHS),
SCHEDULER_STEP: kwargs.get(SCHEDULER_STEP),
},
EXPERIMENT_NAME: experiment_name,
RUN_NAME: run_name,
Expand Down
4 changes: 2 additions & 2 deletions finetuner/finetuner.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def list_runs(self, experiment_name: Optional[str] = None) -> List[Run]:
if not experiment_name:
experiments = self.list_experiments()
else:
experiments = [experiment_name]
experiments = [self.get_experiment(name=experiment_name)]
runs = []
for experiment in experiments:
runs.extend(experiment.list_runs())
Expand Down Expand Up @@ -182,6 +182,6 @@ def delete_runs(self, experiment_name: Optional[str] = None):
if not experiment_name:
experiments = self.list_experiments()
else:
experiments = [experiment_name]
experiments = [self.get_experiment(name=experiment_name)]
for experiment in experiments:
experiment.delete_runs()
8 changes: 3 additions & 5 deletions tests/integration/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@

import finetuner
import hubble
from finetuner.client import FinetunerV1Client


@pytest.fixture()
Expand All @@ -30,17 +29,16 @@ def generate_random_data(num_classes, images_per_class):


@pytest.fixture()
def test_client(mocker):
def finetuner_mocker(mocker):
def hubble_login_mocker():
print('Successfully logged in to Hubble!')

def get_auth_token():
return os.environ.get('HUBBLE_STAGING_TOKEN')
return os.environ.get('JINA_AUTH_TOKEN')

mocker.patch.object(hubble, 'login', hubble_login_mocker)
mocker.patch.object(hubble.Auth, 'get_auth_token', get_auth_token)

finetuner.login()
client = FinetunerV1Client()

return client
return finetuner.ft
40 changes: 20 additions & 20 deletions tests/integration/test_experiments.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,27 @@
def test_experiments(
test_client, first_exp_name='first experiment', second_exp_name='second experiment'
finetuner_mocker,
first_exp_name='first experiment',
second_exp_name='second experiment',
):
# delete experiments in case there are any
test_client.delete_experiments()
finetuner_mocker.delete_experiments()
# create an experiment and retrieve it
test_client.create_experiment(name=first_exp_name)
response = test_client.get_experiment(name=first_exp_name)
assert response['name'] == first_exp_name
assert response['status'] == 'ACTIVE'
finetuner_mocker.create_experiment(name=first_exp_name)
exp1 = finetuner_mocker.get_experiment(name=first_exp_name)
assert exp1.name == first_exp_name
assert exp1.status == 'ACTIVE'
# create another experiment and list all experiments
test_client.create_experiment(second_exp_name)
response = test_client.list_experiments()
assert len(response) == 2
assert (
response[0]['name'] == first_exp_name and response[1]['name'] == second_exp_name
)
assert response[0]['status'] == response[1]['status'] == 'ACTIVE'
finetuner_mocker.create_experiment(second_exp_name)
exps = finetuner_mocker.list_experiments()
assert len(exps) == 2
assert exps[0].name == first_exp_name and exps[1].name == second_exp_name
assert exps[0].status == exps[1].status == 'ACTIVE'
# delete the first experiment
test_client.delete_experiment(first_exp_name)
response = test_client.list_experiments()
assert len(response) == 1
assert response[0]['name'] == second_exp_name
finetuner_mocker.delete_experiment(first_exp_name)
exps = finetuner_mocker.list_experiments()
assert len(exps) == 1
assert exps[0].name == second_exp_name
# delete all experiments
test_client.delete_experiments()
response = test_client.list_experiments()
assert not response
finetuner_mocker.delete_experiments()
exps = finetuner_mocker.list_experiments()
assert not exps
89 changes: 45 additions & 44 deletions tests/integration/test_runs.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
def test_runs(
test_client,
finetuner_mocker,
get_image_data,
experiment_name='exp',
first_run='run1',
Expand All @@ -8,55 +8,54 @@ def test_runs(
# get preprocessed data
train_data, eval_data = get_image_data
# delete experiments if there are any
test_client.delete_experiments()
finetuner_mocker.delete_experiments()
# create an experiment and retrieve it
test_client.create_experiment(experiment_name)
response = test_client.get_experiment(name=experiment_name)
assert response['name'] == experiment_name
assert response['status'] == 'ACTIVE'
finetuner_mocker.create_experiment(experiment_name)
exp = finetuner_mocker.get_experiment(name=experiment_name)
assert exp.name == experiment_name
assert exp.status == 'ACTIVE'
# create a first run
test_client.create_run(
finetuner_mocker.create_run(
model='resnet50',
train_data=train_data,
eval_data=eval_data,
experiment_name=experiment_name,
run_name=first_run,
epochs=1,
)
# get the first run
response = test_client.get_run(experiment_name=experiment_name, run_name=first_run)
assert response['name'] == first_run
run = finetuner_mocker.get_run(experiment_name=experiment_name, run_name=first_run)
assert run.name == first_run
# create another run
test_client.create_run(
finetuner_mocker.create_run(
model='resnet50',
train_data=train_data,
eval_data=eval_data,
experiment_name=experiment_name,
run_name=second_run,
epochs=1,
)
# list all runs
response = test_client.list_runs(experiment_name=experiment_name)
assert len(response) == 1
exp_runs = response[0]
assert exp_runs[0]['name'] == first_run and exp_runs[1]['name'] == second_run
runs = finetuner_mocker.list_runs(experiment_name=experiment_name)
assert len(runs) == 2
assert runs[0].name == first_run and runs[1].name == second_run
# delete the first run
test_client.delete_run(experiment_name=experiment_name, run_name=first_run)
response = test_client.list_runs(experiment_name=experiment_name)
exp_runs = response[0]
assert len(exp_runs) == 1
assert exp_runs[0]['name'] == second_run
finetuner_mocker.delete_run(experiment_name=experiment_name, run_name=first_run)
runs = finetuner_mocker.list_runs(experiment_name=experiment_name)
assert len(runs) == 1
assert runs[0].name == second_run
# delete all existing runs
test_client.delete_runs(experiment_name=experiment_name)
response = test_client.list_runs(experiment_name=experiment_name)
exp_runs = response[0]
assert not exp_runs
finetuner_mocker.delete_runs(experiment_name=experiment_name)
runs = finetuner_mocker.list_runs(experiment_name=experiment_name)
assert not runs
# delete experiment
test_client.delete_experiments()
response = test_client.list_experiments()
assert not response
finetuner_mocker.delete_experiments()
exps = finetuner_mocker.list_experiments()
assert not exps


def test_list_runs(
test_client,
finetuner_mocker,
get_image_data,
first_exp='exp1',
second_exp='exp2',
Expand All @@ -66,38 +65,40 @@ def test_list_runs(
# get preprocessed data
train_data, eval_data = get_image_data
# delete experiments if there are any
test_client.delete_experiments()
finetuner_mocker.delete_experiments()
# create two experiments and list them
test_client.create_experiment(name=first_exp)
test_client.create_experiment(name=second_exp)
response = test_client.list_experiments()
assert len(response) == 2
assert response[0]['name'] == first_exp and response[1]['name'] == second_exp
finetuner_mocker.create_experiment(name=first_exp)
finetuner_mocker.create_experiment(name=second_exp)
exps = finetuner_mocker.list_experiments()
assert len(exps) == 2
assert exps[0].name == first_exp and exps[1].name == second_exp
# create a run for each experiment
test_client.create_run(
finetuner_mocker.create_run(
model='resnet50',
train_data=train_data,
eval_data=eval_data,
experiment_name=first_exp,
run_name=first_run,
epochs=1,
)
test_client.create_run(
finetuner_mocker.create_run(
model='resnet50',
train_data=train_data,
eval_data=eval_data,
experiment_name=second_exp,
run_name=second_run,
epochs=1,
)
# list all runs without specifying a target experiment
# which should list all runs across all existing experiments
response = test_client.list_runs()
assert len(response) == 2
assert response[0][0]['name'] == first_run and response[1][0]['name'] == second_run
runs = finetuner_mocker.list_runs()
assert len(runs) == 2
assert runs[0].name == first_run and runs[1].name == second_run
# list all runs of only first experiment
response = test_client.list_runs(experiment_name=first_exp)
assert len(response) == 1
assert response[0][0]['name'] == first_run
runs = finetuner_mocker.list_runs(experiment_name=first_exp)
assert len(runs) == 1
assert runs[0].name == first_run
# delete experiments
test_client.delete_experiments()
response = test_client.list_experiments()
assert not response
finetuner_mocker.delete_experiments()
exps = finetuner_mocker.list_experiments()
assert not exps
2 changes: 1 addition & 1 deletion tests/unit/mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def hubble_login_mocker():
print('Successfully logged in to Hubble!')

def get_auth_token():
return os.environ.get('HUBBLE_STAGING_TOKEN')
return os.environ.get('JINA_AUTH_TOKEN')

mocker.patch.object(hubble, 'login', hubble_login_mocker)
mocker.patch.object(hubble.Auth, 'get_auth_token', get_auth_token)
Expand Down
11 changes: 0 additions & 11 deletions tests/unit/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,17 +86,6 @@ def test_get_run(client_mocker, experiment_name='exp', run_name='run1'):
assert sent_request['method'] == GET


def test_list_runs(client_mocker, experiment_name='exp'):
# Note: we'll test the case when experiment_name
# is not specified in integration tests
sent_request = client_mocker.list_runs(experiment_name=experiment_name)[0]
assert (
sent_request['url']
== client_mocker._base_url / API_VERSION / EXPERIMENTS / experiment_name / RUNS
)
assert sent_request['method'] == GET


def test_delete_run(client_mocker, experiment_name='exp', run_name='run1'):
sent_request = client_mocker.delete_run(
experiment_name=experiment_name, run_name=run_name
Expand Down
3 changes: 3 additions & 0 deletions tests/unit/test_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
OPTIMIZER_OPTIONS,
OUTPUT_DIM,
RUN_NAME,
SCHEDULER_STEP,
STARTED,
TEXT_MODALITY,
TRAIN_DATA,
Expand Down Expand Up @@ -104,6 +105,7 @@ def test_create_run_config():
BATCH_SIZE: 8,
LEARNING_RATE: 0.001,
EPOCHS: 20,
SCHEDULER_STEP: 'batch',
},
EXPERIMENT_NAME: 'exp name',
RUN_NAME: 'run name',
Expand All @@ -121,6 +123,7 @@ def test_create_run_config():
learning_rate=0.001,
epochs=20,
batch_size=8,
scheduler_step='batch',
freeze=False,
output_dim=None,
multi_modal=False,
Expand Down