Skip to content

Commit

Permalink
remove accidentally added prompt tuning from gpt2 and make style
Browse files Browse the repository at this point in the history
  • Loading branch information
TimoImhof committed Nov 7, 2024
1 parent 1dbd412 commit cf4f6a7
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 13 deletions.
7 changes: 4 additions & 3 deletions tests/test_impl/core/test_adapter_hub.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os
from pathlib import Path
import unittest
from pathlib import Path

import numpy as np

Expand All @@ -21,8 +21,9 @@


current_file_path = os.path.abspath(__file__)
fixtures_dir = Path(current_file_path).parent.parent.parent / 'fixtures'
SAMPLE_INDEX = str(fixtures_dir / 'hub-index.sample.json')
fixtures_dir = Path(current_file_path).parent.parent.parent / "fixtures"
SAMPLE_INDEX = str(fixtures_dir / "hub-index.sample.json")


@require_torch
class AdapterHubTest(unittest.TestCase):
Expand Down
1 change: 1 addition & 0 deletions tests/test_impl/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from adapters import ADAPTER_MODEL_MAPPING, init
from transformers.testing_utils import torch_device


global_rng = random.Random()


Expand Down
10 changes: 0 additions & 10 deletions tests/test_methods/test_gpt2.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,16 +80,6 @@ class PrefixTuning(
pass


@require_torch
@pytest.mark.prompt_tuning
class PromptTuning(
GPT2AdapterTestBase,
PromptTuningTestMixin,
unittest.TestCase,
):
pass


@require_torch
@pytest.mark.reft
class ReFT(
Expand Down

0 comments on commit cf4f6a7

Please sign in to comment.