This repository has been archived by the owner on Dec 16, 2022. It is now read-only.

Remove dependency on the overrides package (#311)
* Removes dependency on overrides

* Changelog

* Fixes tests

* Remove obsolete parameter
dirkgr committed Nov 30, 2021
1 parent 05dd4e6 commit 1591777
Showing 106 changed files with 107 additions and 413 deletions.
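Every file in the diff follows the same mechanical pattern: the `from overrides import overrides` import and the `@overrides` decorator on each overriding method are deleted (marked `-` in the excerpts below). For context, here is a minimal sketch of what the third-party `overrides` package provided; `BaseReader` and `JsonlReader` are hypothetical stand-ins, not classes from allennlp-models:

```python
# Minimal sketch of the pattern this commit removes (hypothetical classes;
# assumes `pip install overrides`).
from overrides import overrides


class BaseReader:
    def _read(self, file_path: str):
        raise NotImplementedError


class JsonlReader(BaseReader):
    @overrides  # fails at class-definition time if BaseReader has no `_read`
    def _read(self, file_path: str):
        with open(file_path) as f:
            yield from f
```

The decorator's value was catching renames and typos at import time: renaming `BaseReader._read` would make every decorated subclass method raise as soon as its module was imported. After this commit that check is gone and the methods are plain Python overrides.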
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Following a breaking change in the NLTK API, we now depend on the most recent version only.

+### Removed
+
+- Removed the dependency on the `overrides` package
+

## [v2.8.0](https://github.com/allenai/allennlp-models/releases/tag/v2.8.0) - 2021-11-05

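An aside, not part of this commit: projects that want to keep override checking without a runtime dependency can use the purely static marker later standardized in Python 3.12 as `typing.override` (PEP 698). It does nothing at runtime; type checkers such as mypy and pyright enforce it. A sketch, with names that merely echo this diff:

```python
# Static alternative to the `overrides` package (illustrative only; not
# used by this commit). Requires Python 3.12+, or `typing_extensions`
# on older interpreters.
from typing import override


class Metric:
    def get_metric(self, reset: bool = False) -> float:
        return 0.0


class MentionRecall(Metric):
    @override  # a type checker errors here if Metric defines no get_metric
    def get_metric(self, reset: bool = False) -> float:
        return 1.0
```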
4 changes: 1 addition & 3 deletions allennlp_models/classification/dataset_readers/boolq.py
@@ -3,7 +3,7 @@
from typing import Optional, Iterable, Dict

from allennlp.common.file_utils import cached_path
-from overrides import overrides

from allennlp.data import DatasetReader, Tokenizer, TokenIndexer, Instance, Field
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
@@ -38,7 +38,6 @@ def __init__(
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

-@overrides
def _read(self, file_path) -> Iterable[Instance]:
file_path = cached_path(file_path, extract_archive=True)
with open(file_path) as f:
@@ -51,7 +50,6 @@ def _read(self, file_path) -> Iterable[Instance]:
label=record.get("label"),
)

-@overrides
def text_to_instance( # type: ignore
self, passage: str, question: str, label: Optional[bool] = None
) -> Instance:
@@ -2,7 +2,7 @@
import logging

from allennlp.data import Tokenizer
-from overrides import overrides

from nltk.tree import Tree

from allennlp.common.file_utils import cached_path
@@ -72,7 +72,6 @@ def __init__(
)
self._granularity = granularity

-@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
@@ -91,7 +90,6 @@ def _read(self, file_path):
if instance is not None:
yield instance

-@overrides
def text_to_instance(self, tokens: List[str], sentiment: str = None) -> Optional[Instance]:
"""
We take `pre-tokenized` input here, because we might not have a tokenizer in this class.
@@ -154,6 +152,5 @@ def make_token(t: Union[str, Token]):
fields["label"] = LabelField(sentiment)
return Instance(fields)

-@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["tokens"].token_indexers = self._token_indexers
@@ -1,7 +1,7 @@
from typing import Dict, Union

import numpy
-from overrides import overrides

import torch
from torch import nn
import torch.nn.functional as F
@@ -201,7 +201,6 @@ def __init__(
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)

-@overrides
def forward(
self, # type: ignore
tokens: TextFieldTensors,
@@ -319,7 +318,6 @@ def forward(

return output_dict

-@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
@@ -333,7 +331,6 @@ def make_output_human_readable(
output_dict["label"] = labels
return output_dict

-@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
3 changes: 0 additions & 3 deletions allennlp_models/coref/dataset_readers/conll.py
@@ -2,7 +2,6 @@
import collections
from typing import Dict, List, Optional, Tuple, DefaultDict

-from overrides import overrides

from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
@@ -69,7 +68,6 @@ def __init__(
self._max_sentences = max_sentences
self._remove_singleton_clusters = remove_singleton_clusters

-@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
@@ -90,7 +88,6 @@ def _read(self, file_path: str):

yield self.text_to_instance([s.words for s in sentences], list(clusters.values()))

-@overrides
def text_to_instance(
self, # type: ignore
sentences: List[List[str]],
3 changes: 0 additions & 3 deletions allennlp_models/coref/dataset_readers/preco.py
@@ -2,7 +2,6 @@
import logging
from typing import Dict, List, Optional, Tuple

-from overrides import overrides

from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
@@ -65,7 +64,6 @@ def __init__(
self._max_sentences = max_sentences
self._remove_singleton_clusters = remove_singleton_clusters

-@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
@@ -75,7 +73,6 @@ def _read(self, file_path: str):
example = json.loads(line)
yield self.text_to_instance(example["sentences"], example["mention_clusters"])

-@overrides
def text_to_instance(
self, # type: ignore
sentences: List[List[str]],
3 changes: 0 additions & 3 deletions allennlp_models/coref/dataset_readers/winobias.py
@@ -2,7 +2,6 @@
import collections
from typing import Any, Dict, List, Optional, Tuple, DefaultDict

-from overrides import overrides

from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
@@ -67,7 +66,6 @@ def __init__(
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

-@overrides
def _read(self, file_path: str):

for sentence in open(cached_path(file_path), "r"):
@@ -104,7 +102,6 @@ def _read(self, file_path: str):

yield self.text_to_instance([Token(x) for x in words], [x for x in clusters.values()])

-@overrides
def text_to_instance(
self, # type: ignore
sentence: List[Token],
5 changes: 1 addition & 4 deletions allennlp_models/coref/metrics/conll_coref_scores.py
@@ -1,7 +1,7 @@
from typing import Any, Dict, List, Tuple
from collections import Counter

-from overrides import overrides

from scipy.optimize import linear_sum_assignment
import numpy as np
import torch
@@ -20,7 +20,6 @@ def __init__(self, allow_singletons=False) -> None:
self.scorers = [Scorer(m) for m in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]
self.allow_singletons = allow_singletons

-@overrides
def __call__(
self, # type: ignore
top_spans: torch.Tensor,
@@ -64,7 +63,6 @@ def __call__(
predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold
)

-@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float, float]:
metrics = (lambda e: e.get_precision(), lambda e: e.get_recall(), lambda e: e.get_f1())
precision, recall, f1_score = tuple(
@@ -74,7 +72,6 @@ def get_metric(self, reset: bool = False) -> Tuple[float, float, float]:
self.reset()
return precision, recall, f1_score

-@overrides
def reset(self):
self.scorers = [Scorer(metric) for metric in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]

5 changes: 1 addition & 4 deletions allennlp_models/coref/metrics/mention_recall.py
@@ -1,5 +1,5 @@
from typing import Any, Dict, List, Set, Tuple
-from overrides import overrides


import torch
from allennlp.nn.util import dist_reduce_sum
@@ -13,7 +13,6 @@ def __init__(self) -> None:
self._num_gold_mentions = 0
self._num_recalled_mentions = 0

-@overrides
def __call__(
self, # type: ignore
batched_top_spans: torch.Tensor,
@@ -34,7 +33,6 @@ def __call__(
self._num_gold_mentions += dist_reduce_sum(num_gold_mentions)
self._num_recalled_mentions += dist_reduce_sum(num_recalled_mentions)

-@overrides
def get_metric(self, reset: bool = False) -> float:
if self._num_gold_mentions == 0:
recall = 0.0
@@ -44,7 +42,6 @@ def get_metric(self, reset: bool = False) -> float:
self.reset()
return recall

-@overrides
def reset(self):
self._num_gold_mentions = 0
self._num_recalled_mentions = 0
5 changes: 1 addition & 4 deletions allennlp_models/coref/models/coref.py
@@ -4,7 +4,7 @@

import torch
import torch.nn.functional as F
-from overrides import overrides


from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
@@ -133,7 +133,6 @@ def __init__(
self._lexical_dropout = lambda x: x
initializer(self)

-@overrides
def forward(
self, # type: ignore
text: TextFieldTensors,
@@ -382,7 +381,6 @@ def forward(
output_dict["document"] = [x["original_text"] for x in metadata]
return output_dict

-@overrides
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
@@ -465,7 +463,6 @@ def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
output_dict["clusters"] = batch_clusters
return output_dict

-@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_recall = self._mention_recall.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
4 changes: 1 addition & 3 deletions allennlp_models/coref/predictors/coref.py
@@ -1,6 +1,6 @@
from typing import List, Dict

-from overrides import overrides

from spacy.tokens import Doc
import numpy

@@ -79,7 +78,6 @@ def predict_tokenized(self, tokenized_document: List[str]) -> JsonDict:
instance = self._words_list_to_instance(tokenized_document)
return self.predict_instance(instance)

-@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
@@ -193,7 +192,6 @@ def _words_list_to_instance(self, words: List[str]) -> Instance:
instance = self._dataset_reader.text_to_instance(sentences)
return instance

-@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"document": "string of document text"}`
4 changes: 0 additions & 4 deletions allennlp_models/generation/dataset_readers/cnn_dm.py
@@ -6,7 +6,6 @@
import hashlib
import ftfy

-from overrides import overrides

from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
@@ -120,7 +119,6 @@ def _read_story(story_path: str):
def _strip_extension(filename: str) -> str:
return os.path.splitext(filename)[0]

-@overrides
def _read(self, file_path: str):
# Reset exceeded counts
self._source_max_exceeded = 0
@@ -157,7 +155,6 @@ def _read(self, file_path: str):

yield self.text_to_instance(article, summary)

-@overrides
def text_to_instance(
self, source_sequence: str, target_sequence: str = None
) -> Instance: # type: ignore
@@ -183,7 +180,6 @@ def text_to_instance(
else:
return Instance({"source_tokens": source_field})

-@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
@@ -3,7 +3,7 @@
import warnings

import torch
-from overrides import overrides


from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
@@ -118,7 +118,6 @@ def __init__(
UserWarning,
)

-@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
@@ -144,7 +143,6 @@ def _tokens_to_ids(tokens: List[Token]) -> List[int]:
out.append(ids.setdefault(token.text, len(ids)))
return out

-@overrides
def text_to_instance(
self,
source_string: str,
@@ -207,7 +205,6 @@ def text_to_instance(

return Instance(fields_dict)

-@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
4 changes: 0 additions & 4 deletions allennlp_models/generation/dataset_readers/seq2seq.py
@@ -3,7 +3,6 @@
import logging
import copy

-from overrides import overrides

from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
@@ -119,7 +118,6 @@ def __init__(
self._target_max_exceeded = 0
self.quoting = quoting

-@overrides
def _read(self, file_path: str):
# Reset exceeded counts
self._source_max_exceeded = 0
@@ -150,7 +148,6 @@ def _read(self, file_path: str):
self._target_max_tokens,
)

-@overrides
def text_to_instance(
self, source_string: str, target_string: str = None
) -> Instance: # type: ignore
@@ -177,7 +174,6 @@ def text_to_instance(
else:
return Instance({"source_tokens": source_field})

-@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
(Remaining changed files not shown.)
