Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Transformer.predict: do not broadcast to listeners #345

Merged
merged 6 commits into from
Jan 30, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ jobs:
condition: and(startsWith(variables['imageName'], 'ubuntu'), eq(variables['python.version'], '3.9'))

- script: |
pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.4.0/en_core_web_trf-3.4.0-py3-none-any.whl --no-deps
pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.5.0/en_core_web_trf-3.5.0-py3-none-any.whl --no-deps
python -c "import spacy; nlp = spacy.load('en_core_web_trf'); doc = nlp('test')"
displayName: 'Test backwards compatibility for v1.1 models'
condition: and(startsWith(variables['imageName'], 'ubuntu'), eq(variables['python.version'], '3.9'))
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
spacy>=3.4.0,<4.0.0
spacy>=3.5.0,<4.0.0
numpy>=1.15.0
transformers>=3.4.0,<4.26.0
torch>=1.6.0
Expand Down
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ zip_safe = false
include_package_data = true
python_requires = >=3.6
install_requires =
spacy>=3.4.0,<4.0.0
spacy>=3.5.0,<4.0.0
numpy>=1.15.0
transformers>=3.4.0,<4.26.0
torch>=1.6.0
Expand Down
26 changes: 20 additions & 6 deletions spacy_transformers/layers/listener.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from typing import Optional, Callable, List
from thinc.api import Model
from spacy.errors import Errors
from spacy.tokens import Doc
from ..data_classes import TransformerData

Expand Down Expand Up @@ -58,16 +59,29 @@ def verify_inputs(self, inputs):

def forward(model: TransformerListener, docs, is_train):
    """Return transformer activations for `docs`.

    During training, activations normally arrive via the broadcast from the
    upstream transformer component (`model._outputs`). If no batch has been
    broadcast (`model._batch_id is None`) — e.g. the transformer is frozen
    and run as an "annotating" component — the embeddings are read from
    `doc._.trf_data` instead. During prediction, missing annotations are
    replaced with zero activations of the listener's output width.
    """
    if is_train:
        # This might occur during training when the transformer layer is frozen / hasn't been updated.
        # In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc.
        if model._batch_id is None:
            outputs = []
            for doc in docs:
                if doc._.trf_data is None:
                    # No broadcast and no stored annotations: configuration error.
                    raise ValueError(Errors.E203.format(name="transformer"))
                else:
                    outputs.append(doc._.trf_data)
            return outputs, _empty_backprop
        else:
            # Normal training path: consume the broadcast batch, and clear it
            # on backprop so stale activations are never reused.
            model.verify_inputs(docs)
            return model._outputs, model.backprop_and_clear
    else:
        width = model.get_dim("nO")
        outputs = []
        for doc in docs:
            if doc._.trf_data is None:
                # Unannotated doc at predict time: fall back to zeros so
                # downstream layers still receive correctly-shaped input.
                outputs.append(TransformerData.zeros(len(doc), width, xp=model.ops.xp))
            else:
                outputs.append(doc._.trf_data)
        return outputs, _empty_backprop


def _empty_backprop(dX):
return []
3 changes: 0 additions & 3 deletions spacy_transformers/pipeline_component.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,9 +227,6 @@ def predict(self, docs: Iterable[Doc]) -> FullTransformerBatch:
activations = FullTransformerBatch.empty(len(docs))
else:
activations = self.model.predict(docs)
batch_id = TransformerListener.get_batch_id(docs)
for listener in self.listeners:
listener.receive(batch_id, activations.doc_data, None)
return activations

def set_annotations(
Expand Down
26 changes: 25 additions & 1 deletion spacy_transformers/tests/test_pipeline_component.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,9 +459,33 @@ def test_frozen_listener():
# train further with frozen listener
for i in range(2):
losses = {}
nlp.update(examples, sgd=optimizer, losses=losses, exclude=["transformer"])
nlp.update(
examples,
sgd=optimizer,
losses=losses,
exclude=["transformer"],
annotates=["transformer"],
)
doc = nlp(text)

# only tagger was updated
assert nlp.get_pipe("transformer").to_bytes() == transformer_bytes
assert nlp.get_pipe("tagger").to_bytes() != tagger_bytes


def test_no_update_listener_in_predict():
    # Regression test: Transformer.predict must NOT broadcast activations to
    # its listeners (that would clobber the listener state set during update).
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    # Grab the tagger's listener layer and the transformer component so we can
    # observe the listener's internal state across update/predict calls.
    listener = nlp.get_pipe("tagger").model.get_ref("tok2vec").get_ref("listener")
    transformer = nlp.get_pipe("transformer")

    text = "This is awesome"
    examples = [Example.from_dict(nlp.make_doc(text), {"tags": ["A", "B", "C"]})]
    docs = [eg.predicted for eg in examples]
    nlp.initialize(lambda: examples)

    # update() broadcasts to listeners, so a backprop callback is stored.
    transformer.update(examples)
    assert listener._backprop is not None

    # predict() must leave the listener untouched: the stored backprop
    # callback from the update above is still present afterwards.
    transformer.predict(docs)
    assert listener._backprop is not None