Commit
PiperOrigin-RevId: 464639642
Showing 6 changed files with 70 additions and 6 deletions.
tensorflow_serving/servables/tensorflow/testdata/tf_text_regression/01/keras_metadata.pb (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
�root"_tf_keras_model*�{"name": "tf_text_ops", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "must_restore_from_config": false, "class_name": "TfTextOps", "config": {"layer was saved without config": true}, "is_graph_network": false, "full_save_spec": {"class_name": "__tuple__", "items": [[{"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [1]}, "float32", "input_1"]}], {}]}, "save_spec": {"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [1]}, "float32", "input_1"]}, "keras_version": "2.6.0", "backend": "tensorflow", "model_config": {"class_name": "TfTextOps"}}2 | ||
�root"_tf_keras_model*�{"name": "tf_text_ops", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "must_restore_from_config": false, "class_name": "TfTextOps", "config": {"layer was saved without config": true}, "is_graph_network": false, "full_save_spec": {"class_name": "__tuple__", "items": [[{"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [1]}, "float32", "input_1"]}], {}]}, "save_spec": {"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [1]}, "float32", "input_1"]}, "keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "TfTextOps"}}2 |
tensorflow_serving/servables/tensorflow/testdata/tf_text_regression/01/saved_model.pb (binary file modified, -460 KB, 91%)
...rvables/tensorflow/testdata/tf_text_regression/01/variables/variables.data-00000-of-00001 (binary file modified, +0 Bytes, 100%)
...low_serving/servables/tensorflow/testdata/tf_text_regression/01/variables/variables.index (binary file modified, +0 Bytes, 100%)
@@ -1,14 +1,78 @@
diff --git a/tensorflow_text/core/kernels/fast_wordpiece_tokenizer.cc b/tensorflow_text/core/kernels/fast_wordpiece_tokenizer.cc
index c1f0e4e..a92ecd8 100644
--- a/tensorflow_text/core/kernels/fast_wordpiece_tokenizer.cc
+++ b/tensorflow_text/core/kernels/fast_wordpiece_tokenizer.cc
@@ -32,7 +32,7 @@ namespace {
 template <bool kGetPieces>
 int GetCurrentOutputSize(std::vector<std::string>* output_pieces,
                          std::vector<int>* output_ids) {
-  if constexpr (kGetPieces) {
+  if (kGetPieces) {
     return output_pieces->size();
   } else {
     return output_ids->size();
@@ -538,10 +538,10 @@ void FastWordpieceTokenizer::AppendTokenToOutput(
     std::vector<int>* output_end_offsets) const {
   auto token_id =
       fast_wordpiece_tokenizer_utils::GetTokenId(encoded_token_value);
-  if constexpr (kGetIds) {
+  if (kGetIds) {
     output_ids->push_back(token_id);
   }
-  if constexpr (kGetPieces || kGetOffsets) {
+  if (kGetPieces || kGetOffsets) {
     // For suffix tokens, the length below is without the suffix indicator.
     int token_substr_length =
         fast_wordpiece_tokenizer_utils::GetTokenLength(encoded_token_value);
@@ -553,7 +553,7 @@ void FastWordpieceTokenizer::AppendTokenToOutput(
       // to adjust and add the length of the suffix indicator string.
       token_substr_length += config_->suffix_indicator()->size();
     }
-    if constexpr (kGetPieces) {
+    if (kGetPieces) {
       // If token id is unk_token_id, it means that it is a dummy node for
       // punctuations that are not contained in the vocabulary, we append
       // the unk_token in this case. Otherwise, we
@@ -569,7 +569,7 @@ void FastWordpieceTokenizer::AppendTokenToOutput(
               ? absl::StrCat(config_->suffix_indicator()->str(), subword_str)
               : subword_str);
     }
-    if constexpr (kGetOffsets) {
+    if (kGetOffsets) {
       // Record the offsets relative to the start of the whole text.
       output_start_offsets->push_back(input_word_offset_in_text +
                                       cur_offset_in_input_word);
@@ -646,15 +646,15 @@ void FastWordpieceTokenizer::ResetOutputAppendUnknownToken(
     std::vector<std::string>* output_pieces, std::vector<int>* output_ids,
     std::vector<int>* output_start_offsets,
     std::vector<int>* output_end_offsets) const {
-  if constexpr (kGetPieces) {
+  if (kGetPieces) {
     output_pieces->resize(original_num_tokens + 1);
     output_pieces->back() = config_->unk_token()->str();
   }
-  if constexpr (kGetIds) {
+  if (kGetIds) {
     output_ids->resize(original_num_tokens + 1);
     output_ids->back() = config_->unk_token_id();
   }
-  if constexpr (kGetOffsets) {
+  if (kGetOffsets) {
     output_start_offsets->resize(original_num_tokens + 1);
     output_start_offsets->back() = input_word_offset_in_text;
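The recurring edit in the hunk above replaces C++17 "if constexpr" with a plain runtime "if", presumably so the patched tf.text sources compile under an older language standard in this build. The sketch below is not part of the patch; it borrows the GetCurrentOutputSize name and signature from the diff purely for illustration, and shows why the swap preserves behavior here: "if constexpr" discards the untaken branch at compile time, while a plain "if" requires both branches to compile, and in these functions both branches are valid for every template instantiation.

// Minimal sketch, assuming only what the diff shows: a runtime `if` on a
// compile-time template parameter behaves the same as `if constexpr` when
// both branches compile for every instantiation.
#include <string>
#include <vector>

template <bool kGetPieces>
int GetCurrentOutputSize(std::vector<std::string>* output_pieces,
                         std::vector<int>* output_ids) {
  if (kGetPieces) {  // Plain `if`: both branches are compiled, one is taken.
    return static_cast<int>(output_pieces->size());
  } else {
    return static_cast<int>(output_ids->size());
  }
}

int main() {
  std::vector<std::string> pieces = {"fast", "##piece"};
  std::vector<int> ids = {3, 17, 42};
  // kGetPieces is still a template argument, so the compiler can usually fold
  // the branch away even without `if constexpr`.
  int n_pieces = GetCurrentOutputSize<true>(&pieces, &ids);   // 2
  int n_ids = GetCurrentOutputSize<false>(&pieces, &ids);     // 3
  return (n_pieces == 2 && n_ids == 3) ? 0 : 1;
}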
diff --git a/tensorflow_text/tftext.bzl b/tensorflow_text/tftext.bzl
index 96cb329..1bb98ec 100644
index ff40480..b649cfc 100644
--- a/tensorflow_text/tftext.bzl
+++ b/tensorflow_text/tftext.bzl
@@ -126,8 +126,7 @@ def tf_cc_library(
@@ -123,8 +123,8 @@ def tf_cc_library(
             "@org_tensorflow//tensorflow/core:portable_tensorflow_lib_lite",
         ],
         "//conditions:default": [
-            "@local_config_tf//:libtensorflow_framework",
-            "@local_config_tf//:tf_header_lib",
+            "@org_tensorflow//tensorflow/core:tensorflow_opensource",
+            "@org_tensorflow//tensorflow/lite/kernels/shim:status_macros", "@org_tensorflow//tensorflow/lite/kernels/shim:tf_op_shim", "@org_tensorflow//tensorflow/lite/kernels/shim:op_kernel", "@org_tensorflow//tensorflow/lite/kernels/shim:tensor_view", "@org_tensorflow//tensorflow/lite/kernels/shim:shape",
     ] + tf_deps + oss_deps,
     })
 native.cc_library(
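In the tftext.bzl hunk, the "//conditions:default" branch of the select() drops the "@local_config_tf" framework and header targets and instead depends on "@org_tensorflow//tensorflow/core:tensorflow_opensource" plus the TF Lite kernel-shim libraries. The likely intent, inferred from the diff rather than stated in the commit message, is to build these kernels against the in-tree TensorFlow sources of the serving workspace instead of a separately installed TensorFlow.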