From 1da7332349d5b1196ccfa6dc719b839876f1e83e Mon Sep 17 00:00:00 2001 From: Kevin Chen <45886021+kevinch-nv@users.noreply.github.com> Date: Tue, 14 Jun 2022 10:28:57 -0700 Subject: [PATCH] TensorRT 8.4 GA ONNX Parser Release (#851) * TensorRT 8.4 EA ONNX Parser Release * support protobuf >= 3.11 (#812) Signed-off-by: George Wu * missed other sections for supporting >= protobuf 3.11 (#817) * missed one other section for supporting >= protobuf 3.11 Signed-off-by: George Wu * one more section Signed-off-by: George Wu * fix ident Signed-off-by: George Wu * TensorRT 8.4-GA ONNX Parser Release * TensorRT 8.4.1.5 updates (#849) Signed-off-by: Rajeev Rao Co-authored-by: George Wu Co-authored-by: Rajeev Rao --- CMakeLists.txt | 3 +- ConditionalHelpers.cpp | 29 +- ConditionalHelpers.hpp | 9 +- ImporterContext.cpp | 110 ++ ImporterContext.hpp | 104 +- LICENSE | 30 + ModelImporter.cpp | 103 +- NvOnnxParser.h | 5 +- OnnxAttrs.cpp | 2 +- README.md | 14 +- ShapeTensor.cpp | 56 +- ShapeTensor.hpp | 20 +- ShapedWeights.cpp | 117 +- ShapedWeights.hpp | 13 +- TensorOrWeights.hpp | 5 + builtin_op_importers.cpp | 485 +++--- docs/Changelog.md | 18 +- docs/operators.md | 23 +- half.h | 42 + ieee_half.h | 3094 +++++++++++++++++++++++++++++++++++++ onnx2trt.hpp | 10 +- onnx2trt_common.hpp | 3 +- onnx2trt_utils.cpp | 173 +-- onnx2trt_utils.hpp | 85 +- onnx_tensorrt/__init__.py | 2 +- onnx_utils.hpp | 8 +- 26 files changed, 3948 insertions(+), 615 deletions(-) create mode 100644 ImporterContext.cpp create mode 100644 half.h create mode 100644 ieee_half.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 80c51182..2836e2ca 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,7 +28,7 @@ add_definitions("-DSOURCE_LENGTH=${SOURCE_LENGTH}") # Version information #-------------------------------------------------- set(ONNX2TRT_MAJOR 8) -set(ONNX2TRT_MINOR 2) +set(ONNX2TRT_MINOR 4) set(ONNX2TRT_PATCH 1) set(ONNX2TRT_VERSION "${ONNX2TRT_MAJOR}.${ONNX2TRT_MINOR}.${ONNX2TRT_PATCH}" CACHE STRING 
"ONNX2TRT version") @@ -42,6 +42,7 @@ set(IMPORTER_SOURCES builtin_op_importers.cpp onnx2trt_utils.cpp onnxErrorRecorder.cpp + ImporterContext.cpp ShapedWeights.cpp ShapeTensor.cpp LoopHelpers.cpp diff --git a/ConditionalHelpers.cpp b/ConditionalHelpers.cpp index d4ba4d36..d900c726 100644 --- a/ConditionalHelpers.cpp +++ b/ConditionalHelpers.cpp @@ -71,13 +71,23 @@ Status addConditionalInputLayer(IImporterContext* ctx, nvinfer1::IIfConditional* // Take a snapshot of the network before and after parsing the subgraph and return a list // of newly added network layers. -Status importSubgraph( - IImporterContext* ctx, const ::ONNX_NAMESPACE::GraphProto& subgraph, std::vector& newLayers) +Status importSubgraph(IImporterContext* ctx, ::ONNX_NAMESPACE::GraphProto const& subgraph, + std::vector& newLayers, StringMap& subgraphTensors) { auto net = ctx->network(); int32_t beforeSubgraph = net->getNbLayers(); + + // Establish scope for names local to the subgraph. + NameScope nameScope(*ctx); + CHECK(onnx2trt::parseGraph(ctx, subgraph)); + for (int32_t i = 0; i < subgraph.output_size(); ++i) + { + std::string name = subgraph.output(i).name(); + subgraphTensors.emplace(std::make_pair(name, ctx->tensors().at(name))); + } + for (int32_t i = beforeSubgraph; i < net->getNbLayers(); i++) { newLayers.push_back(net->getLayer(i)); @@ -135,8 +145,9 @@ Status addIfInputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditi // Add an IConditionalOutputLayer to `layer`'s outputs. 
Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditional, - const ::ONNX_NAMESPACE::GraphProto& thenGraph, const std::vector& thenLayers, - const ::ONNX_NAMESPACE::GraphProto& elseGraph, const std::vector& elseLayers, + ::ONNX_NAMESPACE::GraphProto const& thenGraph, std::vector const& thenLayers, + StringMap const& thenSubgraphTensors, ::ONNX_NAMESPACE::GraphProto const& elseGraph, + std::vector const& elseLayers, StringMap const& elseSubgraphTensors, std::vector& graphOutputs) { // Reported outputs are outputs that the ONNX model reports as subgraph outputs. This list is @@ -166,7 +177,8 @@ Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* condit // Retrieve the output tensors of a subgraph (tensors exiting the subgraph). auto getSubgraphOutputTensors = [](IImporterContext* ctx, std::vector& sgOutputs, SubgraphPortsMap& subgraphOutputs, - const ::ONNX_NAMESPACE::GraphProto& subgraph, std::vector subgraphLayers) { + ::ONNX_NAMESPACE::GraphProto const& subgraph, std::vector subgraphLayers, + StringMap const& subgraphTensors) { for (const auto& layer : subgraphLayers) { const auto layerName = layer->getName(); @@ -184,17 +196,18 @@ Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* condit for (int32_t outIdx = 0; outIdx < nbOutputs; outIdx++) { const auto thenName = subgraph.output(outIdx).name(); - auto* thenTensor = &convertToTensor(ctx->tensors().at(thenName), ctx); + TensorOrWeights tw = subgraphTensors.at(thenName); + auto* thenTensor = &convertToTensor(tw, ctx); sgOutputs.push_back(thenTensor); } } }; std::vector thenOutputTensors; - getSubgraphOutputTensors(ctx, thenOutputTensors, thenOutputs, thenGraph, thenLayers); + getSubgraphOutputTensors(ctx, thenOutputTensors, thenOutputs, thenGraph, thenLayers, thenSubgraphTensors); std::vector elseSGOutputTensors; - getSubgraphOutputTensors(ctx, elseSGOutputTensors, elseOutputs, elseGraph, elseLayers); + getSubgraphOutputTensors(ctx, 
elseSGOutputTensors, elseOutputs, elseGraph, elseLayers, elseSubgraphTensors); ASSERT(thenOutputTensors.size() == elseSGOutputTensors.size() && "The then/else branches of an If operator must have the same number of outputs.", diff --git a/ConditionalHelpers.hpp b/ConditionalHelpers.hpp index a320a218..fb7d6feb 100644 --- a/ConditionalHelpers.hpp +++ b/ConditionalHelpers.hpp @@ -34,8 +34,8 @@ Status getSubgraphOutputs(const std::vector& newLayers, // Take a snapshot of the network before and after parsing the subgraph and return a list // of newly added network layers. -Status importSubgraph( - IImporterContext* ctx, const ::ONNX_NAMESPACE::GraphProto& subgraph, std::vector& newLayers); +Status importSubgraph(IImporterContext* ctx, ::ONNX_NAMESPACE::GraphProto const& subgraph, + std::vector& newLayers, StringMap& subgraphTensors); using InputsMap = std::unordered_map; @@ -45,8 +45,9 @@ onnx2trt::Status addIfInputLayers(IImporterContext* ctx, nvinfer1::IIfConditiona // Add IIfConditionalOutputLayers to the outputs of the subgraph indicated by `subgraph`. 
onnx2trt::Status addIfOutputLayers(IImporterContext* ctx, nvinfer1::IIfConditional* conditional, - const ::ONNX_NAMESPACE::GraphProto& thenGraph, const std::vector& thenLayers, - const ::ONNX_NAMESPACE::GraphProto& elseGraph, const std::vector& elseLayers, + ::ONNX_NAMESPACE::GraphProto const& thenGraph, std::vector const& thenLayers, + StringMap const& thenSubgraphTensors, ::ONNX_NAMESPACE::GraphProto const& elseGraph, + std::vector const& elseLayers, StringMap const& elseSubgraphTensors, std::vector& graphOutputs); } // namespace onnx2trt diff --git a/ImporterContext.cpp b/ImporterContext.cpp new file mode 100644 index 00000000..43474456 --- /dev/null +++ b/ImporterContext.cpp @@ -0,0 +1,110 @@ +#include "ImporterContext.hpp" + +namespace onnx2trt +{ + +void ImporterContext::pushBaseNameScope() +{ + mBaseNameScopeStack.push_back({}); +} + +void ImporterContext::popBaseNameScope() +{ + auto& tensorMap = tensors(); + for (auto& binding : mBaseNameScopeStack.back()) + { + if (binding.second.first) + { + tensorMap.erase(binding.first); + } + else + { + tensorMap.at(binding.first) = std::move(binding.second.second); + } + } + mBaseNameScopeStack.pop_back(); +} + +void ImporterContext::registerTensor(TensorOrWeights tensor, std::string const& basename) +{ + // TRT requires unique tensor names. + std::string const& uniqueName = generateUniqueName(mTensorNames, basename); + + if (tensor) + { + if (tensor.is_tensor()) + { + tensor.tensor().setName(uniqueName.c_str()); + // Logging macro refers to ctx. 
+ auto* ctx = this; + LOG_VERBOSE("Registering tensor: " << uniqueName << " for ONNX tensor: " << basename); + } + else if (tensor.is_weights()) + { + auto const& weights = tensor.weights(); + if (tensor.weights().type == ::ONNX_NAMESPACE::TensorProto::INT64) + { + tensor = ShapedWeights{::ONNX_NAMESPACE::TensorProto::INT32, + convertINT64(reinterpret_cast(weights.values), weights.shape, this), weights.shape}; + } + // It may be possible for nested subgraphs to have different values for the same initializer. + // For multiple name scopes - use unique name to keep track of weights. + if (!mBaseNameScopeStack.empty()) + { + tensor.weights().setName(uniqueName.c_str()); + } + else + { + tensor.weights().setName(basename.c_str()); + } + } + } + + auto const p = this->tensors().emplace(basename, TensorOrWeights{}); + bool nameIsDuplicate = false; + if (!mBaseNameScopeStack.empty()) + { + // Remember original binding so it can be restored when scope is popped. + auto const q + = mBaseNameScopeStack.back().emplace(basename, std::make_pair(p.second, std::move(p.first->second))); + // Check that scope did not already have a binding for basename. + nameIsDuplicate = !q.second; + } + else + { + // The condition here accounts for ModelImporter::importModel reserving + // output names by registering null tensors. + nameIsDuplicate = !p.second && !p.first->second.isNullTensor(); + } + if (nameIsDuplicate) + { + throw std::runtime_error("ONNX graph has duplicate tensor name: " + basename); + } + p.first->second = std::move(tensor); +} + +void ImporterContext::registerLayer(nvinfer1::ILayer* layer, std::string const& basename) +{ + // No layer will be added for Constant nodes in ONNX. + if (layer) + { + std::string const name = basename.empty() ? layer->getName() : basename; + std::string const& uniqueName = generateUniqueName(mLayerNames, name); + + auto* ctx = this; // To enable logging. 
+ LOG_VERBOSE("Registering layer: " << uniqueName << " for ONNX node: " << basename); + + layer->setName(uniqueName.c_str()); + if (layer->getType() == nvinfer1::LayerType::kCONSTANT) + { + if (basename != uniqueName) + { + LOG_ERROR("Constant layer: " << uniqueName << " can be a duplicate of: " << basename); + assert(!"Internal error: duplicate constant layers for the same weights"); + } + mConstantLayers.insert({uniqueName, static_cast(layer)}); + } + } +} + +} // namespace onnx2trt diff --git a/ImporterContext.hpp b/ImporterContext.hpp index 88273607..4bf235e6 100644 --- a/ImporterContext.hpp +++ b/ImporterContext.hpp @@ -9,7 +9,9 @@ #include "onnxErrorRecorder.hpp" #include "onnx/common/stl_backports.h" #include +#include #include +#include namespace onnx2trt { @@ -84,8 +86,24 @@ class ImporterContext final : public IImporterContext int64_t mSuffixCounter{0}; // increasing suffix counter used to uniquify layer names. std::unordered_set mUnsupportedShapeTensors; // Container to hold output tensor names of layers that produce shape tensor outputs but do not natively support them. StringMap mLoopTensors; // Container to map subgraph tensors to their original outer graph names. - std::string mOnnxFileLocation; // Keep track of the directory of the parsed ONNX file + std::string mOnnxFileLocation; // Keep track of the directory of the parsed ONNX file std::unique_ptr mErrorWrapper; // error recorder to control TRT errors + StringMap mConstantLayers; + + //! Stack of names defined by nested ONNX graphs, with information about how to + //! restore their associated values when popping back to the surrounding scope. + //! + //! The stack is empty when processing the top-level ONNX graph. + //! back() corresponds to the innermost ONNX graph being processed. + //! + //! For each entry {name, {bool, TensorOrWeights}}: + //! + //! * If the bool is true, the name was newly introduced by the scope. + //! + //! 
* If the bool is false, the name shadows a name in a surrounding scope, + //! and TensorOrWeights was the name's value before being shadowed. + //! + std::vector>> mBaseNameScopeStack; public: ImporterContext(nvinfer1::INetworkDefinition* network, nvinfer1::ILogger* logger) @@ -134,52 +152,15 @@ class ImporterContext final : public IImporterContext { return mOnnxFileLocation; } - // This actually handles weights as well, but is named this way to be consistent with the tensors() - void registerTensor(TensorOrWeights tensor, const std::string& basename) override - { - // TRT requires unique tensor names. - const std::string uniqueName = generateUniqueName(mTensorNames, basename); - if (tensor) - { - auto* ctx = this; // To enable logging. - if (tensor.is_tensor()) - { - tensor.tensor().setName(uniqueName.c_str()); + void pushBaseNameScope() override; - LOG_VERBOSE("Registering tensor: " << uniqueName << " for ONNX tensor: " << basename); - } - else if (tensor.is_weights()) - { - const auto& weights = tensor.weights(); - if (tensor.weights().type == ::ONNX_NAMESPACE::TensorProto::INT64) - { - tensor = ShapedWeights{::ONNX_NAMESPACE::TensorProto::INT32, - convertINT64(reinterpret_cast(weights.values), weights.shape, ctx), weights.shape}; - } - tensor.weights().setName(basename.c_str()); - } + void popBaseNameScope() override; - } - // Overwrite previous tensors registered with the same name (this only happens when there are subgraphs, - // and in that case, overwriting is the desired behavior). - this->tensors()[basename] = std::move(tensor); - } - - void registerLayer(nvinfer1::ILayer* layer, const std::string& basename) override - { - // No layer will be added for Constant nodes in ONNX. - if (layer) - { - const std::string name = basename.empty() ? layer->getName() : basename; - const std::string uniqueName = generateUniqueName(mLayerNames, name); - - auto* ctx = this; // To enable logging. 
- LOG_VERBOSE("Registering layer: " << uniqueName << " for ONNX node: " << basename); + // This actually handles weights as well, but is named this way to be consistent with the tensors() + void registerTensor(TensorOrWeights tensor, std::string const& basename) override; - layer->setName(uniqueName.c_str()); - } - } + void registerLayer(nvinfer1::ILayer* layer, std::string const& basename) override; nvinfer1::ILogger& logger() override { @@ -188,16 +169,10 @@ class ImporterContext final : public IImporterContext ShapedWeights createTempWeights(ShapedWeights::DataType type, nvinfer1::Dims shape, uint8_t value = 0) override { + std::string const& name = generateUniqueName(mTensorNames, "tmp_weight"); ShapedWeights weights(type, nullptr, shape); - // Need special logic for handling scalars. - if (shape.nbDims == 0) - { - mTempBufs.push_back(std::vector(getDtypeSize(type), value)); - } - else - { - mTempBufs.push_back(std::vector(weights.size_bytes(), value)); - } + weights.setName(name.c_str()); + mTempBufs.push_back(std::vector(weights.size_bytes(), value)); weights.values = mTempBufs.back().data(); return weights; } @@ -256,8 +231,13 @@ class ImporterContext final : public IImporterContext { return mOpsets.begin()->second; } + else if (mOpsets.count(domain)) + { + return mOpsets.at(domain); + } else { + domain = "ai.onnx"; assert(mOpsets.count(domain)); return mOpsets.at(domain); } @@ -271,8 +251,22 @@ class ImporterContext final : public IImporterContext { return mErrorWrapper ? 
mErrorWrapper->getErrorRecorder() : nullptr; } + nvinfer1::IConstantLayer* getConstantLayer(const char* name) const final + { + if (name == nullptr) + { + return nullptr; + } + auto const iter = mConstantLayers.find(name); + if (iter == mConstantLayers.end()) + { + return nullptr; + } + return iter->second; + } + private: - std::string generateUniqueName(std::set& namesSet, const std::string& basename) + std::string const& generateUniqueName(std::set& namesSet, const std::string& basename) { std::string candidate = basename; @@ -283,8 +277,8 @@ class ImporterContext final : public IImporterContext } namesSet.insert(candidate); - - return candidate; + // Return reference to newly inserted string to avoid any c_str()'s going out of scope + return *namesSet.find(candidate); } }; diff --git a/LICENSE b/LICENSE index c5458119..6141a5b9 100644 --- a/LICENSE +++ b/LICENSE @@ -200,3 +200,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + + PORTIONS LICENSED AS FOLLOWS + + > ieee_half.h + > half.h + + The MIT License + + Copyright (c) 2012-2017 Christian Rau + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + diff --git a/ModelImporter.cpp b/ModelImporter.cpp index 01bff224..93c56144 100644 --- a/ModelImporter.cpp +++ b/ModelImporter.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace onnx2trt { @@ -244,8 +245,8 @@ Status importInput(ImporterContext* ctx, ::ONNX_NAMESPACE::ValueInfoProto const& { LOG_VERBOSE( "Adding network input: " << input.name() << " with dtype: " << trtDtype << ", dimensions: " << trt_dims); - ASSERT_INPUT( (*tensor = ctx->network()->addInput(input.name().c_str(), trtDtype, trt_dims)) && "Failed to add input to the network.", - ErrorCode::kUNSUPPORTED_NODE, input.name()); + *tensor = ctx->network()->addInput(input.name().c_str(), trtDtype, trt_dims); + ASSERT_INPUT(*tensor && "Failed to add input to the network.", ErrorCode::kUNSUPPORTED_NODE, input.name()); } // Fill in field `tensor` for any dimensions that had names in the ONNX. @@ -358,13 +359,13 @@ Status deserialize_onnx_model(void const* serialized_onnx_model, size_t serializ else { google::protobuf::io::CodedInputStream coded_input(&raw_input); - #if GOOGLE_PROTOBUF_VERSION >= 3011000 +#if GOOGLE_PROTOBUF_VERSION >= 3011000 // Starting Protobuf 3.11 accepts only single parameter. 
coded_input.SetTotalBytesLimit(std::numeric_limits::max()); - #else +#else // Note: This WARs the very low default size limit (64MB) coded_input.SetTotalBytesLimit(std::numeric_limits::max(), std::numeric_limits::max() / 4); - #endif +#endif ASSERT( (model->ParseFromCodedStream(&coded_input)) && "Failed to parse the ONNX model.", ErrorCode::kMODEL_DESERIALIZE_FAILED); } return Status::success(); @@ -380,24 +381,22 @@ Status deserialize_onnx_model(int fd, bool is_serialized_as_text, ::ONNX_NAMESPA else { google::protobuf::io::CodedInputStream coded_input(&raw_input); - #if GOOGLE_PROTOBUF_VERSION >= 3011000 + // Note: This WARs the very low default size limit (64MB) +#if GOOGLE_PROTOBUF_VERSION >= 3011000 // Starting Protobuf 3.11 accepts only single parameter. coded_input.SetTotalBytesLimit(std::numeric_limits::max()); - #else - // Note: This WARs the very low default size limit (64MB) - coded_input.SetTotalBytesLimit(std::numeric_limits::max(), std::numeric_limits::max() / 4); - #endif +#else // Note: This WARs the very low default size limit (64MB) + coded_input.SetTotalBytesLimit(std::numeric_limits::max(), std::numeric_limits::max()/4); +#endif ASSERT( (model->ParseFromCodedStream(&coded_input)) && "Failed to parse the ONNX model.", ErrorCode::kMODEL_DESERIALIZE_FAILED); } return Status::success(); } -bool ModelImporter::supportsModel( - void const* serialized_onnx_model, size_t serialized_onnx_model_size, SubGraphCollection_t& sub_graph_collection, - const char* model_path) +bool ModelImporter::supportsModel(void const* serialized_onnx_model, size_t serialized_onnx_model_size, + SubGraphCollection_t& sub_graph_collection, const char* model_path) { - ::ONNX_NAMESPACE::ModelProto model; bool is_serialized_as_text = false; Status status @@ -470,13 +469,11 @@ bool ModelImporter::supportsModel( // Add the node to the subgraph if: // 1. There is an importer function registered for the operator type // 2. It is not directly connected to an unsupported input - // 3. 
It did not illegally produce a shape tensor output - // 4. The importer function did not throw an assertion + // 3. The importer function did not throw an assertion bool registered = supportsOperator(node.op_type().c_str()); bool unsupportedInput = (input_node.empty()) ? false : checkForInput(node); - bool unsupportedShapeTensor = ctx->unsupportedShapeTensors().count(node.name()) > 0 ? true : false; bool unsuccessfulParse = node_idx == error_node; - if (registered && !unsupportedInput && !unsupportedShapeTensor && !unsuccessfulParse) + if (registered && !unsupportedInput && !unsuccessfulParse) { if (newSubGraph) { @@ -508,17 +505,20 @@ bool ModelImporter::supportsModel( // Mark experimental ops as unsupported, mark plugin ops as supported bool ModelImporter::supportsOperator(const char* op_name) const { - if (std::string(op_name) == "NonMaxSuppression") + auto is = [op_name](const char* name) { return std::strcmp(op_name, name) == 0; }; + + if (is("NonMaxSuppression")) { return false; } - if (std::string(op_name) == "EfficientNMS_TRT" || std::string(op_name) == "PyramidROIAlign_TRT" || std::string(op_name) == "MultilevelCropAndResize_TRT" - || std::string(op_name) == "DisentangledAttention_TRT") + if (is("EfficientNMS_TRT") || is("PyramidROIAlign_TRT") || is("MultilevelCropAndResize_TRT") + || is("DisentangledAttention_TRT")) { return true; } return _op_importers.count(op_name); } + bool ModelImporter::parseWithWeightDescriptors(void const* serialized_onnx_model, size_t serialized_onnx_model_size) { _current_node = -1; @@ -547,6 +547,12 @@ bool ModelImporter::parseWithWeightDescriptors(void const* serialized_onnx_model bool ModelImporter::parse(void const* serialized_onnx_model, size_t serialized_onnx_model_size, const char* model_path) { + auto* const ctx = &_importer_ctx; + if (ctx->network()->getNbLayers() > 0) + { + LOG_ERROR("Parse was called with a non-empty network definition"); + return false; + } if (model_path) { 
_importer_ctx.setOnnxFileLocation(model_path); @@ -554,43 +560,6 @@ bool ModelImporter::parse(void const* serialized_onnx_model, size_t serialized_o return this->parseWithWeightDescriptors(serialized_onnx_model, serialized_onnx_model_size); } -void removeShapeTensorCasts(IImporterContext* ctx) -{ - // Removes any casts on shape tensors, as TensorRT does not support them. - for (int i = 0, e = ctx->network()->getNbLayers(); i < e; ++i) - { - nvinfer1::ILayer* layer = ctx->network()->getLayer(i); - if (layer->getNbOutputs() > 0 && layer->getOutput(0)->isShapeTensor()) - { - layer->resetOutputType(0); - nvinfer1::ITensor& t = *layer->getOutput(0); - // Assume that boolean tensors were not cast, and thus have their type correctly set. - const nvinfer1::DataType shapeTensorType = t.getType() == nvinfer1::DataType::kBOOL ? nvinfer1::DataType::kBOOL : nvinfer1::DataType::kINT32; - layer->setOutputType(0, shapeTensorType); - // Set type only if necessary, to avoid TensorRT warnings - // about setting type of non-input/output tensors. - if (t.getType() != shapeTensorType) - { - t.setType(shapeTensorType); - } - // Some layers do not support shape tensor outputs. Keep track of these tensor names - // for supportsModel(). - auto type = layer->getType(); - auto elementwiseOp = type == nvinfer1::LayerType::kELEMENTWISE ? (static_cast(layer))->getOperation() : nvinfer1::ElementWiseOperation::kSUM; - auto reduceOp = type == nvinfer1::LayerType::kREDUCE ? (static_cast(layer))->getOperation() : nvinfer1::ReduceOperation::kSUM; - auto fillOp = type == nvinfer1::LayerType::kFILL - ? 
(static_cast(layer))->getOperation() - : nvinfer1::FillOperation::kLINSPACE; - if (!supportsShapeTensor(type, elementwiseOp, reduceOp, fillOp)) - { - auto name = layer->getName(); - ctx->unsupportedShapeTensors().insert(name); - LOG_ERROR("Found unsupported shape-tensor producing layer:" << name); - } - } - } -} - Status ModelImporter::importModel( ::ONNX_NAMESPACE::ModelProto const& model) { @@ -738,15 +707,27 @@ Status ModelImporter::importModel( } } - removeShapeTensorCasts(ctx); return Status::success(); } bool ModelImporter::parseFromFile(const char* onnxModelFile, int32_t verbosity) { + auto* ctx = &_importer_ctx; + + // Define S_ISREG macro for Windows +#if !defined(S_ISREG) +# define S_ISREG(mode) (((mode) & S_IFMT) == S_IFREG) +#endif + + struct stat sb; + if (stat(onnxModelFile, &sb) == 0 && !S_ISREG(sb.st_mode)) + { + LOG_ERROR("Input is not a regular file: " << onnxModelFile); + return false; + } + GOOGLE_PROTOBUF_VERIFY_VERSION; ::ONNX_NAMESPACE::ModelProto onnx_model; - auto* ctx = &_importer_ctx; const bool is_binary = ParseFromFile_WAR(&onnx_model, onnxModelFile); if (!is_binary && !ParseFromTextFile(&onnx_model, onnxModelFile)) @@ -772,7 +753,7 @@ bool ModelImporter::parseFromFile(const char* onnxModelFile, int32_t verbosity) { //...Read input file, parse it std::ifstream onnx_file(onnxModelFile, std::ios::binary | std::ios::ate); - const std::streamsize file_size = onnx_file.tellg(); + auto const file_size = onnx_file.tellg(); onnx_file.seekg(0, std::ios::beg); std::vector onnx_buf(file_size); if (!onnx_file.read(onnx_buf.data(), onnx_buf.size())) diff --git a/NvOnnxParser.h b/NvOnnxParser.h index e7f97cf9..132c12d9 100644 --- a/NvOnnxParser.h +++ b/NvOnnxParser.h @@ -135,7 +135,9 @@ class IParser */ virtual bool parseFromFile(const char* onnxModelFile, int verbosity) = 0; - /** \brief Check whether TensorRT supports a particular ONNX model + /** \brief Check whether TensorRT supports a particular ONNX model. 
+ * If the function returns True, one can proceed to engine building + * without having to call \p parse or \p parseFromFile. * * \param serialized_onnx_model Pointer to the serialized ONNX model * \param serialized_onnx_model_size Size of the serialized ONNX model @@ -218,6 +220,7 @@ namespace * because correctness of the translation may rely on those constants. * Changing a dynamic input dimension, i.e. one that translates to -1 in * TensorRT, to a constant is okay if the constant is consistent with the model. + * Each instance of the parser is designed to only parse one ONNX model once. * * \see IParser */ diff --git a/OnnxAttrs.cpp b/OnnxAttrs.cpp index ac372264..2ab625a5 100644 --- a/OnnxAttrs.cpp +++ b/OnnxAttrs.cpp @@ -308,7 +308,7 @@ nvinfer1::ResizeMode OnnxAttrs::get(const std::string& key { return nvinfer1::ResizeMode::kNEAREST; } - if (mode == "linear") + if (mode == "linear" || mode == "bilinear") { return nvinfer1::ResizeMode::kLINEAR; } diff --git a/README.md b/README.md index 1d8052b3..0bba7a93 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ For press and other inquiries, please contact Hector Marinez at hmarinez@nvidia. ## Supported TensorRT Versions -Development on the `main` branch is for the latest version of [TensorRT 8.2.3.0](https://developer.nvidia.com/nvidia-tensorrt-download) with full-dimensions and dynamic shape support. For those using TensorRT 8.4 EA, checkout and build on the `8.4-EA` branch. +Development on the `main` branch is for the latest version of [TensorRT 8.4.1.5](https://developer.nvidia.com/nvidia-tensorrt-download) with full-dimensions and dynamic shape support. For previous versions of TensorRT, refer to their respective branches. 
@@ -36,8 +36,8 @@ Python builder.create_network(explicit_batch) For examples of usage of these APIs see: -* [sampleONNXMNIST](https://github.com/NVIDIA/TensorRT/tree/main/samples/sampleOnnxMNIST) -* [sampleDynamicReshape](https://github.com/NVIDIA/TensorRT/tree/main/samples/sampleDynamicReshape) +* [sampleONNXMNIST](https://github.com/NVIDIA/TensorRT/tree/main/samples/opensource/sampleOnnxMNIST) +* [sampleDynamicReshape](https://github.com/NVIDIA/TensorRT/tree/main/samples/opensource/sampleDynamicReshape) ## Supported Operators @@ -47,9 +47,9 @@ Current supported ONNX operators are found in the [operator support matrix](docs ### Dependencies - - [Protobuf >= 3.0.x](https://github.com/google/protobuf/releases) - - [TensorRT 8.2.3.0](https://developer.nvidia.com/tensorrt) - - [TensorRT 8.2.3.0 open source libaries (main branch)](https://github.com/NVIDIA/TensorRT/) + - [Protobuf >= 3.0.x, <= 3.11.x](https://github.com/google/protobuf/releases) + - [TensorRT 8.4.1.5](https://developer.nvidia.com/tensorrt) + - [TensorRT 8.4.1.5 open source libaries (main branch)](https://github.com/NVIDIA/TensorRT/) ### Building @@ -101,7 +101,7 @@ Python bindings for the ONNX-TensorRT parser are packaged in the shipped `.whl` python3 -m pip install /python/tensorrt-8.x.x.x-cp-none-linux_x86_64.whl -TensorRT 8.2.1.8 supports ONNX release 1.8.0. Install it with: +TensorRT 8.4.1.5 supports ONNX release 1.8.0. Install it with: python3 -m pip install onnx==1.8.0 diff --git a/ShapeTensor.cpp b/ShapeTensor.cpp index 9362e239..e9bc34a7 100644 --- a/ShapeTensor.cpp +++ b/ShapeTensor.cpp @@ -11,11 +11,7 @@ namespace onnx2trt { -//! If true, tolerate bug where scalar constant of type FLOAT is missing its value, -//! and a shape tensor is expected. 
-static const bool gTolerateTRT_12408 = true; - -ShapeTensor::ShapeTensor(int rank_, std::vector&& values_) +ShapeTensor::ShapeTensor(int32_t rank_, std::vector&& values_) : mDepth(0) , mAllValuesKnown(true) , mRank(rank_) @@ -26,8 +22,32 @@ ShapeTensor::ShapeTensor(int rank_, std::vector&& values_) assert(rank_ > 0 || mValues.size() == 1); } +//! +//! Construct a shape tensor representation for float values. +//! This is a hack to get shape tensor working with float values with minimal changes. +//! The constructed ShapeTensor has the following properties: +//! 1. mAllValuesKnown == false +//! 2. mValues.size() == 0 +//! 3. mIsFloat == true +//! +//! Float shape tensor does not have any constant folding in parser as available to int32_t shape tensors. +//! Instead, rely on builder for constant folding and build-time simplifications. +//! +ShapeTensor::ShapeTensor(int32_t rank_, std::vector&& values_) + : mDepth(0) + , mAllValuesKnown(false) + , mRank(rank_) + , mSize(values_.size()) + , mValues({}) + , mIsFloat{true} +{ + assert((rank_ == 0 || rank_ == 1) && "shape tensor must have rank 0 or 1"); + assert(mValues.size() == 0 && "non-empty floating-point shape tensor with known values not supported"); +} + ShapeTensor::ShapeTensor(IImporterContext* ctx, TensorOrWeights& t) : mDepth(0) + , mIsFloat(t.isFp32()) { if (t.is_tensor()) { @@ -36,13 +56,10 @@ ShapeTensor::ShapeTensor(IImporterContext* ctx, TensorOrWeights& t) else { const nvinfer1::Dims d = t.shape(); - const auto& weights = t.weights(); - if (gTolerateTRT_12408 && weights.type == ::ONNX_NAMESPACE::TensorProto::FLOAT && d.nbDims == 0 && weights.count() == 0) + auto const& weights = t.weights(); + if (isFloat()) { - LOG_WARNING("Scalar constant of type FLOAT with no value encountered where ONNX specification requires tensor describing a shape. 
Assuming it's an INT64 empty vector."); - mRank = 1; - mSize = 0; - mAllValuesKnown = true; + *this = ShapeTensor(convertToTensor(t, ctx)); return; } assert(0 <= d.nbDims); @@ -67,13 +84,17 @@ ShapeTensor::ShapeTensor(nvinfer1::ITensor& t, int depth) : mDepth(depth) , mRank(1) , mTensor(&t) + // The check for depth == 0 is needed because when depth > 0, it means *this represents the shape of mTensor, and + // shapes always have integral type. + , mIsFloat(depth == 0 && t.getType() == nvinfer1::DataType::kFLOAT) { const nvinfer1::Dims dims = t.getDimensions(); + assert((!isFloat() || mDepth == 0) && "floating-point shape tensor must have depth == 0"); switch (mDepth) { case 0: - assert(t.getType() == nvinfer1::DataType::kINT32); + assert(t.getType() == nvinfer1::DataType::kINT32 || t.getType() == nvinfer1::DataType::kFLOAT); mRank = dims.nbDims; if (mRank == 0) { @@ -227,6 +248,7 @@ static ShapeTensor op(IImporterContext* ctx, const ShapeTensor& x, const ShapeTe } if (x.allValuesKnown() && y.allValuesKnown()) { + assert(!x.isFloat() && !y.isFloat()); std::vector values(std::max(x.size(), y.size())); for (size_t i = 0; i < values.size(); ++i) { @@ -333,6 +355,14 @@ ShapeTensor gather(IImporterContext* ctx, const ShapeTensor& data, const ShapeTe return ShapeTensor(*ctx->network()->addGather(data.tensor(ctx), indices.tensor(ctx), 0)->getOutput(0)); } +ShapeTensor castToInt32(IImporterContext* ctx, ShapeTensor const& x) +{ + nvinfer1::ILayer* identity = ctx->network()->addIdentity(x.tensor(ctx)); + assert(identity != nullptr); + identity->setOutputType(0, nvinfer1::DataType::kINT32); + return ShapeTensor(*identity->getOutput(0)); +} + ShapeTensor shapeOf(nvinfer1::ITensor& tensor) { return ShapeTensor(tensor, 1); @@ -360,7 +390,7 @@ ShapeTensor shapeOf(const ShapeTensor& t) // ShapeTensor is either a scalar or vector. // shape of a scalar is an empty tensor. // shape of a vector is a one-element tensor containing the length of the vector. - return t.rank() == 0 ? 
ShapeTensor(0, {}) : ShapeTensor(1, {t.size()}); + return t.rank() == 0 ? ShapeTensor(0, std::vector{}) : ShapeTensor(1, std::vector{t.size()}); } ShapeTensor convertTo1D(IImporterContext* ctx, const ShapeTensor& tensor) diff --git a/ShapeTensor.hpp b/ShapeTensor.hpp index ae04132c..c23070df 100644 --- a/ShapeTensor.hpp +++ b/ShapeTensor.hpp @@ -22,8 +22,11 @@ class ShapeTensor //! Create undefined ShapeTensor. ShapeTensor() = default; - //! Create ShapeTensor with known rank and values. - ShapeTensor(int rank_, std::vector&& values_); + //! Create ShapeTensor with known rank and int64_t values. + ShapeTensor(int32_t rank_, std::vector&& values_); + + //! Create ShapeTensor with known rank and float values. + ShapeTensor(int32_t rank_, std::vector&& values_); //! Create ShapeTensor representing value of TensorOrWeights. ShapeTensor(IImporterContext* ctx, TensorOrWeights& t); @@ -67,6 +70,12 @@ class ShapeTensor //! True if all element values equal the given value. bool isAll(int64_t value) const; + //! True if floating-point shape tensor. + bool isFloat() const + { + return mIsFloat; + } + using const_iterator = std::vector::const_iterator; //! Iterator pointing to beginning of sequence of element values. @@ -134,7 +143,9 @@ class ShapeTensor //! and mValues.size() == mSize. //! When mAllValuesKnown==false, only the non-negative values in mValues //! are guaranteed to be correct, and only so if mValues.size() == mSize. - std::vector mValues; + std::vector mValues{}; + + bool mIsFloat{false}; }; //! Print ShapeTensor. Unknown values are printed as _. @@ -191,6 +202,9 @@ ShapeTensor gather(IImporterContext* ctx, const ShapeTensor& data, const ShapeTe //! Concatenation of two 1D tensors ShapeTensor concat(IImporterContext* ctx, const ShapeTensor& x, const ShapeTensor& y); +//! Cast to int32_t shape tensor. +ShapeTensor castToInt32(IImporterContext* ctx, ShapeTensor const& x); + //! 
Return gather(concat(x,y),subscripts) inline ShapeTensor interlace( IImporterContext* ctx, const ShapeTensor& x, const ShapeTensor& y, const ShapeTensor& subscripts) diff --git a/ShapedWeights.cpp b/ShapedWeights.cpp index d42e4631..0147d6b3 100644 --- a/ShapedWeights.cpp +++ b/ShapedWeights.cpp @@ -13,11 +13,7 @@ namespace onnx2trt size_t ShapedWeights::count() const { - if (this->values == nullptr && this->shape.nbDims <= 0) - { - return 0; - } - // TRT supports scalars, so 0D tensors should have a count of 1. + assert(shape.nbDims >= 0); size_t c = 1; for (int i = 0; i < this->shape.nbDims; ++i) { @@ -28,13 +24,7 @@ size_t ShapedWeights::count() const ShapedWeights ShapedWeights::empty(DataType type) { - return ShapedWeights(type, nullptr, nvinfer1::Dims{0}); -} - -ShapedWeights::ShapedWeights() - : values(nullptr) - , shape{0} -{ + return ShapedWeights(type, nullptr, nvinfer1::Dims{1, {0}}); } ShapedWeights::ShapedWeights(DataType type_, void* values_, nvinfer1::Dims shape_) @@ -71,108 +61,9 @@ const char* ShapedWeights::getName() const return this->name; } -void ShapedWeights::setName(const char* name) -{ - this->name = name; -} - -template -void transpose4DWeights(ShapedWeights const& weights, nvinfer1::Permutation const perm, ShapedWeights* result) +void ShapedWeights::setName(const char* n) { - nvinfer1::Dims original_shape = weights.shape; - nvinfer1::Dims new_shape = result->shape; - int nbDims = new_shape.nbDims; - DType const* src = reinterpret_cast(weights.values); - DType* dst = reinterpret_cast(result->values); - - nvinfer1::Dims expanded_original_shape{4, {1, 1, 1, 1}}; - nvinfer1::Dims expanded_new_shape{4, {1, 1, 1, 1}}; - nvinfer1::Permutation expanded_perm{0, 1, 2, 3}; - - int pad = 4 - nbDims; - for (int i = 0; i < nbDims; ++i) - { - expanded_original_shape.d[pad + i] = original_shape.d[i]; - expanded_new_shape.d[pad + i] = new_shape.d[i]; - expanded_perm.order[pad + i] = perm.order[i] + pad; - } - - - int src_strides[4] = {1, 1, 1, 1}; - int 
dst_strides[4] = {1, 1, 1, 1}; - - for (int i = 2; i >= 0; --i) - { - src_strides[i] = expanded_original_shape.d[i + 1] * src_strides[i + 1]; - dst_strides[i] = expanded_new_shape.d[i + 1] * dst_strides[i + 1]; - } - - for (int n = 0; n < expanded_original_shape.d[0]; ++n) - { - for (int c = 0; c < expanded_original_shape.d[1]; ++c) - { - for (int h = 0; h < expanded_original_shape.d[2]; ++h) - { - for (int w = 0; w < expanded_original_shape.d[3]; ++w) - { - int src_index = 0; - int dst_index = 0; - int src_coord[4] = {n, c, h, w}; - int dst_coord[4]; - for (int i = 0 ; i < 4; ++i) - { - dst_coord[i] = src_coord[expanded_perm.order[i]]; - src_index += src_coord[i] * src_strides[i]; - dst_index += dst_coord[i] * dst_strides[i]; - } - dst[dst_index] = src[src_index]; - } - } - } - } -} - -bool transposeWeights(ShapedWeights const& weights, nvinfer1::Permutation const& perm, ShapedWeights* result, IImporterContext* ctx) -{ - nvinfer1::Dims shape = weights.shape; - int nbDims = shape.nbDims; - nvinfer1::Dims new_shape; - new_shape.nbDims = nbDims; - for (int d = 0; d < nbDims; ++d) - { - new_shape.d[d] = shape.d[perm.order[d]]; - result->shape.d[d] = new_shape.d[d]; - } - - - if (shape.nbDims <= 4) - { - if (weights.type == ::ONNX_NAMESPACE::TensorProto::FLOAT) - { - transpose4DWeights(weights, perm, result); - } - else if (weights.type == ::ONNX_NAMESPACE::TensorProto::FLOAT16) - { - transpose4DWeights(weights, perm, result); - } - else - { - return false; - } - } - else - { - // TODO: Implement general transposes and multiple data types - // Unsupported weights transpose - return false; - } - nvinfer1::Dims permDims{nbDims, {}}; - std::copy_n(perm.order, nbDims, permDims.d); - LOG_WARNING("Weights " - << weights.getName() << " has been transposed with permutation of " << permDims - << "! 
If you plan on overwriting the weights with the Refitter API, the new weights must be pre-transposed."); - result->setName(weights.getName()); - return true; + this->name = n; } } // namespace onnx2trt diff --git a/ShapedWeights.hpp b/ShapedWeights.hpp index 7ee9d280..3675aaf2 100644 --- a/ShapedWeights.hpp +++ b/ShapedWeights.hpp @@ -15,9 +15,12 @@ class ShapedWeights public: using DataType = int32_t; + //! Create 1D zero-length ShapedWeights of given type, count()==0, and values=nullptr. static ShapedWeights empty(DataType type); - ShapedWeights(); + //! Construct ShapedWeights that is not expected to be usuable, + //! except with `operator=` and method `setName()`. + ShapedWeights() = default; explicit ShapedWeights(DataType type, void* values, nvinfer1::Dims shape_); @@ -29,6 +32,7 @@ class ShapedWeights void setName(const char* name); + //! True if values exist. explicit operator bool() const; operator nvinfer1::Weights() const; @@ -48,13 +52,12 @@ class ShapedWeights } public: - DataType type; - void* values; - nvinfer1::Dims shape; + DataType type{static_cast(-1)}; + void* values{nullptr}; + nvinfer1::Dims shape{-1, {}}; const char* name{}; }; class IImporterContext; -bool transposeWeights(ShapedWeights const& weights, nvinfer1::Permutation const& perm, ShapedWeights* result, IImporterContext* ctx); } // namespace onnx2trt diff --git a/TensorOrWeights.hpp b/TensorOrWeights.hpp index 7515fe73..fbde7523 100644 --- a/TensorOrWeights.hpp +++ b/TensorOrWeights.hpp @@ -81,6 +81,11 @@ class TensorOrWeights { return is_tensor() ? _tensor != nullptr : static_cast(_weights); } + bool isFp32() const + { + return is_tensor() ? _tensor->getType() == nvinfer1::DataType::kFLOAT + : _weights.type == ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT; + } bool isInt32() const { return is_tensor() ? 
_tensor->getType() == nvinfer1::DataType::kINT32 : _weights.type == ::ONNX_NAMESPACE::TensorProto_DataType_INT32; diff --git a/builtin_op_importers.cpp b/builtin_op_importers.cpp index 69778047..9854f9b3 100644 --- a/builtin_op_importers.cpp +++ b/builtin_op_importers.cpp @@ -6,11 +6,13 @@ #include "ConditionalHelpers.hpp" #include "LoopHelpers.hpp" #include "ModelImporter.hpp" +#include "NvInfer.h" #include "NvInferPlugin.h" #include "NvInferRuntime.h" #include "OnnxAttrs.hpp" #include "RNNHelpers.hpp" #include "ShapeTensor.hpp" +#include "half.h" #include "onnx2trt_utils.hpp" #include // For std::min, std::max @@ -82,13 +84,24 @@ namespace do \ { \ nvinfer1::ILayer* layer_ptr = layer; \ - ASSERT(layer_ptr && "The input layer is null.", ErrorCode::kUNSUPPORTED_NODE); \ + ASSERT(layer_ptr && "The input layer is null.", ErrorCode::kUNSUPPORTED_NODE); \ std::vector outputs; \ for (int i = 0; i < layer_ptr->getNbOutputs(); ++i) \ outputs.push_back(layer_ptr->getOutput(i)); \ return {outputs}; \ } while (0) +void assertIsWeights(const TensorOrWeights& input, const std::string& specificMsg) +{ + if (!input.is_weights()) + { + std::ostringstream msg; + msg << specificMsg; + msg << " Try applying constant folding on the model using Polygraphy: https://github.com/NVIDIA/TensorRT/tree/master/tools/Polygraphy/examples/cli/surgeon/02_folding_constants"; + throw std::runtime_error(msg.str()); + } +} + bool registerBuiltinOpImporter(std::string op, NodeImporter const& importer) { bool inserted = getBuiltinOpImporterMap().insert({op, importer}).second; @@ -259,7 +272,7 @@ DEFINE_BUILTIN_OP_IMPORTER(BatchNormalization) } return scaleHelper(ctx, node, *tensorPtr, nvinfer1::ScaleMode::kCHANNEL, combinedBias, combinedScale, - ShapedWeights::empty(scale.type), bias.getName(), scale.getName()); + ShapedWeights::empty(scale.type), combinedBias.getName(), combinedScale.getName()); } DEFINE_BUILTIN_OP_IMPORTER(Cast) @@ -313,14 +326,14 @@ DEFINE_BUILTIN_OP_IMPORTER(Celu) std::vector 
inputTensors; int maxNbDims = -1; - for (auto input : newInputs) + for (auto i : newInputs) { - maxNbDims = std::max(maxNbDims, input.shape().nbDims); + maxNbDims = std::max(maxNbDims, i.shape().nbDims); } - for (auto input : newInputs) + for (auto i : newInputs) { - auto* tensor_ptr = &convertToTensor(input, ctx); + auto* tensor_ptr = &convertToTensor(i, ctx); // Broadcast all input tensors to size of maxNbDims broadcastTensor(ctx, tensor_ptr, maxNbDims); @@ -333,10 +346,10 @@ DEFINE_BUILTIN_OP_IMPORTER(Celu) std::vector tempInputs{newInputs[0], newInputs[3]}; ASSERT(elementwiseCheck(tempInputs, eOp::kDIV) && "Elementwise layer does not support the given inputs and operator.", ErrorCode::kUNSUPPORTED_NODE); nvinfer1::ITensor* combined = inputTensors.at(0); - auto* layer = ctx->network()->addElementWise(*combined, *inputTensors.at(3), eOp::kDIV); - ctx->registerLayer(layer, getNodeName(node)); - ASSERT(layer && "Failed to register layer.", ErrorCode::kUNSUPPORTED_NODE); - combined = layer->getOutput(0); + auto* divLayer = ctx->network()->addElementWise(*combined, *inputTensors.at(3), eOp::kDIV); + ctx->registerLayer(divLayer, getNodeName(node)); + ASSERT(divLayer && "Failed to register layer.", ErrorCode::kUNSUPPORTED_NODE); + combined = divLayer->getOutput(0); // Calculate exp(x/alpha) -> 4 nvinfer1::IUnaryLayer* uLayer = ctx->network()->addUnary(*combined, uOp::kEXP); @@ -344,22 +357,19 @@ DEFINE_BUILTIN_OP_IMPORTER(Celu) combined = uLayer->getOutput(0); inputTensors.push_back(combined); - - std::vector operations { + std::vector operations{ // max(0,x) -> 5 eOpInstuctor(0, 1, eOp::kMAX), // (exp(x/alpha)-1)) -> 6 eOpInstuctor(4, 2, eOp::kSUB), // alpha*(exp(x/alpha)-1) -> 7 - eOpInstuctor(3, 6, eOp::kPOW), + eOpInstuctor(3, 6, eOp::kPROD), // min(0,alpha*(exp(x/alpha)-1)) -> 8 eOpInstuctor(1, 7, eOp::kMIN), // max(0,x) + min(0,alpha*(exp(x/alpha)-1)) -> 9 eOpInstuctor(5, 8, eOp::kSUM), }; - - for (auto it : operations) { nvinfer1::ITensor* firstTensor = 
inputTensors.at(std::get<0>(it)); @@ -606,9 +616,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Conv) { if (inputs.size() == 3) { - ASSERT( - inputs.at(2).is_weights() && "The bias tensor is required to be an initializer for the Conv operator", - ErrorCode::kUNSUPPORTED_NODE); + assertIsWeights(inputs.at(2), "The bias tensor is required to be an initializer for the Conv operator."); } // Handle Multi-input convolution return convMultiInput(ctx, node, inputs); @@ -644,7 +652,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Conv) nvinfer1::Weights bias_weights; if (inputs.size() == 3) { - ASSERT(inputs.at(2).is_weights() && "The bias tensor is required to be an initializer for the Conv operator.", ErrorCode::kUNSUPPORTED_NODE); + assertIsWeights(inputs.at(2), "The bias tensor is required to be an initializer for the Conv operator."); auto shapedBiasWeights = inputs.at(2).weights(); // Unsqueeze scalar weights to 1D if (shapedBiasWeights.shape.nbDims == 0) @@ -890,13 +898,6 @@ DEFINE_BUILTIN_OP_IMPORTER(ConvTranspose) ctx->network()->setWeightsName(kernelWeights, inputs.at(1).weights().getName()); } - // Check that 3D deconvolution paddings is valid - if (nbSpatialDims == 3) - { - ASSERT(begPadding == endPadding && "TensorRT does not support asymmetrical padding for 3D deconvolutions!", - ErrorCode::kUNSUPPORTED_NODE); - } - layer->setPaddingMode(paddingMode); layer->setPrePadding(begPadding); layer->setPostPadding(endPadding); @@ -989,7 +990,7 @@ DEFINE_BUILTIN_OP_IMPORTER(CumSum) nvinfer1::ITensor* input = &convertToTensor(inputs.at(0), ctx); auto dims = input->getDimensions(); - ASSERT(inputs.at(1).is_weights() && "Axis input for CumSum must be an initializer!", ErrorCode::kUNSUPPORTED_NODE); + assertIsWeights(inputs.at(1), "Axis input for CumSum must be an initializer!"); ShapedWeights axisWeights = inputs.at(1).weights(); int32_t axis = static_cast(axisWeights.values)[0]; CHECK(convertAxis(axis, dims.nbDims)); @@ -1124,12 +1125,17 @@ NodeImportResult QuantDequantLinearHelper( IImporterContext* 
ctx, ::ONNX_NAMESPACE::NodeProto const& node, std::vector& inputs, bool isDQ) { auto addConstantLayer - = [](nvinfer1::INetworkDefinition& network, const ShapedWeights& weights) -> nvinfer1::ITensor* { + = [ctx](nvinfer1::INetworkDefinition& network, const ShapedWeights& weights) -> nvinfer1::ITensor* { nvinfer1::IConstantLayer* constLayer = network.addConstant(weights.shape, weights); + ctx->registerLayer(constLayer, weights.getName()); network.setWeightsName(weights, weights.getName()); return constLayer->getOutput(0); }; + auto newConstantInput = [&](int32_t i) { + return inputs.at(i).is_weights() && (ctx->getConstantLayer(inputs.at(i).weights().getName()) == nullptr); + }; + ASSERT((inputs.size() == 3) && "This version of TensorRT requires 3 inputs for the DequantizeLinear operator.", nvonnxparser::ErrorCode::kINVALID_NODE); @@ -1138,9 +1144,8 @@ NodeImportResult QuantDequantLinearHelper( nvinfer1::ITensor& dataInput = convertToTensor(inputs.at(0), ctx); // Input 1 initializes the layer's scale weights. - auto scaleIsWeights = inputs.at(1).is_weights(); nvinfer1::ITensor* scaleInput = nullptr; - if (scaleIsWeights) + if (newConstantInput(1)) { // Scale is concrete so verify it now. auto scale = inputs.at(1).weights(); @@ -1160,9 +1165,8 @@ NodeImportResult QuantDequantLinearHelper( const auto scaleSize = volume(scaleInput->getDimensions()); // Input 2 initializes the layer's zero-point. - auto zeroPtIsWeights = inputs.at(2).is_weights(); nvinfer1::ITensor* zeroPointInput = nullptr; - if (zeroPtIsWeights) + if (newConstantInput(2)) { // Zero-point verification. 
auto zeroPoint = inputs.at(2).weights(); @@ -1216,7 +1220,6 @@ NodeImportResult QuantDequantLinearHelper( nvinfer1::IDequantizeLayer* dq = ctx->network()->addDequantize(dataInput, *scaleInput); ASSERT(dq && "Failed to create Dequantize layer.", ErrorCode::kUNSUPPORTED_NODE); dq->setAxis(axis); - nodeName += std::string("_dequantize_scale_node"); layer = dq; } else @@ -1225,7 +1228,6 @@ NodeImportResult QuantDequantLinearHelper( nvinfer1::IQuantizeLayer* q = ctx->network()->addQuantize(dataInput, *scaleInput); ASSERT(q && "Failed to create Quantize layer.", ErrorCode::kUNSUPPORTED_NODE); q->setAxis(axis); - nodeName += std::string("_quantize_scale_node"); layer = q; } @@ -1590,6 +1592,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Gemm) DEFINE_BUILTIN_OP_IMPORTER(GlobalAveragePool) { + LOG_VERBOSE("GlobalAveragePool operators are implemented via Reduce layers rather than Pooling layers"); return {{globalPoolingHelper(ctx, node, convertToTensor(inputs.at(0), ctx), nvinfer1::ReduceOperation::kAVG)}}; } @@ -1606,7 +1609,7 @@ DEFINE_BUILTIN_OP_IMPORTER(GlobalLpPool) nvinfer1::Dims scalarDims{dims.nbDims}; std::fill(scalarDims.d, scalarDims.d + scalarDims.nbDims, 1); auto& pTensor = *addConstantScalar(ctx, p, ::ONNX_NAMESPACE::TensorProto::FLOAT, scalarDims)->getOutput(0); - auto& pInvTensor = *addConstantScalar(ctx, 1.f / p, ::ONNX_NAMESPACE::TensorProto::FLOAT, scalarDims)->getOutput(0); + auto& pInvTensor = *addConstantScalar(ctx, 1.F / p, ::ONNX_NAMESPACE::TensorProto::FLOAT, scalarDims)->getOutput(0); // firstPow = pow(x, p) auto* firstPow = ctx->network()->addElementWise(tensor, pTensor, nvinfer1::ElementWiseOperation::kPOW)->getOutput(0); @@ -1620,6 +1623,7 @@ DEFINE_BUILTIN_OP_IMPORTER(GlobalLpPool) DEFINE_BUILTIN_OP_IMPORTER(GlobalMaxPool) { + LOG_VERBOSE("GlobalMaxPool operators are implemented via Reduce layers rather than Pooling layers"); return {{globalPoolingHelper(ctx, node, convertToTensor(inputs.at(0), ctx), nvinfer1::ReduceOperation::kMAX)}}; } @@ -2052,8 +2056,13 @@ 
DEFINE_BUILTIN_OP_IMPORTER(If) // For constant conditions, parse only the selected subgraph if (cond.is_weights() && cond.weights().count() == 1) { - const auto value = *(static_cast(cond.weights().values)); + // Boolean weights are stored as uint8_t + auto const value = *(static_cast(cond.weights().values)); const ::ONNX_NAMESPACE::GraphProto& body = value == 1 ? thenGraph : elseGraph; + + // Establish scope for names local to the subgraph. + NameScope nameScope(*ctx); + CHECK(onnx2trt::parseGraph(ctx, body)); for (auto i = 0; i < nbOutputs; i++) { @@ -2075,22 +2084,17 @@ DEFINE_BUILTIN_OP_IMPORTER(If) conditional->setCondition(*condTensor); std::vector thenLayers, elseLayers; - CHECK(importSubgraph(ctx, thenGraph, thenLayers)); - CHECK(importSubgraph(ctx, elseGraph, elseLayers)); - - // Names must be unique - for (auto i = 0; i < nbOutputs; i++) - { - const auto thenName = thenGraph.output(i).name(); - const auto elseName = elseGraph.output(i).name(); - ASSERT(thenName != elseName && "TensorRT requires conditional subgraphs to have different output tensor names!", ErrorCode::kUNSUPPORTED_NODE); - } + StringMap thenSubgraphTensors; + StringMap elseSubgraphTensors; + CHECK(importSubgraph(ctx, thenGraph, thenLayers, thenSubgraphTensors)); + CHECK(importSubgraph(ctx, elseGraph, elseLayers, elseSubgraphTensors)); using InputsMap = std::unordered_map; InputsMap inputsMap; CHECK(addIfInputLayers(ctx, conditional, inputsMap, thenLayers)); CHECK(addIfInputLayers(ctx, conditional, inputsMap, elseLayers)); - CHECK(addIfOutputLayers(ctx, conditional, thenGraph, thenLayers, elseGraph, elseLayers, graphOutputs)); + CHECK(addIfOutputLayers(ctx, conditional, thenGraph, thenLayers, thenSubgraphTensors, elseGraph, elseLayers, + elseSubgraphTensors, graphOutputs)); return {graphOutputs}; } @@ -2238,6 +2242,10 @@ DEFINE_BUILTIN_OP_IMPORTER(Loop) auto loop = ctx->network()->addLoop(); loop->setName(getNodeName(node).c_str()); + + // Establish scope for names local to the subgraph. 
+ NameScope nameScope(*ctx); + // Trip count and condition are optional inputs. nvinfer1::ITensor* tripLimit{nullptr}; if (inputs[0]) @@ -2640,7 +2648,7 @@ DEFINE_BUILTIN_OP_IMPORTER(LpNormalization) CHECK(convertAxis(axis, nbDims)); ASSERT((p == 1 || p == 2) && "Only L1 and L2 normalization are supported.", ErrorCode::kINVALID_NODE); - nvinfer1::ITensor* norm; + nvinfer1::ITensor* norm{nullptr}; TensorOrWeights zeros = ctx->createTempWeights(::ONNX_NAMESPACE::TensorProto::FLOAT, {0,{}}); nvinfer1::ITensor* zerosTensor = &convertToTensor(zeros, ctx); broadcastTensor(ctx, zerosTensor, nbDims); @@ -2727,7 +2735,7 @@ DEFINE_BUILTIN_OP_IMPORTER(LpPool) nvinfer1::ITensor* kernelSzTensor = addConstantScalar(ctx, kernelSz, ::ONNX_NAMESPACE::TensorProto::FLOAT, scalarDims)->getOutput(0); - nvinfer1::ITensor* output; + nvinfer1::ITensor* output{nullptr}; if (p == 1) { // x' = abs(x) nvinfer1::IUnaryLayer* absLayer = ctx->network()->addUnary(*input, uOp::kABS); @@ -2771,37 +2779,8 @@ DEFINE_BUILTIN_OP_IMPORTER(MatMul) nvinfer1::ITensor* inputB = &convertToTensor(inputs.at(1), ctx); // TRT does not support INT32 input types for this node ASSERT(inputA->getType() != nvinfer1::DataType::kINT32 && inputB->getType() != nvinfer1::DataType::kINT32 - && "TensorRT doesn't support INT32 inputs for MatMul!", ErrorCode::kUNSUPPORTED_NODE); - nvinfer1::Dims inputADims = inputA->getDimensions(); - nvinfer1::Dims inputBDims = inputB->getDimensions(); - - // Use FC if possible as FC can be fused with later activation and bias layers. - // Input unsqueezed from (N, C) to (N, 1, 1, C). - // Output squeezed from (N, K, 1, 1) to (N, K). 
- bool canUseFC = inputs.at(0).is_tensor() && inputs.at(1).is_weights() && - inputADims.nbDims == 2 && inputBDims.nbDims == 2; - if (canUseFC) - { - LOG_VERBOSE("GEMM: using FC layer instead of MM because all criteria were met."); - const std::vector axesInput{2, 3}; - nvinfer1::ITensor* inputAExtendDim = unsqueezeTensor(ctx, node, *inputA, axesInput); - - ShapedWeights weights = inputs.at(1).weights(); - auto transposedWeights = ctx->createTempWeights(weights.type, weights.shape); - ASSERT(transposeWeights(weights, {1, 0}, &transposedWeights, ctx) && "Failed to transpose input tensor B.", ErrorCode::kUNSUPPORTED_NODE); - weights = transposedWeights; - - // Create empty bias weights as MatMul op does not have bias addition. - auto biasWeights = ShapedWeights::empty(::ONNX_NAMESPACE::TensorProto::FLOAT); - nvinfer1::IFullyConnectedLayer* fc - = ctx->network()->addFullyConnected(*inputAExtendDim, inputBDims.d[1], transposedWeights, biasWeights); - // Register layer name and kernel weights for FC. - ctx->registerLayer(fc, getNodeName(node)); - // Always set names for weights passed to the network, i.e., the transposed weights. 
- ctx->network()->setWeightsName(weights, inputs.at(1).weights().getName()); - const std::vector axesOutput{2, 3}; - return {{squeezeTensor(ctx, node, *fc->getOutput(0), axesOutput)}}; - } + && "TensorRT doesn't support INT32 inputs for MatMul!", + ErrorCode::kUNSUPPORTED_NODE); bool needSqueezeHead = false; bool needSqueezeTail = false; @@ -2879,6 +2858,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Mean) static_cast(scale_weights.values)[0] = scale_value; auto* constant_layer = ctx->network()->addConstant(scale_weights.shape, scale_weights); ASSERT(constant_layer && "Failed to create the scalar tensor.", ErrorCode::kUNSUPPORTED_NODE); + ctx->network()->setWeightsName(scale_weights, scale_weights.getName()); nvinfer1::ITensor& scale_constant = *constant_layer->getOutput(0); RETURN_FIRST_OUTPUT( ctx->network()->addElementWise(sum_tensor, scale_constant, nvinfer1::ElementWiseOperation::kPROD)); @@ -3020,14 +3000,28 @@ DEFINE_BUILTIN_OP_IMPORTER(Pad) } if (inputs.size() == 3) { + bool isValueSet = false; if (inputs.at(2).is_weights()) { - const auto padWeight = inputs.at(2).weights(); + auto const padWeight = inputs.at(2).weights(); ASSERT((padWeight.count() == 1) && "The input constant_value is required to be a scalar.", ErrorCode::kINVALID_NODE); - value = static_cast(padWeight.values)[0]; + switch (padWeight.type) + { + case ::ONNX_NAMESPACE::TensorProto::FLOAT: + value = static_cast(padWeight.values)[0]; + isValueSet = true; + break; + case ::ONNX_NAMESPACE::TensorProto::FLOAT16: + value = float(reinterpret_cast(padWeight.values)[0]); + isValueSet = true; + break; + default: + // we use trt constant layer to do the data type convertion + break; + } } - else + if (!isValueSet) { valuePtr = &convertToTensor(inputs.at(2), ctx); } @@ -3109,15 +3103,14 @@ DEFINE_BUILTIN_OP_IMPORTER(Pad) case nvinfer1::DataType::kFLOAT: case nvinfer1::DataType::kHALF: case nvinfer1::DataType::kINT8: - fillValue = addConstant(ctx, std::vector{value}, ::ONNX_NAMESPACE::TensorProto::FLOAT, - 
nvinfer1::Dims{ - 0, {0}})->getOutput(0); + fillValue = addConstant( + ctx, std::vector{value}, ::ONNX_NAMESPACE::TensorProto::FLOAT, nvinfer1::Dims{0, {0}}) + ->getOutput(0); break; default: fillValue = addConstant(ctx, std::vector{static_cast(value)}, - ::ONNX_NAMESPACE::TensorProto::INT32, - nvinfer1::Dims{ - 0, {0}})->getOutput(0); + ::ONNX_NAMESPACE::TensorProto::INT32, nvinfer1::Dims{0, {0}}) + ->getOutput(0); break; } ASSERT(fillValue && "Could not create layer for constant_value", ErrorCode::kUNSUPPORTED_NODE); @@ -3237,36 +3230,10 @@ DEFINE_BUILTIN_OP_IMPORTER(RandomUniformLike) return randomUniformHelper(ctx, node, inputShape, attrs, dType); } -NodeImportResult staticFloatRangeImporter(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, const std::vector& inputs) -{ - const float start = static_cast(inputs.at(0).weights().values)[0]; - const float limit = static_cast(inputs.at(1).weights().values)[0]; - const float delta = static_cast(inputs.at(2).weights().values)[0]; - const float size = std::max(std::ceil((limit - start) / delta), 0.0f); - ASSERT(size != 0 && "Zero-sized range operators are not supported!", ErrorCode::kUNSUPPORTED_NODE); - ASSERT(size <= std::numeric_limits::max() && "Range operator size must fit in int32!", - ErrorCode::kUNSUPPORTED_NODE); - nvinfer1::IFillLayer* layer - = addFill(ctx, shapeVector(static_cast(size)), nvinfer1::FillOperation::kLINSPACE); - ctx->registerLayer(layer, getNodeName(node)); - layer->setAlpha(start); - layer->setBeta(delta); - RETURN_FIRST_OUTPUT(layer); -} - DEFINE_BUILTIN_OP_IMPORTER(Range) { - if (inputs.at(0).is_weights() && inputs.at(0).weights().type == ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT) - { - // Floating-point case supported by TensorRT only if all inputs are static. 
- if (inputs.at(0).is_weights() && inputs.at(1).is_weights() && inputs.at(2).is_weights()) - { - return staticFloatRangeImporter(ctx, node, inputs); - } - } - - ASSERT(inputs.at(0).isInt32() - && "For range operator with dynamic inputs, this version of TensorRT only supports INT32!", + ASSERT((inputs.at(0).isInt32() || inputs.at(0).isFp32()) + && "This version of TensorRT only supports int32 and float input types for Range!", ErrorCode::kUNSUPPORTED_NODE); // "start : T @@ -3275,9 +3242,13 @@ DEFINE_BUILTIN_OP_IMPORTER(Range) // Scalar. Exclusive upper limit for the range of output values. // delta : T // Scalar. Value to step by." - const ShapeTensor start{ctx, inputs.at(0)}; - const ShapeTensor limit{ctx, inputs.at(1)}; - const ShapeTensor delta{ctx, inputs.at(2)}; + ShapeTensor const start{ctx, inputs.at(0)}; + ShapeTensor const limit{ctx, inputs.at(1)}; + ShapeTensor const delta{ctx, inputs.at(2)}; + + ASSERT((start.isFloat() == limit.isFloat() && start.isFloat() == delta.isFloat()) + && "For range operator types for start, limit, and delta must be identical.", + ErrorCode::kUNSUPPORTED_NODE); // "number_of_elements = max( ceil( (limit - start) / delta ) , 0 )" // @@ -3285,8 +3256,10 @@ DEFINE_BUILTIN_OP_IMPORTER(Range) // shape tensors, rewrite as: // "number_of_elements = max(0 - floor((start - limit) / delta), 0) // - const ShapeTensor zero = shapeScalar(0); - const ShapeTensor numberOfElements = max(ctx, sub(ctx, zero, floorDiv(ctx, sub(ctx, start, limit), delta)), zero); + ShapeTensor const zero = shapeScalar(0); + ShapeTensor const fQuotient = floorDiv(ctx, sub(ctx, start, limit), delta); + ShapeTensor const quotient = start.isFloat() ? 
castToInt32(ctx, fQuotient) : fQuotient; + ShapeTensor const numberOfElements = max(ctx, sub(ctx, zero, quotient), zero); nvinfer1::IFillLayer* layer = addFill(ctx, convertTo1D(ctx, numberOfElements), nvinfer1::FillOperation::kLINSPACE); ctx->registerLayer(layer, getNodeName(node)); @@ -3296,8 +3269,12 @@ DEFINE_BUILTIN_OP_IMPORTER(Range) { layer->setAlpha(start[0]); layer->setBeta(delta[0]); - // Set layer output type to INT32 for statically-known ranges. - layer->setOutputType(0, nvinfer1::DataType::kINT32); + if (!start.isFloat()) + { + // Set output type to INT32 for ranges that should be INT32, since TRT only accepts + // double type for setAlpha and setBeta + layer->setOutputType(0, nvinfer1::DataType::kINT32); + } } else { @@ -3523,21 +3500,31 @@ DEFINE_BUILTIN_OP_IMPORTER(Resize) // Resizes that use scale factors have the same import logic between opsets auto scales = ctx->getOpsetVersion() >= 11 ? inputs.at(2) : inputs.at(1); - ASSERT(scales.is_weights() && "Resize scales must be an initializer!", ErrorCode::kUNSUPPORTED_NODE); - ShapedWeights scales_weights = scales.weights(); - ASSERT( (scales_weights.shape.nbDims == 1) && "The scales input must be 1D.", ErrorCode::kUNSUPPORTED_NODE); - int scaleSize = scales_weights.shape.d[0]; - ASSERT( (scaleSize == inputRank) && "The shape of input scales must align with the input rank.", ErrorCode::kINVALID_NODE); - float const* scaleValues = static_cast(scales_weights.values); - if (resizeMode == nvinfer1::ResizeMode::kLINEAR) - { - ASSERT(canUseLinearResize(scaleSize, scaleValues) - && "This version of TensorRT only supports linear resizing on the outermost 3 dimensions.", - ErrorCode::kUNSUPPORTED_NODE); + + if (scales.is_weights()) + { + // TRT-15340: Remove this and use else path when safety support nbDims == 1. 
+ ShapedWeights scales_weights = scales.weights(); + ASSERT((scales_weights.shape.nbDims == 1) && "The scales input must be 1D.", ErrorCode::kUNSUPPORTED_NODE); + int32_t scaleSize = scales_weights.shape.d[0]; + ASSERT((scaleSize == inputRank) && "The shape of input scales must align with the input rank.", + ErrorCode::kINVALID_NODE); + float const* scaleValues = static_cast(scales_weights.values); + if (resizeMode == nvinfer1::ResizeMode::kLINEAR) + { + ASSERT(canUseLinearResize(scaleSize, scaleValues) + && "This version of TensorRT only supports linear resizing on the outermost 3 dimensions.", + ErrorCode::kUNSUPPORTED_NODE); + } + layer->setScales(scaleValues, inputRank); + } + else + { + nvinfer1::ITensor* resizeShape = resizeShapeTensor(ctx, input, scales); + layer->setInput(1, *resizeShape); } layer->setResizeMode(resizeMode); - layer->setScales(scaleValues, inputRank); LOG_VERBOSE("Running resize layer with: \n" << "Transformation mode: " << transformationMode << "\n" @@ -3928,6 +3915,9 @@ DEFINE_BUILTIN_OP_IMPORTER(Scan) nvinfer1::ITensor* tripLimit = getAxisLength(ctx, &convertToTensor(inputs.back(), ctx), scanInputAxes.back()); loop->addTripLimit(*tripLimit, nvinfer1::TripLimit::kCOUNT); + // Establish scope for names local to the subgraph. + NameScope nameScope(*ctx); + // Add initial state inputs using recurrent layers, and scan inputs using iterators. std::vector stateVars{}; for (int i = 0; i < nbStateVars; ++i) @@ -4134,7 +4124,11 @@ DEFINE_BUILTIN_OP_IMPORTER(Slice) decodeOnnxStartsAndEnds(ctx, dims, steps, starts, ends); // TensorRT uses sizes of the output dimensions instead of ends. 
- const ShapeTensor sizes = computeSliceSizes(ctx, starts, ends, steps, dims); + ShapeTensor sizes = computeSliceSizes(ctx, starts, ends, steps, dims); + + // Negative sizes signifies an empty slice, so clamp sizes to 0 + const ShapeTensor zeros = similar(ctx, dims, 0); + sizes = max(ctx, zeros, sizes); nvinfer1::ISliceLayer* slice = addSlice(ctx, data, starts, sizes, steps); @@ -4294,7 +4288,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Split) { if (splitList.empty()) { - sizeSliceAxis = gather(ctx, splitSizesTensor, ShapeTensor(1, {i})); + sizeSliceAxis = gather(ctx, splitSizesTensor, ShapeTensor(1, std::vector{i})); } else { @@ -4327,7 +4321,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Squeeze) { if (inputs.size() == 2) { - ASSERT(inputs.at(1).is_weights() && "Unsqueeze axes input must an initializer!", ErrorCode::kUNSUPPORTED_NODE); + assertIsWeights(inputs.at(1), "Squeeze axes input must be an initializer!"); // Map weights value to axes auto axesWeights = inputs.at(1).weights(); int32_t* axesValues = static_cast(axesWeights.values); @@ -4434,8 +4428,9 @@ DEFINE_BUILTIN_OP_IMPORTER(TopK) // Don't support TopK with k as a tensor if (ctx->getOpsetVersion() >= 10) { - ASSERT( (inputs.at(1).is_weights()) && "This version of TensorRT only supports input K as an initializer.", ErrorCode::kUNSUPPORTED_NODE); - ASSERT( (inputs.at(1).weights().count() == 1) && "The input K must contain exactly 1 value.", ErrorCode::kUNSUPPORTED_NODE); + assertIsWeights(inputs.at(1), "This version of TensorRT only supports input K as an initializer."); + ASSERT((inputs.at(1).weights().count() == 1) && "The input K must contain exactly 1 value.", + ErrorCode::kUNSUPPORTED_NODE); k = *static_cast(inputs.at(1).weights().values); } else @@ -4489,21 +4484,10 @@ DEFINE_BUILTIN_OP_IMPORTER(Transpose) default_perm.order[i] = ndim - 1 - i; } nvinfer1::Permutation perm = attrs.get("perm", default_perm); - if (input.is_tensor()) - { - nvinfer1::ITensor* output_tensor = transposeTensor(ctx, node, input.tensor(), perm); - 
ASSERT(output_tensor && "Failed to transpose the input.", ErrorCode::kUNSUPPORTED_NODE); - return {{output_tensor}}; - } - else - { - auto weights = input.weights(); - auto new_weights = ctx->createTempWeights(weights.type, weights.shape); - ASSERT(transposeWeights(weights, perm, &new_weights, ctx) && "Failed to transpose the input.", ErrorCode::kUNSUPPORTED_NODE); - weights = new_weights; - - return {{weights}}; - } + nvinfer1::ITensor& itensor = input.is_tensor() ? input.tensor() : convertToTensor(input, ctx); + nvinfer1::ITensor* output_tensor = transposeTensor(ctx, node, itensor, perm); + ASSERT(output_tensor && "Failed to transpose the input.", ErrorCode::kUNSUPPORTED_NODE); + return {{output_tensor}}; } DEFINE_BUILTIN_OP_IMPORTER(Unsqueeze) @@ -4556,57 +4540,92 @@ DEFINE_BUILTIN_OP_IMPORTER(Upsample) { nvinfer1::ITensor& tensor = convertToTensor(inputs.at(0), ctx); // TRT does not support BOOL input types for this node - ASSERT( (tensor.getType() != nvinfer1::DataType::kINT32 && tensor.getType() != nvinfer1::DataType::kBOOL) - && "This version of TensorRT does not support INT32 or BOOL input for the Upsample operator.", ErrorCode::kUNSUPPORTED_NODE); - const int nbDims = tensor.getDimensions().nbDims; - ASSERT( (nbDims > 0) && "The input tensor cannot be a scalar.", ErrorCode::kUNSUPPORTED_NODE); + ASSERT((tensor.getType() != nvinfer1::DataType::kINT32 && tensor.getType() != nvinfer1::DataType::kBOOL) + && "This version of TensorRT does not support INT32 or BOOL input for the Upsample operator.", + ErrorCode::kUNSUPPORTED_NODE); + const int32_t nbDims = tensor.getDimensions().nbDims; + ASSERT((nbDims > 0) && "The input tensor cannot be a scalar.", ErrorCode::kUNSUPPORTED_NODE); OnnxAttrs attrs(node, ctx); - std::vector scale_factors(nbDims, 1.0f); + + nvinfer1::IResizeLayer* const layer = ctx->network()->addResize(tensor); + auto mode = attrs.get("mode", "nearest"); + ASSERT((mode == "nearest" || mode == "linear" || mode == "bilinear") + && "The attribute 
mode can only be nearest, linear, or bilinear.", + ErrorCode::kUNSUPPORTED_NODE); + // Set default resize mode. Nearest resize support N-D (where 0 < N <= 8) resize. + nvinfer1::ResizeMode resizeMode + = (mode == "linear" || mode == "bilinear") ? nvinfer1::ResizeMode::kLINEAR : nvinfer1::ResizeMode::kNEAREST; + if (ctx->getOpsetVersion() >= 9) { // Get scale factors from inputs[1] - ASSERT( (inputs.size() == 2) && "Operator Upsample requires exactly 2 inputs.", ErrorCode::kINVALID_NODE); + ASSERT((inputs.size() == 2) && "Operator Upsample requires exactly 2 inputs.", ErrorCode::kINVALID_NODE); auto scales_input = inputs.at(1); - // Retrieve and validate scale factors. - ASSERT( (scales_input.is_weights()) && "The scales input must be an initializer.", ErrorCode::kUNSUPPORTED_NODE); - ShapedWeights scales_weights = scales_input.weights(); - ASSERT( (scales_weights.shape.nbDims == 1) && "The scales input must be 1D.", ErrorCode::kUNSUPPORTED_NODE); - // Scale factors has batch dimension. - ASSERT( (scales_weights.count() == static_cast(nbDims)) && "The shape of the scales input must aligin with the dimensions of the input.", ErrorCode::kUNSUPPORTED_NODE); - ASSERT( (scales_weights.type == ::ONNX_NAMESPACE::TensorProto::FLOAT) && "This version of TensorRT only supports FLOAT scales input.", ErrorCode::kINVALID_NODE); - float const* scales_ptr = static_cast(scales_weights.values); - for (int i = 0; i < nbDims; i++) + if (scales_input.is_weights()) + { + // TRT-15340: Remove this and use else path when safety support nbDims == 1. + ShapedWeights scales_weights = scales_input.weights(); + ASSERT((scales_weights.shape.nbDims == 1) && "The scales input must be 1D.", ErrorCode::kUNSUPPORTED_NODE); + // Scale factors has batch dimension. 
+ ASSERT((scales_weights.count() == static_cast(nbDims))
+ && "The shape of the scales input must align with the dimensions of the input.",
+ ErrorCode::kUNSUPPORTED_NODE);
+ ASSERT((scales_weights.type == ::ONNX_NAMESPACE::TensorProto::FLOAT)
+ && "This version of TensorRT only supports FLOAT scales input.",
+ ErrorCode::kINVALID_NODE);
+ float const* scales_ptr = static_cast(scales_weights.values);
+ std::vector scale_factors(nbDims, 1.0F);
+ for (int32_t i = 0; i < nbDims; i++)
+ {
+ scale_factors[i] = scales_ptr[i];
+ }
+ if (mode == "linear" || mode == "bilinear")
+ {
+ ASSERT(canUseLinearResize(scale_factors.size(), &scale_factors.front())
+ && "This version of TensorRT only supports linear resizing on the outermost 3 dimensions",
+ ErrorCode::kUNSUPPORTED_NODE);
+ }
+ layer->setScales(scale_factors.data(), nbDims);
+ }
+ else
{
- scale_factors[i] = scales_ptr[i];
+ nvinfer1::ITensor* resizeShape = resizeShapeTensor(ctx, tensor, scales_input);
+ nvinfer1::Dims const outDims = resizeShape->getDimensions();
+ ASSERT((outDims.nbDims == 1) && "The scales input must be 1D.", ErrorCode::kUNSUPPORTED_NODE);
+ // Scale factors has batch dimension.
+ ASSERT((outDims.d[0] == nbDims)
+ && "The shape of the scales input must align with the dimensions of the input.",
+ ErrorCode::kUNSUPPORTED_NODE);
+ ASSERT(
+ (resizeShape->getType() == nvinfer1::DataType::kINT32) && "Resize output shape type must be integral.",
+ ErrorCode::kINVALID_NODE);
+ layer->setInput(1, *resizeShape);
+ }
}
else
{
+ // TRT-15340: Adapt to use resizeShapeTensor instead when safety support nbDims == 1.
ASSERT(attrs.count("scales") && "Attribute scales is missing.", ErrorCode::kUNSUPPORTED_NODE);
// Get scale factors from OnnxAttrs.
auto scales = attrs.get>("scales");
// Scale factors has batch dimension.
- ASSERT( (static_cast(scales.size()) == nbDims) && "The shape of the scales input must aligin with the dimensions of the input.", ErrorCode::kUNSUPPORTED_NODE);
- for (int i = 0; i < nbDims; i++)
+ ASSERT((static_cast(scales.size()) == nbDims)
+ && "The shape of the scales input must align with the dimensions of the input.",
+ ErrorCode::kUNSUPPORTED_NODE);
+ std::vector scale_factors(nbDims, 1.0F);
+ for (int32_t i = 0; i < nbDims; i++)
{
scale_factors[i] = scales[i];
}
+ if (mode == "linear" || mode == "bilinear")
+ {
+ ASSERT(canUseLinearResize(scale_factors.size(), &scale_factors.front())
+ && "This version of TensorRT only supports linear resizing on the outermost 3 dimensions",
+ ErrorCode::kUNSUPPORTED_NODE);
+ }
+ layer->setScales(scale_factors.data(), nbDims);
}
- auto mode = attrs.get("mode", "nearest");
- ASSERT( (mode == "nearest" || mode == "linear") && "The attribute mode can only be nearest or linear.", ErrorCode::kUNSUPPORTED_NODE);
- // Set default resize mode. Nearest resize support N-D (where 0 < N <= 8) resize.
- nvinfer1::ResizeMode resizeMode = nvinfer1::ResizeMode::kNEAREST; - if (mode == "linear") - { - ASSERT(canUseLinearResize(scale_factors.size(), &scale_factors.front()) - && "This version of TensorRT only supports linear resizing on the outermost 3 dimensions", - ErrorCode::kUNSUPPORTED_NODE); - resizeMode = nvinfer1::ResizeMode::kLINEAR; - } - // Add resize layer - nvinfer1::IResizeLayer* const layer = ctx->network()->addResize(tensor); ctx->registerLayer(layer, getNodeName(node)); - layer->setScales(scale_factors.data(), nbDims); layer->setResizeMode(resizeMode); layer->setSelectorForSinglePixel(nvinfer1::ResizeSelector::kFORMULA); layer->setNearestRounding(nvinfer1::ResizeRoundMode::kFLOOR); @@ -4775,6 +4794,89 @@ std::vector loadFields(string_map>& return fields; } +DEFINE_BUILTIN_OP_IMPORTER(Xor) +{ + return elementwiseHelper(ctx, node, inputs, nvinfer1::ElementWiseOperation::kXOR); +} + +DEFINE_BUILTIN_OP_IMPORTER(Shrink) +{ + // TRT does not support BOOL input types for this node + nvinfer1::ITensor* x = &convertToTensor(inputs.at(0), ctx); + + auto originalType = x->getType(); + ASSERT( + (originalType != nvinfer1::DataType::kBOOL) && "BOOL is unsupported in Shrink", ErrorCode::kUNSUPPORTED_NODE); + ASSERT( + (originalType != nvinfer1::DataType::kINT8) && "INT8 is unsupported in Shrink.", ErrorCode::kUNSUPPORTED_NODE); + x = castHelper(ctx, x, nvinfer1::DataType::kFLOAT); + + // get attrs + OnnxAttrs attrs(node, ctx); + const float lambd = attrs.get("lambd", 0.5F); + const float bias = attrs.get("bias", 0.0F); + + // prepare Constant Tensors + nvinfer1::ITensor* lambdTensor + = addConstant(ctx, std::vector{lambd}, ::ONNX_NAMESPACE::TensorProto::FLOAT, {0, {1}})->getOutput(0); + CHECK(broadcastTensors(ctx, lambdTensor, x)); // align rank + + nvinfer1::ITensor* negLambdTensor + = addConstant(ctx, std::vector{-lambd}, ::ONNX_NAMESPACE::TensorProto::FLOAT, {0, {1}})->getOutput(0); + CHECK(broadcastTensors(ctx, negLambdTensor, x)); + + nvinfer1::ITensor* 
biasTensor + = addConstant(ctx, std::vector{bias}, ::ONNX_NAMESPACE::TensorProto::FLOAT, {0, {1}})->getOutput(0); + CHECK(broadcastTensors(ctx, biasTensor, x)); + + nvinfer1::ITensor* zeroTensor + = addConstant(ctx, std::vector{0.}, ::ONNX_NAMESPACE::TensorProto::FLOAT, {0, {1}})->getOutput(0); + CHECK(broadcastTensors(ctx, zeroTensor, x)); + + // If x > lambd, y = x - bias; Otherwise, y = 0 + nvinfer1::ITensor* xGreaterThanLambd + = &elementwiseHelper(ctx, node, {x, lambdTensor}, nvinfer1::ElementWiseOperation::kGREATER) + .value() + .at(0) + .tensor(); + nvinfer1::ITensor* xMinusBias + = &elementwiseHelper(ctx, node, {x, biasTensor}, nvinfer1::ElementWiseOperation::kSUB).value().at(0).tensor(); + nvinfer1::ITensor* output = ctx->network()->addSelect(*xGreaterThanLambd, *xMinusBias, *zeroTensor)->getOutput(0); + + // If x < -lambd, y = x + bias; + nvinfer1::ITensor* xLessThanMinusLambd + = &elementwiseHelper(ctx, node, {x, negLambdTensor}, nvinfer1::ElementWiseOperation::kLESS) + .value() + .at(0) + .tensor(); + nvinfer1::ITensor* xAddBias + = &elementwiseHelper(ctx, node, {x, biasTensor}, nvinfer1::ElementWiseOperation::kSUM).value().at(0).tensor(); + + auto* layer = ctx->network()->addSelect(*xLessThanMinusLambd, *xAddBias, *output); + ctx->registerLayer(layer, getNodeName(node)); + + // cast back to originalType + return {{castHelper(ctx, layer->getOutput(0), originalType)}}; +} + + +DEFINE_BUILTIN_OP_IMPORTER(HardSwish) +{ + // TRT does not support BOOL input types for this node + nvinfer1::ITensor* x = &convertToTensor(inputs.at(0), ctx); + ASSERT((x->getType() != nvinfer1::DataType::kBOOL) && "BOOL is unsupported in this version of TensorRT.", + ErrorCode::kUNSUPPORTED_NODE); + + // activationHelper does not support const and constexpr (compile failed) + float kALPHA{1.F / 6}; + float kBETA{0.5F}; + nvinfer1::ITensor* hardSigmoid = + &activationHelper(ctx, node, inputs, nvinfer1::ActivationType::kHARD_SIGMOID, &kALPHA, &kBETA).value().at(0).tensor(); + + 
return elementwiseHelper(ctx, node, {x, hardSigmoid}, nvinfer1::ElementWiseOperation::kPROD); +} + + // Any ops that are not supported will attempt to import as plugins. DEFINE_BUILTIN_OP_IMPORTER(FallbackPluginImporter) { @@ -4823,7 +4925,7 @@ DEFINE_BUILTIN_OP_IMPORTER(TRT_Scale) nvinfer1::ScaleMode mode = attrs.get("mode"); - // check if there's no weigths at all + // check if there's no weights at all // if no weights, just choose datatype of the input tensor // This is based on the assumption that weights should be // the same datatype as inputs @@ -4872,7 +4974,7 @@ DEFINE_BUILTIN_OP_IMPORTER(TRT_Shuffle) if (inputs.size() == 1) { - if (attrs.count("reshape_dims") > 0) + if (attrs.count("reshape_dims")) { nvinfer1::Dims reshapeDims = attrs.get("reshape_dims"); layer->setReshapeDimensions(reshapeDims); @@ -5199,8 +5301,9 @@ DEFINE_BUILTIN_OP_IMPORTER(TRT_Resize) } else { + // TRT-15340: Adapt to use resizeShapeTensor instead when safety support nbDims == 1. auto scales = attrs.get>("scales"); - ASSERT( (scales.size() > 0) && "Attribute scales is missing." 
, nvonnxparser::ErrorCode::kINVALID_NODE); + ASSERT((scales.size() > 0) && "Attribute scales is missing.", nvonnxparser::ErrorCode::kINVALID_NODE); layer->setScales(&scales[0], scales.size()); } } diff --git a/docs/Changelog.md b/docs/Changelog.md index 257d9b83..2e7c4b09 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -2,9 +2,23 @@ # ONNX-TensorRT Changelog -## 22.02 Container Release - 2021-02-03 +## TensorRT 8.4 GA Release -2022-6-6 + +### Added + +For more details, see the 8.4 GA release notes for new features added in TensorRT 8.4 + +- Added native FP16 support for importing and manipulating FP16 initializers +- Added support for `Shrink` +- Added support for `Xor` +- Added dynamic shape support for `ArgMax` and `ArgMin` +- Added dynamic shape support for `Range` for floating point types + ### Fixes - - Fixed naming issue in parsing `If` conditonal graphs +- Fixed an issue in tensor name scoping in ONNX models with nested subgraphs +- Fixed misc issues when dealing with empty tensors +- Fixed the operations in the `Celu` importer function +- Removed unnecessary reshapes in the `GEMM` importer function ## TensorRT 8.2 GA Release - 2021-11-23 diff --git a/docs/operators.md b/docs/operators.md index 49984e5d..fa05c281 100644 --- a/docs/operators.md +++ b/docs/operators.md @@ -2,7 +2,7 @@ # Supported ONNX Operators -TensorRT 8.2 supports operators up to Opset 13. Latest information of ONNX operators can be found [here](https://github.com/onnx/onnx/blob/main/docs/Operators.md) +TensorRT 8.4 supports operators up to Opset 17. Latest information of ONNX operators can be found [here](https://github.com/onnx/onnx/blob/master/docs/Operators.md) TensorRT supports the following ONNX data types: DOUBLE, FLOAT32, FLOAT16, INT8, and BOOL @@ -27,7 +27,9 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT. 
| Atanh | Y | FP32, FP16 |
| AveragePool | Y | FP32, FP16, INT8, INT32 | 2D or 3D Pooling only |
| BatchNormalization | Y | FP32, FP16 |
+| Bernoulli | N |
| BitShift | N |
+| BlackmanWindow | N |
| Cast | Y | FP32, FP16, INT32, INT8, BOOL | |
| Ceil | Y | FP32, FP16 |
| Celu | Y | FP32, FP16 |
@@ -43,6 +45,7 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT.
| Cos | Y | FP32, FP16 |
| Cosh | Y | FP32, FP16 |
| CumSum | Y | FP32, FP16 | `axis` must be an initializer |
+| DFT | N |
| DepthToSpace | Y | FP32, FP16, INT32 |
| DequantizeLinear | Y | INT8 | `x_zero_point` must be zero |
| Det | N |
@@ -67,7 +70,11 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT.
| GlobalMaxPool | Y | FP32, FP16, INT8 |
| Greater | Y | FP32, FP16, INT32 |
| GreaterOrEqual | Y | FP32, FP16, INT32 |
+| GridSample | N |
| GRU | Y | FP32, FP16 | For bidirectional GRUs, activation functions must be the same for both the forward and reverse pass
+| HammingWindow | N |
+| HannWindow | N |
+| HardSwish | Y | FP32, FP16, INT8 |
| HardSigmoid | Y | FP32, FP16, INT8 |
| Hardmax | N |
| Identity | Y | FP32, FP16, INT32, INT8, BOOL |
@@ -76,6 +83,7 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT.
| InstanceNormalization | Y | FP32, FP16 | Scales `scale` and biases `B` must be initializers. Input rank must be >=3 & <=5 |
| IsInf | N |
| IsNaN | Y | FP32, FP16, INT32 |
+| LayerNormalization | N |
| LeakyRelu | Y | FP32, FP16, INT8 |
| Less | Y | FP32, FP16, INT32 |
| LessOrEqual | Y | FP32, FP16, INT32 |
@@ -94,6 +102,7 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT.
| MaxUnpool | N |
| Mean | Y | FP32, FP16, INT32 |
| MeanVarianceNormalization | N |
+| MelWeightMatrix | N |
| Min | Y | FP32, FP16, INT32 |
| Mod | N |
| Mul | Y | FP32, FP16, INT32 |
@@ -104,6 +113,9 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT.
| NonZero | N | | Not | Y | BOOL | | OneHot | N | +| Optional | N | +| OptionalGetElement | N | +| OptionalHasElement | N | | Or | Y | BOOL | | Pad | Y | FP32, FP16, INT8, INT32 | | ParametricSoftplus | Y | FP32, FP16, INT8 | @@ -116,7 +128,7 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT. | RandomNormalLike | N | | RandomUniform | Y | FP32, FP16 | `seed` value is ignored by TensorRT | RandomUniformLike | Y | FP32, FP16 | `seed` value is ignored by TensorRT -| Range | Y | FP32, FP16, INT32 | Floating point inputs are only supported if `start`, `limit`, and `delta` inputs are initializers | +| Range | Y | FP32, FP16, INT32 | | Reciprocal | N | | ReduceL1 | Y | FP32, FP16 | | ReduceL2 | Y | FP32, FP16 | @@ -135,6 +147,7 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT. | RNN | Y | FP32, FP16 | For bidirectional RNNs, activation functions must be the same for both the forward and reverse pass | RoiAlign | N | | Round | Y | FP32, FP16, INT8 | +| STFT | N | | ScaledTanh | Y | FP32, FP16, INT8 | | Scan | Y | FP32, FP16 | | Scatter | Y | FP32, FP16, INT8, INT32 | @@ -147,8 +160,9 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT. | SequenceErase | N | | SequenceInsert | N | | SequenceLength | N | +| SequenceMap | N | | Shape | Y | FP32, FP16, INT32, INT8, BOOL | -| Shrink | N | +| Shrink | Y | FP32, FP16, INT32 | | Sigmoid | Y | FP32, FP16, INT8 | | Sign | Y | FP32, FP16, INT8, INT32 | | Sin | Y | FP32, FP16 | @@ -174,8 +188,9 @@ See below for the support matrix of ONNX operators in ONNX-TensorRT. 
| Tile | Y | FP32, FP16, INT32, BOOL | | TopK | Y | FP32, FP16 | `K` input must be an initializer | Transpose | Y | FP32, FP16, INT32, INT8, BOOL | +| Trilu | N | | Unique | N | | Unsqueeze | Y | FP32, FP16, INT32, INT8, BOOL | `axes` must be a constant tensor | | Upsample | Y | FP32, FP16 | | Where | Y | FP32, FP16, INT32, BOOL | -| Xor | N | +| Xor | Y | BOOL diff --git a/half.h b/half.h new file mode 100644 index 00000000..8ca4891d --- /dev/null +++ b/half.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// +// Custom wrapper around external half-precision header +// +// Header has some "extra parentheses" warnings when different rounding modes are used. + +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" +#endif + + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wmismatched-tags" +#endif + +#include "ieee_half.h" +typedef half_float::half float16; + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif diff --git a/ieee_half.h b/ieee_half.h new file mode 100644 index 00000000..dd4963c1 --- /dev/null +++ b/ieee_half.h @@ -0,0 +1,3094 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// half - IEEE 754-based half-precision floating point library. +// +// Copyright (c) 2012-2017 Christian Rau +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, +// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Version 1.12.0 + +/// \file +/// Main header file for half precision functionality. + +#ifndef HALF_HALF_HPP +#define HALF_HALF_HPP + +/// Combined gcc version number. 
+#define HALF_GNUC_VERSION (__GNUC__*100+__GNUC_MINOR__) + +//check C++11 language features +#if defined(__clang__) //clang + #if __has_feature(cxx_static_assert) && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) + #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 + #endif + #if __has_feature(cxx_constexpr) && !defined(HALF_ENABLE_CPP11_CONSTEXPR) + #define HALF_ENABLE_CPP11_CONSTEXPR 1 + #endif + #if __has_feature(cxx_noexcept) && !defined(HALF_ENABLE_CPP11_NOEXCEPT) + #define HALF_ENABLE_CPP11_NOEXCEPT 1 + #endif + #if __has_feature(cxx_user_literals) && !defined(HALF_ENABLE_CPP11_USER_LITERALS) + #define HALF_ENABLE_CPP11_USER_LITERALS 1 + #endif + #if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && !defined(HALF_ENABLE_CPP11_LONG_LONG) + #define HALF_ENABLE_CPP11_LONG_LONG 1 + #endif +/*#elif defined(__INTEL_COMPILER) //Intel C++ + #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) ???????? + #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 + #endif + #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) ???????? + #define HALF_ENABLE_CPP11_CONSTEXPR 1 + #endif + #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) ???????? + #define HALF_ENABLE_CPP11_NOEXCEPT 1 + #endif + #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_LONG_LONG) ???????? 
+ #define HALF_ENABLE_CPP11_LONG_LONG 1 + #endif*/ +#elif defined(__GNUC__) //gcc + #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L + #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) + #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 + #endif + #if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) + #define HALF_ENABLE_CPP11_CONSTEXPR 1 + #endif + #if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) + #define HALF_ENABLE_CPP11_NOEXCEPT 1 + #endif + #if HALF_GNUC_VERSION >= 407 && !defined(HALF_ENABLE_CPP11_USER_LITERALS) + #define HALF_ENABLE_CPP11_USER_LITERALS 1 + #endif + #if !defined(HALF_ENABLE_CPP11_LONG_LONG) + #define HALF_ENABLE_CPP11_LONG_LONG 1 + #endif + #endif +#elif defined(_MSC_VER) //Visual C++ + #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) + #define HALF_ENABLE_CPP11_CONSTEXPR 1 + #endif + #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) + #define HALF_ENABLE_CPP11_NOEXCEPT 1 + #endif + #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_USER_LITERALS) + #define HALF_ENABLE_CPP11_USER_LITERALS 1 + #endif + #if _MSC_VER >= 1600 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) + #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 + #endif + #if _MSC_VER >= 1310 && !defined(HALF_ENABLE_CPP11_LONG_LONG) + #define HALF_ENABLE_CPP11_LONG_LONG 1 + #endif + #define HALF_POP_WARNINGS 1 + #pragma warning(push) + #pragma warning(disable : 4099 4127 4146) //struct vs class, constant in if, negative unsigned +#endif + +//check C++11 library features +#include +#if defined(_LIBCPP_VERSION) //libc++ + #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103 + #ifndef HALF_ENABLE_CPP11_TYPE_TRAITS + #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 + #endif + #ifndef HALF_ENABLE_CPP11_CSTDINT + #define HALF_ENABLE_CPP11_CSTDINT 1 + #endif + #ifndef HALF_ENABLE_CPP11_CMATH + #define HALF_ENABLE_CPP11_CMATH 1 + #endif + #ifndef HALF_ENABLE_CPP11_HASH + #define HALF_ENABLE_CPP11_HASH 1 + 
#endif + #endif +#elif defined(__GLIBCXX__) //libstdc++ + #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103 + #ifdef __clang__ + #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS) + #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 + #endif + #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CSTDINT) + #define HALF_ENABLE_CPP11_CSTDINT 1 + #endif + #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CMATH) + #define HALF_ENABLE_CPP11_CMATH 1 + #endif + #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_HASH) + #define HALF_ENABLE_CPP11_HASH 1 + #endif + #else + #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CSTDINT) + #define HALF_ENABLE_CPP11_CSTDINT 1 + #endif + #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CMATH) + #define HALF_ENABLE_CPP11_CMATH 1 + #endif + #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_HASH) + #define HALF_ENABLE_CPP11_HASH 1 + #endif + #endif + #endif +#elif defined(_CPPLIB_VER) //Dinkumware/Visual C++ + #if _CPPLIB_VER >= 520 + #ifndef HALF_ENABLE_CPP11_TYPE_TRAITS + #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 + #endif + #ifndef HALF_ENABLE_CPP11_CSTDINT + #define HALF_ENABLE_CPP11_CSTDINT 1 + #endif + #ifndef HALF_ENABLE_CPP11_HASH + #define HALF_ENABLE_CPP11_HASH 1 + #endif + #endif + #if _CPPLIB_VER >= 610 + #ifndef HALF_ENABLE_CPP11_CMATH + #define HALF_ENABLE_CPP11_CMATH 1 + #endif + #endif +#endif +#undef HALF_GNUC_VERSION + +//support constexpr +#if HALF_ENABLE_CPP11_CONSTEXPR + #define HALF_CONSTEXPR constexpr + #define HALF_CONSTEXPR_CONST constexpr +#else + #define HALF_CONSTEXPR + #define HALF_CONSTEXPR_CONST const +#endif + +//support noexcept +#if HALF_ENABLE_CPP11_NOEXCEPT + #define HALF_NOEXCEPT noexcept + #define HALF_NOTHROW noexcept +#else + #define HALF_NOEXCEPT + #define HALF_NOTHROW throw() +#endif + +#include +#include +#include +#include +#include +#include +#if HALF_ENABLE_CPP11_TYPE_TRAITS + #include +#endif +#if HALF_ENABLE_CPP11_CSTDINT 
+ #include +#endif +#if HALF_ENABLE_CPP11_HASH + #include +#endif + + +/// Default rounding mode. +/// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and `float`s as well as +/// for the half_cast() if not specifying a rounding mode explicitly. It can be redefined (before including half.hpp) to one +/// of the standard rounding modes using their respective constants or the equivalent values of `std::float_round_style`: +/// +/// `std::float_round_style` | value | rounding +/// ---------------------------------|-------|------------------------- +/// `std::round_indeterminate` | -1 | fastest (default) +/// `std::round_toward_zero` | 0 | toward zero +/// `std::round_to_nearest` | 1 | to nearest +/// `std::round_toward_infinity` | 2 | toward positive infinity +/// `std::round_toward_neg_infinity` | 3 | toward negative infinity +/// +/// By default this is set to `-1` (`std::round_indeterminate`), which uses truncation (round toward zero, but with overflows +/// set to infinity) and is the fastest rounding mode possible. It can even be set to `std::numeric_limits::round_style` +/// to synchronize the rounding mode with that of the underlying single-precision implementation. +/// For GIE-1275, changing it to 1 (to nearest) +#ifndef HALF_ROUND_STYLE + #define HALF_ROUND_STYLE 1 // = std::round_to_nearest +#endif + +/// Tie-breaking behaviour for round to nearest. +/// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this is +/// defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way cases (and +/// thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more IEEE-conformant +/// behaviour is needed. +#ifndef HALF_ROUND_TIES_TO_EVEN + #define HALF_ROUND_TIES_TO_EVEN 0 // ties away from zero +#endif + +/// Value signaling overflow. 
+/// In correspondence with `HUGE_VAL[F|L]` from `` this symbol expands to a positive value signaling the overflow of an +/// operation, in particular it just evaluates to positive infinity. +#define HUGE_VALH std::numeric_limits::infinity() + +/// Fast half-precision fma function. +/// This symbol is only defined if the fma() function generally executes as fast as, or faster than, a separate +/// half-precision multiplication followed by an addition. Due to the internal single-precision implementation of all +/// arithmetic operations, this is in fact always the case. +#define FP_FAST_FMAH 1 + +#ifndef FP_ILOGB0 + #define FP_ILOGB0 INT_MIN +#endif +#ifndef FP_ILOGBNAN + #define FP_ILOGBNAN INT_MAX +#endif +#ifndef FP_SUBNORMAL + #define FP_SUBNORMAL 0 +#endif +#ifndef FP_ZERO + #define FP_ZERO 1 +#endif +#ifndef FP_NAN + #define FP_NAN 2 +#endif +#ifndef FP_INFINITE + #define FP_INFINITE 3 +#endif +#ifndef FP_NORMAL + #define FP_NORMAL 4 +#endif + + +/// Main namespace for half precision functionality. +/// This namespace contains all the functionality provided by the library. +namespace half_float +{ + class half; + +#if HALF_ENABLE_CPP11_USER_LITERALS + /// Library-defined half-precision literals. + /// Import this namespace to enable half-precision floating point literals: + /// ~~~~{.cpp} + /// using namespace half_float::literal; + /// half_float::half = 4.2_h; + /// ~~~~ + namespace literal + { + half operator "" _h(long double); + } +#endif + + /// \internal + /// \brief Implementation details. + namespace detail + { + #if HALF_ENABLE_CPP11_TYPE_TRAITS + /// Conditional type. + template struct conditional : std::conditional {}; + + /// Helper for tag dispatching. + template struct bool_type : std::integral_constant {}; + using std::true_type; + using std::false_type; + + /// Type traits for floating point types. + template struct is_float : std::is_floating_point {}; + #else + /// Conditional type. 
+ template struct conditional { typedef T type; }; + template struct conditional { typedef F type; }; + + /// Helper for tag dispatching. + template struct bool_type {}; + typedef bool_type true_type; + typedef bool_type false_type; + + /// Type traits for floating point types. + template struct is_float : false_type {}; + template struct is_float : is_float {}; + template struct is_float : is_float {}; + template struct is_float : is_float {}; + template<> struct is_float : true_type {}; + template<> struct is_float : true_type {}; + template<> struct is_float : true_type {}; + #endif + + /// Type traits for floating point bits. + template struct bits { typedef unsigned char type; }; + template struct bits : bits {}; + template struct bits : bits {}; + template struct bits : bits {}; + + #if HALF_ENABLE_CPP11_CSTDINT + /// Unsigned integer of (at least) 16 bits width. + typedef std::uint_least16_t uint16; + + /// Unsigned integer of (at least) 32 bits width. + template<> struct bits { typedef std::uint_least32_t type; }; + + /// Unsigned integer of (at least) 64 bits width. + template<> struct bits { typedef std::uint_least64_t type; }; + #else + /// Unsigned integer of (at least) 16 bits width. + typedef unsigned short uint16; + + /// Unsigned integer of (at least) 32 bits width. + template<> struct bits : conditional::digits>=32,unsigned int,unsigned long> {}; + + #if HALF_ENABLE_CPP11_LONG_LONG + /// Unsigned integer of (at least) 64 bits width. + template<> struct bits : conditional::digits>=64,unsigned long,unsigned long long> {}; + #else + /// Unsigned integer of (at least) 64 bits width. + template<> struct bits { typedef unsigned long type; }; + #endif + #endif + + /// Tag type for binary construction. + struct binary_t {}; + + /// Tag for binary construction. + HALF_CONSTEXPR_CONST binary_t binary = binary_t(); + + /// Temporary half-precision expression. 
+ /// This class represents a half-precision expression which just stores a single-precision value internally. + struct expr + { + /// Conversion constructor. + /// \param f single-precision value to convert + explicit HALF_CONSTEXPR expr(float f) HALF_NOEXCEPT : value_(f) {} + + /// Conversion to single-precision. + /// \return single precision value representing expression value + HALF_CONSTEXPR operator float() const HALF_NOEXCEPT { return value_; } + + private: + /// Internal expression value stored in single-precision. + float value_; + }; + + /// SFINAE helper for generic half-precision functions. + /// This class template has to be specialized for each valid combination of argument types to provide a corresponding + /// `type` member equivalent to \a T. + /// \tparam T type to return + template struct enable {}; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + template struct enable { typedef T type; }; + + /// Return type for specialized generic 2-argument half-precision functions. + /// This class template has to be specialized for each valid combination of argument types to provide a corresponding + /// `type` member denoting the appropriate return type. + /// \tparam T first argument type + /// \tparam U first argument type + template struct result : enable {}; + template<> struct result { typedef half type; }; + + /// \name Classification helpers + /// \{ + + /// Check for infinity. 
+ /// \tparam T argument type (builtin floating point type) + /// \param arg value to query + /// \retval true if infinity + /// \retval false else + template bool builtin_isinf(T arg) + { + #if HALF_ENABLE_CPP11_CMATH + return std::isinf(arg); + #elif defined(_MSC_VER) + return !::_finite(static_cast(arg)) && !::_isnan(static_cast(arg)); + #else + return arg == std::numeric_limits::infinity() || arg == -std::numeric_limits::infinity(); + #endif + } + + /// Check for NaN. + /// \tparam T argument type (builtin floating point type) + /// \param arg value to query + /// \retval true if not a number + /// \retval false else + template bool builtin_isnan(T arg) + { + #if HALF_ENABLE_CPP11_CMATH + return std::isnan(arg); + #elif defined(_MSC_VER) + return ::_isnan(static_cast(arg)) != 0; + #else + return arg != arg; + #endif + } + + /// Check sign. + /// \tparam T argument type (builtin floating point type) + /// \param arg value to query + /// \retval true if signbit set + /// \retval false else + template bool builtin_signbit(T arg) + { + #if HALF_ENABLE_CPP11_CMATH + return std::signbit(arg); + #else + return arg < T() || (arg == T() && T(1)/arg < T()); + #endif + } + + /// \} + /// \name Conversion + /// \{ + + /// Convert IEEE single-precision to half-precision. + /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf). + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \param value single-precision value + /// \return binary representation of half-precision value + template uint16 float2half_impl(float value, true_type) + { + typedef bits::type uint32; + uint32 bits;// = *reinterpret_cast(&value); //violating strict aliasing! 
+ std::memcpy(&bits, &value, sizeof(float)); +/* uint16 hbits = (bits>>16) & 0x8000; + bits &= 0x7FFFFFFF; + int exp = bits >> 23; + if(exp == 255) + return hbits | 0x7C00 | (0x3FF&-static_cast((bits&0x7FFFFF)!=0)); + if(exp > 142) + { + if(R == std::round_toward_infinity) + return hbits | 0x7C00 - (hbits>>15); + if(R == std::round_toward_neg_infinity) + return hbits | 0x7BFF + (hbits>>15); + return hbits | 0x7BFF + (R!=std::round_toward_zero); + } + int g, s; + if(exp > 112) + { + g = (bits>>12) & 1; + s = (bits&0xFFF) != 0; + hbits |= ((exp-112)<<10) | ((bits>>13)&0x3FF); + } + else if(exp > 101) + { + int i = 125 - exp; + bits = (bits&0x7FFFFF) | 0x800000; + g = (bits>>i) & 1; + s = (bits&((1L<> (i+1); + } + else + { + g = 0; + s = bits != 0; + } + if(R == std::round_to_nearest) + #if HALF_ROUND_TIES_TO_EVEN + hbits += g & (s|hbits); + #else + hbits += g; + #endif + else if(R == std::round_toward_infinity) + hbits += ~(hbits>>15) & (s|g); + else if(R == std::round_toward_neg_infinity) + hbits += (hbits>>15) & (g|s); +*/ static const uint16 base_table[512] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, + 0x0200, 
0x0400, 0x0800, 0x0C00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, + 0x4000, 0x4400, 0x4800, 0x4C00, 0x5000, 0x5400, 0x5800, 0x5C00, 0x6000, 0x6400, 0x6800, 0x6C00, 0x7000, 0x7400, 0x7800, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 
0x8001, 0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, + 0x8200, 0x8400, 0x8800, 0x8C00, 0x9000, 0x9400, 0x9800, 0x9C00, 0xA000, 0xA400, 0xA800, 0xAC00, 0xB000, 0xB400, 0xB800, 0xBC00, + 0xC000, 0xC400, 0xC800, 0xCC00, 0xD000, 0xD400, 0xD800, 0xDC00, 0xE000, 0xE400, 0xE800, 0xEC00, 0xF000, 0xF400, 0xF800, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, + 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00 }; + static const unsigned char shift_table[512] = { + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13 }; + uint16 hbits = base_table[bits>>23] + static_cast((bits&0x7FFFFF)>>shift_table[bits>>23]); + if(R == std::round_to_nearest) + hbits += (((bits&0x7FFFFF)>>(shift_table[bits>>23]-1))|(((bits>>23)&0xFF)==102)) & ((hbits&0x7C00)!=0x7C00) + #if HALF_ROUND_TIES_TO_EVEN + & (((((static_cast(1)<<(shift_table[bits>>23]-1))-1)&bits)!=0)|hbits) + #endif + ; + else if(R == std::round_toward_zero) + hbits -= ((hbits&0x7FFF)==0x7C00) & ~shift_table[bits>>23]; + else if(R == std::round_toward_infinity) + hbits += ((((bits&0x7FFFFF&((static_cast(1)<<(shift_table[bits>>23]))-1))!=0)|(((bits>>23)<=102)& + 
((bits>>23)!=0)))&(hbits<0x7C00)) - ((hbits==0xFC00)&((bits>>23)!=511)); + else if(R == std::round_toward_neg_infinity) + hbits += ((((bits&0x7FFFFF&((static_cast(1)<<(shift_table[bits>>23]))-1))!=0)|(((bits>>23)<=358)& + ((bits>>23)!=256)))&(hbits<0xFC00)&(hbits>>15)) - ((hbits==0x7C00)&((bits>>23)!=255)); + return hbits; + } + + /// Convert IEEE double-precision to half-precision. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \param value double-precision value + /// \return binary representation of half-precision value + template uint16 float2half_impl(double value, true_type) + { + typedef bits::type uint32; + typedef bits::type uint64; + uint64 bits;// = *reinterpret_cast(&value); //violating strict aliasing! + std::memcpy(&bits, &value, sizeof(double)); + uint32 hi = bits >> 32, lo = bits & 0xFFFFFFFF; + uint16 hbits = (hi>>16) & 0x8000; + hi &= 0x7FFFFFFF; + int exp = hi >> 20; + if(exp == 2047) + return hbits | 0x7C00 | (0x3FF&-static_cast((bits&0xFFFFFFFFFFFFF)!=0)); + if(exp > 1038) + { + if(R == std::round_toward_infinity) + return hbits | 0x7C00 - (hbits>>15); + if(R == std::round_toward_neg_infinity) + return hbits | 0x7BFF + (hbits>>15); + return hbits | 0x7BFF + (R!=std::round_toward_zero); + } + int g, s = lo != 0; + if(exp > 1008) + { + g = (hi>>9) & 1; + s |= (hi&0x1FF) != 0; + hbits |= ((exp-1008)<<10) | ((hi>>10)&0x3FF); + } + else if(exp > 997) + { + int i = 1018 - exp; + hi = (hi&0xFFFFF) | 0x100000; + g = (hi>>i) & 1; + s |= (hi&((1L<> (i+1); + } + else + { + g = 0; + s |= hi != 0; + } + if(R == std::round_to_nearest) + #if HALF_ROUND_TIES_TO_EVEN + hbits += g & (s|hbits); + #else + hbits += g; + #endif + else if(R == std::round_toward_infinity) + hbits += ~(hbits>>15) & (s|g); + else if(R == std::round_toward_neg_infinity) + hbits += (hbits>>15) & (g|s); + return hbits; + } + + /// Convert non-IEEE floating point to half-precision. 
+ /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam T source type (builtin floating point type) + /// \param value floating point value + /// \return binary representation of half-precision value + template uint16 float2half_impl(T value, false_type) + { + uint16 hbits = static_cast(builtin_signbit(value)) << 15; + if(value == T()) + return hbits; + if(builtin_isnan(value)) + return hbits | 0x7FFF; + if(builtin_isinf(value)) + return hbits | 0x7C00; + int exp; + std::frexp(value, &exp); + if(exp > 16) + { + if(R == std::round_toward_infinity) + return hbits | (0x7C00 - (hbits>>15)); + if(R == std::round_toward_neg_infinity) + return hbits | (0x7BFF + (hbits>>15)); + return hbits | (0x7BFF + (R!=std::round_toward_zero)); + } + if(exp < -13) + value = std::ldexp(value, 24); + else + { + value = std::ldexp(value, 11-exp); + hbits |= ((exp+13)<<10); + } + T ival, frac = std::modf(value, &ival); + hbits += static_cast(std::abs(static_cast(ival))); + if(R == std::round_to_nearest) + { + frac = std::abs(frac); + #if HALF_ROUND_TIES_TO_EVEN + hbits += (frac>T(0.5)) | ((frac==T(0.5))&hbits); + #else + hbits += frac >= T(0.5); + #endif + } + else if(R == std::round_toward_infinity) + hbits += frac > T(); + else if(R == std::round_toward_neg_infinity) + hbits += frac < T(); + return hbits; + } + + /// Convert floating point to half-precision. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam T source type (builtin floating point type) + /// \param value floating point value + /// \return binary representation of half-precision value + template uint16 float2half(T value) + { + return float2half_impl(value, bool_type::is_iec559&&sizeof(typename bits::type)==sizeof(T)>()); + } + + /// Convert integer to half-precision floating point. 
+ /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam S `true` if value negative, `false` else + /// \tparam T type to convert (builtin integer type) + /// \param value non-negative integral value + /// \return binary representation of half-precision value + template uint16 int2half_impl(T value) + { + #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS + static_assert(std::is_integral::value, "int to half conversion only supports builtin integer types"); + #endif + if(S) + value = -value; + uint16 bits = S << 15; + if(value > 0xFFFF) + { + if(R == std::round_toward_infinity) + bits |= 0x7C00 - S; + else if(R == std::round_toward_neg_infinity) + bits |= 0x7BFF + S; + else + bits |= 0x7BFF + (R!=std::round_toward_zero); + } + else if(value) + { + unsigned int m = value, exp = 24; + for(; m<0x400; m<<=1,--exp) ; + for(; m>0x7FF; m>>=1,++exp) ; + bits |= (exp<<10) + m; + if(exp > 24) + { + if(R == std::round_to_nearest) + bits += (value>>(exp-25)) & 1 + #if HALF_ROUND_TIES_TO_EVEN + & (((((1<<(exp-25))-1)&value)!=0)|bits) + #endif + ; + else if(R == std::round_toward_infinity) + bits += ((value&((1<<(exp-24))-1))!=0) & !S; + else if(R == std::round_toward_neg_infinity) + bits += ((value&((1<<(exp-24))-1))!=0) & S; + } + } + return bits; + } + + /// Convert integer to half-precision floating point. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam T type to convert (builtin integer type) + /// \param value integral value + /// \return binary representation of half-precision value + template uint16 int2half(T value) + { + return (value<0) ? int2half_impl(value) : int2half_impl(value); + } + + /// Convert half-precision to IEEE single-precision. + /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf). 
+ /// \param value binary representation of half-precision value + /// \return single-precision value + inline float half2float_impl(uint16 value, float, true_type) + { + typedef bits::type uint32; +/* uint32 bits = static_cast(value&0x8000) << 16; + int abs = value & 0x7FFF; + if(abs) + { + bits |= 0x38000000 << static_cast(abs>=0x7C00); + for(; abs<0x400; abs<<=1,bits-=0x800000) ; + bits += static_cast(abs) << 13; + } +*/ static const uint32 mantissa_table[2048] = { + 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, + 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000, + 0x36000000, 0x36040000, 0x36080000, 0x360C0000, 0x36100000, 0x36140000, 0x36180000, 0x361C0000, 0x36200000, 0x36240000, 0x36280000, 0x362C0000, 0x36300000, 0x36340000, 0x36380000, 0x363C0000, + 0x36400000, 0x36440000, 0x36480000, 0x364C0000, 0x36500000, 0x36540000, 0x36580000, 0x365C0000, 0x36600000, 0x36640000, 0x36680000, 0x366C0000, 0x36700000, 0x36740000, 0x36780000, 0x367C0000, + 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368A0000, 0x368C0000, 0x368E0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369A0000, 0x369C0000, 0x369E0000, + 0x36A00000, 0x36A20000, 0x36A40000, 0x36A60000, 0x36A80000, 0x36AA0000, 0x36AC0000, 0x36AE0000, 0x36B00000, 0x36B20000, 0x36B40000, 0x36B60000, 0x36B80000, 0x36BA0000, 0x36BC0000, 0x36BE0000, + 0x36C00000, 0x36C20000, 0x36C40000, 0x36C60000, 0x36C80000, 0x36CA0000, 0x36CC0000, 0x36CE0000, 0x36D00000, 0x36D20000, 0x36D40000, 0x36D60000, 0x36D80000, 0x36DA0000, 0x36DC0000, 0x36DE0000, + 0x36E00000, 0x36E20000, 0x36E40000, 0x36E60000, 0x36E80000, 0x36EA0000, 0x36EC0000, 0x36EE0000, 0x36F00000, 0x36F20000, 0x36F40000, 0x36F60000, 0x36F80000, 
0x36FA0000, 0x36FC0000, 0x36FE0000, + 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370A0000, 0x370B0000, 0x370C0000, 0x370D0000, 0x370E0000, 0x370F0000, + 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371A0000, 0x371B0000, 0x371C0000, 0x371D0000, 0x371E0000, 0x371F0000, + 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, 0x37280000, 0x37290000, 0x372A0000, 0x372B0000, 0x372C0000, 0x372D0000, 0x372E0000, 0x372F0000, + 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000, 0x373A0000, 0x373B0000, 0x373C0000, 0x373D0000, 0x373E0000, 0x373F0000, + 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374A0000, 0x374B0000, 0x374C0000, 0x374D0000, 0x374E0000, 0x374F0000, + 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375A0000, 0x375B0000, 0x375C0000, 0x375D0000, 0x375E0000, 0x375F0000, + 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000, 0x376A0000, 0x376B0000, 0x376C0000, 0x376D0000, 0x376E0000, 0x376F0000, + 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377A0000, 0x377B0000, 0x377C0000, 0x377D0000, 0x377E0000, 0x377F0000, + 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, + 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378A0000, 0x378A8000, 0x378B0000, 0x378B8000, 0x378C0000, 0x378C8000, 0x378D0000, 0x378D8000, 0x378E0000, 0x378E8000, 0x378F0000, 0x378F8000, + 0x37900000, 
0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000, + 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379A0000, 0x379A8000, 0x379B0000, 0x379B8000, 0x379C0000, 0x379C8000, 0x379D0000, 0x379D8000, 0x379E0000, 0x379E8000, 0x379F0000, 0x379F8000, + 0x37A00000, 0x37A08000, 0x37A10000, 0x37A18000, 0x37A20000, 0x37A28000, 0x37A30000, 0x37A38000, 0x37A40000, 0x37A48000, 0x37A50000, 0x37A58000, 0x37A60000, 0x37A68000, 0x37A70000, 0x37A78000, + 0x37A80000, 0x37A88000, 0x37A90000, 0x37A98000, 0x37AA0000, 0x37AA8000, 0x37AB0000, 0x37AB8000, 0x37AC0000, 0x37AC8000, 0x37AD0000, 0x37AD8000, 0x37AE0000, 0x37AE8000, 0x37AF0000, 0x37AF8000, + 0x37B00000, 0x37B08000, 0x37B10000, 0x37B18000, 0x37B20000, 0x37B28000, 0x37B30000, 0x37B38000, 0x37B40000, 0x37B48000, 0x37B50000, 0x37B58000, 0x37B60000, 0x37B68000, 0x37B70000, 0x37B78000, + 0x37B80000, 0x37B88000, 0x37B90000, 0x37B98000, 0x37BA0000, 0x37BA8000, 0x37BB0000, 0x37BB8000, 0x37BC0000, 0x37BC8000, 0x37BD0000, 0x37BD8000, 0x37BE0000, 0x37BE8000, 0x37BF0000, 0x37BF8000, + 0x37C00000, 0x37C08000, 0x37C10000, 0x37C18000, 0x37C20000, 0x37C28000, 0x37C30000, 0x37C38000, 0x37C40000, 0x37C48000, 0x37C50000, 0x37C58000, 0x37C60000, 0x37C68000, 0x37C70000, 0x37C78000, + 0x37C80000, 0x37C88000, 0x37C90000, 0x37C98000, 0x37CA0000, 0x37CA8000, 0x37CB0000, 0x37CB8000, 0x37CC0000, 0x37CC8000, 0x37CD0000, 0x37CD8000, 0x37CE0000, 0x37CE8000, 0x37CF0000, 0x37CF8000, + 0x37D00000, 0x37D08000, 0x37D10000, 0x37D18000, 0x37D20000, 0x37D28000, 0x37D30000, 0x37D38000, 0x37D40000, 0x37D48000, 0x37D50000, 0x37D58000, 0x37D60000, 0x37D68000, 0x37D70000, 0x37D78000, + 0x37D80000, 0x37D88000, 0x37D90000, 0x37D98000, 0x37DA0000, 0x37DA8000, 0x37DB0000, 0x37DB8000, 0x37DC0000, 0x37DC8000, 0x37DD0000, 0x37DD8000, 0x37DE0000, 0x37DE8000, 0x37DF0000, 0x37DF8000, + 0x37E00000, 0x37E08000, 0x37E10000, 0x37E18000, 0x37E20000, 0x37E28000, 
0x37E30000, 0x37E38000, 0x37E40000, 0x37E48000, 0x37E50000, 0x37E58000, 0x37E60000, 0x37E68000, 0x37E70000, 0x37E78000, + 0x37E80000, 0x37E88000, 0x37E90000, 0x37E98000, 0x37EA0000, 0x37EA8000, 0x37EB0000, 0x37EB8000, 0x37EC0000, 0x37EC8000, 0x37ED0000, 0x37ED8000, 0x37EE0000, 0x37EE8000, 0x37EF0000, 0x37EF8000, + 0x37F00000, 0x37F08000, 0x37F10000, 0x37F18000, 0x37F20000, 0x37F28000, 0x37F30000, 0x37F38000, 0x37F40000, 0x37F48000, 0x37F50000, 0x37F58000, 0x37F60000, 0x37F68000, 0x37F70000, 0x37F78000, + 0x37F80000, 0x37F88000, 0x37F90000, 0x37F98000, 0x37FA0000, 0x37FA8000, 0x37FB0000, 0x37FB8000, 0x37FC0000, 0x37FC8000, 0x37FD0000, 0x37FD8000, 0x37FE0000, 0x37FE8000, 0x37FF0000, 0x37FF8000, + 0x38000000, 0x38004000, 0x38008000, 0x3800C000, 0x38010000, 0x38014000, 0x38018000, 0x3801C000, 0x38020000, 0x38024000, 0x38028000, 0x3802C000, 0x38030000, 0x38034000, 0x38038000, 0x3803C000, + 0x38040000, 0x38044000, 0x38048000, 0x3804C000, 0x38050000, 0x38054000, 0x38058000, 0x3805C000, 0x38060000, 0x38064000, 0x38068000, 0x3806C000, 0x38070000, 0x38074000, 0x38078000, 0x3807C000, + 0x38080000, 0x38084000, 0x38088000, 0x3808C000, 0x38090000, 0x38094000, 0x38098000, 0x3809C000, 0x380A0000, 0x380A4000, 0x380A8000, 0x380AC000, 0x380B0000, 0x380B4000, 0x380B8000, 0x380BC000, + 0x380C0000, 0x380C4000, 0x380C8000, 0x380CC000, 0x380D0000, 0x380D4000, 0x380D8000, 0x380DC000, 0x380E0000, 0x380E4000, 0x380E8000, 0x380EC000, 0x380F0000, 0x380F4000, 0x380F8000, 0x380FC000, + 0x38100000, 0x38104000, 0x38108000, 0x3810C000, 0x38110000, 0x38114000, 0x38118000, 0x3811C000, 0x38120000, 0x38124000, 0x38128000, 0x3812C000, 0x38130000, 0x38134000, 0x38138000, 0x3813C000, + 0x38140000, 0x38144000, 0x38148000, 0x3814C000, 0x38150000, 0x38154000, 0x38158000, 0x3815C000, 0x38160000, 0x38164000, 0x38168000, 0x3816C000, 0x38170000, 0x38174000, 0x38178000, 0x3817C000, + 0x38180000, 0x38184000, 0x38188000, 0x3818C000, 0x38190000, 0x38194000, 0x38198000, 0x3819C000, 0x381A0000, 0x381A4000, 0x381A8000, 
0x381AC000, 0x381B0000, 0x381B4000, 0x381B8000, 0x381BC000, + 0x381C0000, 0x381C4000, 0x381C8000, 0x381CC000, 0x381D0000, 0x381D4000, 0x381D8000, 0x381DC000, 0x381E0000, 0x381E4000, 0x381E8000, 0x381EC000, 0x381F0000, 0x381F4000, 0x381F8000, 0x381FC000, + 0x38200000, 0x38204000, 0x38208000, 0x3820C000, 0x38210000, 0x38214000, 0x38218000, 0x3821C000, 0x38220000, 0x38224000, 0x38228000, 0x3822C000, 0x38230000, 0x38234000, 0x38238000, 0x3823C000, + 0x38240000, 0x38244000, 0x38248000, 0x3824C000, 0x38250000, 0x38254000, 0x38258000, 0x3825C000, 0x38260000, 0x38264000, 0x38268000, 0x3826C000, 0x38270000, 0x38274000, 0x38278000, 0x3827C000, + 0x38280000, 0x38284000, 0x38288000, 0x3828C000, 0x38290000, 0x38294000, 0x38298000, 0x3829C000, 0x382A0000, 0x382A4000, 0x382A8000, 0x382AC000, 0x382B0000, 0x382B4000, 0x382B8000, 0x382BC000, + 0x382C0000, 0x382C4000, 0x382C8000, 0x382CC000, 0x382D0000, 0x382D4000, 0x382D8000, 0x382DC000, 0x382E0000, 0x382E4000, 0x382E8000, 0x382EC000, 0x382F0000, 0x382F4000, 0x382F8000, 0x382FC000, + 0x38300000, 0x38304000, 0x38308000, 0x3830C000, 0x38310000, 0x38314000, 0x38318000, 0x3831C000, 0x38320000, 0x38324000, 0x38328000, 0x3832C000, 0x38330000, 0x38334000, 0x38338000, 0x3833C000, + 0x38340000, 0x38344000, 0x38348000, 0x3834C000, 0x38350000, 0x38354000, 0x38358000, 0x3835C000, 0x38360000, 0x38364000, 0x38368000, 0x3836C000, 0x38370000, 0x38374000, 0x38378000, 0x3837C000, + 0x38380000, 0x38384000, 0x38388000, 0x3838C000, 0x38390000, 0x38394000, 0x38398000, 0x3839C000, 0x383A0000, 0x383A4000, 0x383A8000, 0x383AC000, 0x383B0000, 0x383B4000, 0x383B8000, 0x383BC000, + 0x383C0000, 0x383C4000, 0x383C8000, 0x383CC000, 0x383D0000, 0x383D4000, 0x383D8000, 0x383DC000, 0x383E0000, 0x383E4000, 0x383E8000, 0x383EC000, 0x383F0000, 0x383F4000, 0x383F8000, 0x383FC000, + 0x38400000, 0x38404000, 0x38408000, 0x3840C000, 0x38410000, 0x38414000, 0x38418000, 0x3841C000, 0x38420000, 0x38424000, 0x38428000, 0x3842C000, 0x38430000, 0x38434000, 0x38438000, 0x3843C000, 
+ 0x38440000, 0x38444000, 0x38448000, 0x3844C000, 0x38450000, 0x38454000, 0x38458000, 0x3845C000, 0x38460000, 0x38464000, 0x38468000, 0x3846C000, 0x38470000, 0x38474000, 0x38478000, 0x3847C000, + 0x38480000, 0x38484000, 0x38488000, 0x3848C000, 0x38490000, 0x38494000, 0x38498000, 0x3849C000, 0x384A0000, 0x384A4000, 0x384A8000, 0x384AC000, 0x384B0000, 0x384B4000, 0x384B8000, 0x384BC000, + 0x384C0000, 0x384C4000, 0x384C8000, 0x384CC000, 0x384D0000, 0x384D4000, 0x384D8000, 0x384DC000, 0x384E0000, 0x384E4000, 0x384E8000, 0x384EC000, 0x384F0000, 0x384F4000, 0x384F8000, 0x384FC000, + 0x38500000, 0x38504000, 0x38508000, 0x3850C000, 0x38510000, 0x38514000, 0x38518000, 0x3851C000, 0x38520000, 0x38524000, 0x38528000, 0x3852C000, 0x38530000, 0x38534000, 0x38538000, 0x3853C000, + 0x38540000, 0x38544000, 0x38548000, 0x3854C000, 0x38550000, 0x38554000, 0x38558000, 0x3855C000, 0x38560000, 0x38564000, 0x38568000, 0x3856C000, 0x38570000, 0x38574000, 0x38578000, 0x3857C000, + 0x38580000, 0x38584000, 0x38588000, 0x3858C000, 0x38590000, 0x38594000, 0x38598000, 0x3859C000, 0x385A0000, 0x385A4000, 0x385A8000, 0x385AC000, 0x385B0000, 0x385B4000, 0x385B8000, 0x385BC000, + 0x385C0000, 0x385C4000, 0x385C8000, 0x385CC000, 0x385D0000, 0x385D4000, 0x385D8000, 0x385DC000, 0x385E0000, 0x385E4000, 0x385E8000, 0x385EC000, 0x385F0000, 0x385F4000, 0x385F8000, 0x385FC000, + 0x38600000, 0x38604000, 0x38608000, 0x3860C000, 0x38610000, 0x38614000, 0x38618000, 0x3861C000, 0x38620000, 0x38624000, 0x38628000, 0x3862C000, 0x38630000, 0x38634000, 0x38638000, 0x3863C000, + 0x38640000, 0x38644000, 0x38648000, 0x3864C000, 0x38650000, 0x38654000, 0x38658000, 0x3865C000, 0x38660000, 0x38664000, 0x38668000, 0x3866C000, 0x38670000, 0x38674000, 0x38678000, 0x3867C000, + 0x38680000, 0x38684000, 0x38688000, 0x3868C000, 0x38690000, 0x38694000, 0x38698000, 0x3869C000, 0x386A0000, 0x386A4000, 0x386A8000, 0x386AC000, 0x386B0000, 0x386B4000, 0x386B8000, 0x386BC000, + 0x386C0000, 0x386C4000, 0x386C8000, 0x386CC000, 
0x386D0000, 0x386D4000, 0x386D8000, 0x386DC000, 0x386E0000, 0x386E4000, 0x386E8000, 0x386EC000, 0x386F0000, 0x386F4000, 0x386F8000, 0x386FC000, + 0x38700000, 0x38704000, 0x38708000, 0x3870C000, 0x38710000, 0x38714000, 0x38718000, 0x3871C000, 0x38720000, 0x38724000, 0x38728000, 0x3872C000, 0x38730000, 0x38734000, 0x38738000, 0x3873C000, + 0x38740000, 0x38744000, 0x38748000, 0x3874C000, 0x38750000, 0x38754000, 0x38758000, 0x3875C000, 0x38760000, 0x38764000, 0x38768000, 0x3876C000, 0x38770000, 0x38774000, 0x38778000, 0x3877C000, + 0x38780000, 0x38784000, 0x38788000, 0x3878C000, 0x38790000, 0x38794000, 0x38798000, 0x3879C000, 0x387A0000, 0x387A4000, 0x387A8000, 0x387AC000, 0x387B0000, 0x387B4000, 0x387B8000, 0x387BC000, + 0x387C0000, 0x387C4000, 0x387C8000, 0x387CC000, 0x387D0000, 0x387D4000, 0x387D8000, 0x387DC000, 0x387E0000, 0x387E4000, 0x387E8000, 0x387EC000, 0x387F0000, 0x387F4000, 0x387F8000, 0x387FC000, + 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800A000, 0x3800C000, 0x3800E000, 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801A000, 0x3801C000, 0x3801E000, + 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802A000, 0x3802C000, 0x3802E000, 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803A000, 0x3803C000, 0x3803E000, + 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804A000, 0x3804C000, 0x3804E000, 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805A000, 0x3805C000, 0x3805E000, + 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806A000, 0x3806C000, 0x3806E000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807A000, 0x3807C000, 0x3807E000, + 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808A000, 0x3808C000, 0x3808E000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809A000, 0x3809C000, 0x3809E000, + 0x380A0000, 0x380A2000, 0x380A4000, 0x380A6000, 0x380A8000, 0x380AA000, 0x380AC000, 0x380AE000, 0x380B0000, 
0x380B2000, 0x380B4000, 0x380B6000, 0x380B8000, 0x380BA000, 0x380BC000, 0x380BE000, + 0x380C0000, 0x380C2000, 0x380C4000, 0x380C6000, 0x380C8000, 0x380CA000, 0x380CC000, 0x380CE000, 0x380D0000, 0x380D2000, 0x380D4000, 0x380D6000, 0x380D8000, 0x380DA000, 0x380DC000, 0x380DE000, + 0x380E0000, 0x380E2000, 0x380E4000, 0x380E6000, 0x380E8000, 0x380EA000, 0x380EC000, 0x380EE000, 0x380F0000, 0x380F2000, 0x380F4000, 0x380F6000, 0x380F8000, 0x380FA000, 0x380FC000, 0x380FE000, + 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810A000, 0x3810C000, 0x3810E000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811A000, 0x3811C000, 0x3811E000, + 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812A000, 0x3812C000, 0x3812E000, 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813A000, 0x3813C000, 0x3813E000, + 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814A000, 0x3814C000, 0x3814E000, 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815A000, 0x3815C000, 0x3815E000, + 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816A000, 0x3816C000, 0x3816E000, 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817A000, 0x3817C000, 0x3817E000, + 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818A000, 0x3818C000, 0x3818E000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819A000, 0x3819C000, 0x3819E000, + 0x381A0000, 0x381A2000, 0x381A4000, 0x381A6000, 0x381A8000, 0x381AA000, 0x381AC000, 0x381AE000, 0x381B0000, 0x381B2000, 0x381B4000, 0x381B6000, 0x381B8000, 0x381BA000, 0x381BC000, 0x381BE000, + 0x381C0000, 0x381C2000, 0x381C4000, 0x381C6000, 0x381C8000, 0x381CA000, 0x381CC000, 0x381CE000, 0x381D0000, 0x381D2000, 0x381D4000, 0x381D6000, 0x381D8000, 0x381DA000, 0x381DC000, 0x381DE000, + 0x381E0000, 0x381E2000, 0x381E4000, 0x381E6000, 0x381E8000, 0x381EA000, 0x381EC000, 0x381EE000, 0x381F0000, 0x381F2000, 0x381F4000, 0x381F6000, 0x381F8000, 0x381FA000, 
0x381FC000, 0x381FE000, + 0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820A000, 0x3820C000, 0x3820E000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821A000, 0x3821C000, 0x3821E000, + 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822A000, 0x3822C000, 0x3822E000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823A000, 0x3823C000, 0x3823E000, + 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824A000, 0x3824C000, 0x3824E000, 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825A000, 0x3825C000, 0x3825E000, + 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826A000, 0x3826C000, 0x3826E000, 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827A000, 0x3827C000, 0x3827E000, + 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828A000, 0x3828C000, 0x3828E000, 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829A000, 0x3829C000, 0x3829E000, + 0x382A0000, 0x382A2000, 0x382A4000, 0x382A6000, 0x382A8000, 0x382AA000, 0x382AC000, 0x382AE000, 0x382B0000, 0x382B2000, 0x382B4000, 0x382B6000, 0x382B8000, 0x382BA000, 0x382BC000, 0x382BE000, + 0x382C0000, 0x382C2000, 0x382C4000, 0x382C6000, 0x382C8000, 0x382CA000, 0x382CC000, 0x382CE000, 0x382D0000, 0x382D2000, 0x382D4000, 0x382D6000, 0x382D8000, 0x382DA000, 0x382DC000, 0x382DE000, + 0x382E0000, 0x382E2000, 0x382E4000, 0x382E6000, 0x382E8000, 0x382EA000, 0x382EC000, 0x382EE000, 0x382F0000, 0x382F2000, 0x382F4000, 0x382F6000, 0x382F8000, 0x382FA000, 0x382FC000, 0x382FE000, + 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830A000, 0x3830C000, 0x3830E000, 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831A000, 0x3831C000, 0x3831E000, + 0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832A000, 0x3832C000, 0x3832E000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833A000, 0x3833C000, 0x3833E000, + 0x38340000, 0x38342000, 
0x38344000, 0x38346000, 0x38348000, 0x3834A000, 0x3834C000, 0x3834E000, 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835A000, 0x3835C000, 0x3835E000, + 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836A000, 0x3836C000, 0x3836E000, 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837A000, 0x3837C000, 0x3837E000, + 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838A000, 0x3838C000, 0x3838E000, 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839A000, 0x3839C000, 0x3839E000, + 0x383A0000, 0x383A2000, 0x383A4000, 0x383A6000, 0x383A8000, 0x383AA000, 0x383AC000, 0x383AE000, 0x383B0000, 0x383B2000, 0x383B4000, 0x383B6000, 0x383B8000, 0x383BA000, 0x383BC000, 0x383BE000, + 0x383C0000, 0x383C2000, 0x383C4000, 0x383C6000, 0x383C8000, 0x383CA000, 0x383CC000, 0x383CE000, 0x383D0000, 0x383D2000, 0x383D4000, 0x383D6000, 0x383D8000, 0x383DA000, 0x383DC000, 0x383DE000, + 0x383E0000, 0x383E2000, 0x383E4000, 0x383E6000, 0x383E8000, 0x383EA000, 0x383EC000, 0x383EE000, 0x383F0000, 0x383F2000, 0x383F4000, 0x383F6000, 0x383F8000, 0x383FA000, 0x383FC000, 0x383FE000, + 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840A000, 0x3840C000, 0x3840E000, 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841A000, 0x3841C000, 0x3841E000, + 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842A000, 0x3842C000, 0x3842E000, 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843A000, 0x3843C000, 0x3843E000, + 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844A000, 0x3844C000, 0x3844E000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845A000, 0x3845C000, 0x3845E000, + 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846A000, 0x3846C000, 0x3846E000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847A000, 0x3847C000, 0x3847E000, + 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848A000, 0x3848C000, 
0x3848E000, 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849A000, 0x3849C000, 0x3849E000, + 0x384A0000, 0x384A2000, 0x384A4000, 0x384A6000, 0x384A8000, 0x384AA000, 0x384AC000, 0x384AE000, 0x384B0000, 0x384B2000, 0x384B4000, 0x384B6000, 0x384B8000, 0x384BA000, 0x384BC000, 0x384BE000, + 0x384C0000, 0x384C2000, 0x384C4000, 0x384C6000, 0x384C8000, 0x384CA000, 0x384CC000, 0x384CE000, 0x384D0000, 0x384D2000, 0x384D4000, 0x384D6000, 0x384D8000, 0x384DA000, 0x384DC000, 0x384DE000, + 0x384E0000, 0x384E2000, 0x384E4000, 0x384E6000, 0x384E8000, 0x384EA000, 0x384EC000, 0x384EE000, 0x384F0000, 0x384F2000, 0x384F4000, 0x384F6000, 0x384F8000, 0x384FA000, 0x384FC000, 0x384FE000, + 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850A000, 0x3850C000, 0x3850E000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851A000, 0x3851C000, 0x3851E000, + 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852A000, 0x3852C000, 0x3852E000, 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853A000, 0x3853C000, 0x3853E000, + 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854A000, 0x3854C000, 0x3854E000, 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855A000, 0x3855C000, 0x3855E000, + 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856A000, 0x3856C000, 0x3856E000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857A000, 0x3857C000, 0x3857E000, + 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858A000, 0x3858C000, 0x3858E000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859A000, 0x3859C000, 0x3859E000, + 0x385A0000, 0x385A2000, 0x385A4000, 0x385A6000, 0x385A8000, 0x385AA000, 0x385AC000, 0x385AE000, 0x385B0000, 0x385B2000, 0x385B4000, 0x385B6000, 0x385B8000, 0x385BA000, 0x385BC000, 0x385BE000, + 0x385C0000, 0x385C2000, 0x385C4000, 0x385C6000, 0x385C8000, 0x385CA000, 0x385CC000, 0x385CE000, 0x385D0000, 0x385D2000, 0x385D4000, 0x385D6000, 
0x385D8000, 0x385DA000, 0x385DC000, 0x385DE000, + 0x385E0000, 0x385E2000, 0x385E4000, 0x385E6000, 0x385E8000, 0x385EA000, 0x385EC000, 0x385EE000, 0x385F0000, 0x385F2000, 0x385F4000, 0x385F6000, 0x385F8000, 0x385FA000, 0x385FC000, 0x385FE000, + 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860A000, 0x3860C000, 0x3860E000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861A000, 0x3861C000, 0x3861E000, + 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862A000, 0x3862C000, 0x3862E000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863A000, 0x3863C000, 0x3863E000, + 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864A000, 0x3864C000, 0x3864E000, 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865A000, 0x3865C000, 0x3865E000, + 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866A000, 0x3866C000, 0x3866E000, 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867A000, 0x3867C000, 0x3867E000, + 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868A000, 0x3868C000, 0x3868E000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869A000, 0x3869C000, 0x3869E000, + 0x386A0000, 0x386A2000, 0x386A4000, 0x386A6000, 0x386A8000, 0x386AA000, 0x386AC000, 0x386AE000, 0x386B0000, 0x386B2000, 0x386B4000, 0x386B6000, 0x386B8000, 0x386BA000, 0x386BC000, 0x386BE000, + 0x386C0000, 0x386C2000, 0x386C4000, 0x386C6000, 0x386C8000, 0x386CA000, 0x386CC000, 0x386CE000, 0x386D0000, 0x386D2000, 0x386D4000, 0x386D6000, 0x386D8000, 0x386DA000, 0x386DC000, 0x386DE000, + 0x386E0000, 0x386E2000, 0x386E4000, 0x386E6000, 0x386E8000, 0x386EA000, 0x386EC000, 0x386EE000, 0x386F0000, 0x386F2000, 0x386F4000, 0x386F6000, 0x386F8000, 0x386FA000, 0x386FC000, 0x386FE000, + 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870A000, 0x3870C000, 0x3870E000, 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871A000, 0x3871C000, 0x3871E000, + 
0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872A000, 0x3872C000, 0x3872E000, 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873A000, 0x3873C000, 0x3873E000, + 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874A000, 0x3874C000, 0x3874E000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875A000, 0x3875C000, 0x3875E000, + 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876A000, 0x3876C000, 0x3876E000, 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877A000, 0x3877C000, 0x3877E000, + 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878A000, 0x3878C000, 0x3878E000, 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879A000, 0x3879C000, 0x3879E000, + 0x387A0000, 0x387A2000, 0x387A4000, 0x387A6000, 0x387A8000, 0x387AA000, 0x387AC000, 0x387AE000, 0x387B0000, 0x387B2000, 0x387B4000, 0x387B6000, 0x387B8000, 0x387BA000, 0x387BC000, 0x387BE000, + 0x387C0000, 0x387C2000, 0x387C4000, 0x387C6000, 0x387C8000, 0x387CA000, 0x387CC000, 0x387CE000, 0x387D0000, 0x387D2000, 0x387D4000, 0x387D6000, 0x387D8000, 0x387DA000, 0x387DC000, 0x387DE000, + 0x387E0000, 0x387E2000, 0x387E4000, 0x387E6000, 0x387E8000, 0x387EA000, 0x387EC000, 0x387EE000, 0x387F0000, 0x387F2000, 0x387F4000, 0x387F6000, 0x387F8000, 0x387FA000, 0x387FC000, 0x387FE000 }; + static const uint32 exponent_table[64] = { + 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000, + 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0A000000, 0x0A800000, 0x0B000000, 0x0B800000, 0x0C000000, 0x0C800000, 0x0D000000, 0x0D800000, 0x0E000000, 0x0E800000, 0x0F000000, 0x47800000, + 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, + 0x88000000, 
0x88800000, 0x89000000, 0x89800000, 0x8A000000, 0x8A800000, 0x8B000000, 0x8B800000, 0x8C000000, 0x8C800000, 0x8D000000, 0x8D800000, 0x8E000000, 0x8E800000, 0x8F000000, 0xC7800000 }; + static const unsigned short offset_table[64] = { + 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, + 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 }; + uint32 bits = mantissa_table[offset_table[value>>10]+(value&0x3FF)] + exponent_table[value>>10]; +// return *reinterpret_cast(&bits); //violating strict aliasing! + float out; + std::memcpy(&out, &bits, sizeof(float)); + return out; + } + + /// Convert half-precision to IEEE double-precision. + /// \param value binary representation of half-precision value + /// \return double-precision value + inline double half2float_impl(uint16 value, double, true_type) + { + typedef bits::type uint32; + typedef bits::type uint64; + uint32 hi = static_cast(value&0x8000) << 16; + int abs = value & 0x7FFF; + if(abs) + { + hi |= 0x3F000000 << static_cast(abs>=0x7C00); + for(; abs<0x400; abs<<=1,hi-=0x100000) ; + hi += static_cast(abs) << 10; + } + uint64 bits = static_cast(hi) << 32; +// return *reinterpret_cast(&bits); //violating strict aliasing! + double out; + std::memcpy(&out, &bits, sizeof(double)); + return out; + } + + /// Convert half-precision to non-IEEE floating point. + /// \tparam T type to convert to (builtin integer type) + /// \param value binary representation of half-precision value + /// \return floating point value + template T half2float_impl(uint16 value, T, ...) + { + T out; + int abs = value & 0x7FFF; + if(abs > 0x7C00) + out = std::numeric_limits::has_quiet_NaN ? 
std::numeric_limits::quiet_NaN() : T(); + else if(abs == 0x7C00) + out = std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : std::numeric_limits::max(); + else if(abs > 0x3FF) + out = std::ldexp(static_cast((abs&0x3FF)|0x400), (abs>>10)-25); + else + out = std::ldexp(static_cast(abs), -24); + return (value&0x8000) ? -out : out; + } + + /// Convert half-precision to floating point. + /// \tparam T type to convert to (builtin integer type) + /// \param value binary representation of half-precision value + /// \return floating point value + template T half2float(uint16 value) + { + return half2float_impl(value, T(), bool_type::is_iec559&&sizeof(typename bits::type)==sizeof(T)>()); + } + + /// Convert half-precision floating point to integer. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam E `true` for round to even, `false` for round away from zero + /// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits) + /// \param value binary representation of half-precision value + /// \return integral value + template T half2int_impl(uint16 value) + { + #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS + static_assert(std::is_integral::value, "half to int conversion only supports builtin integer types"); + #endif + unsigned int e = value & 0x7FFF; + if(e >= 0x7C00) + return (value&0x8000) ? 
std::numeric_limits::min() : std::numeric_limits::max(); + if(e < 0x3800) + { + if(R == std::round_toward_infinity) + return T(~(value>>15)&(e!=0)); + if(R == std::round_toward_neg_infinity) + return -T(value>0x8000); + return T(); + } + unsigned int m = (value&0x3FF) | 0x400; + e >>= 10; + if(e < 25) + { + if(R == std::round_to_nearest) + m += (1<<(24-e)) - (~(m>>(25-e))&E); + else if(R == std::round_toward_infinity) + m += ((value>>15)-1) & ((1<<(25-e))-1U); + else if(R == std::round_toward_neg_infinity) + m += -(value>>15) & ((1<<(25-e))-1U); + m >>= 25 - e; + } + else + m <<= e - 25; + return (value&0x8000) ? -static_cast(m) : static_cast(m); + } + + /// Convert half-precision floating point to integer. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits) + /// \param value binary representation of half-precision value + /// \return integral value + template T half2int(uint16 value) { return half2int_impl(value); } + + /// Convert half-precision floating point to integer using round-to-nearest-away-from-zero. + /// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits) + /// \param value binary representation of half-precision value + /// \return integral value + template T half2int_up(uint16 value) { return half2int_impl(value); } + + /// Round half-precision number to nearest integer value. 
+ /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \tparam E `true` for round to even, `false` for round away from zero + /// \param value binary representation of half-precision value + /// \return half-precision bits for nearest integral value + template uint16 round_half_impl(uint16 value) + { + unsigned int e = value & 0x7FFF; + uint16 result = value; + if(e < 0x3C00) + { + result &= 0x8000; + if(R == std::round_to_nearest) + result |= 0x3C00U & -(e>=(0x3800+E)); + else if(R == std::round_toward_infinity) + result |= 0x3C00U & -(~(value>>15)&(e!=0)); + else if(R == std::round_toward_neg_infinity) + result |= 0x3C00U & -(value>0x8000); + } + else if(e < 0x6400) + { + e = 25 - (e>>10); + unsigned int mask = (1<>e)&E); + else if(R == std::round_toward_infinity) + result += mask & ((value>>15)-1); + else if(R == std::round_toward_neg_infinity) + result += mask & -(value>>15); + result &= ~mask; + } + return result; + } + + /// Round half-precision number to nearest integer value. + /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding + /// \param value binary representation of half-precision value + /// \return half-precision bits for nearest integral value + template uint16 round_half(uint16 value) { return round_half_impl(value); } + + /// Round half-precision number to nearest integer value using round-to-nearest-away-from-zero. + /// \param value binary representation of half-precision value + /// \return half-precision bits for nearest integral value + inline uint16 round_half_up(uint16 value) { return round_half_impl(value); } + /// \} + + struct functions; + template struct unary_specialized; + template struct binary_specialized; + template struct half_caster; + } + + /// Half-precision floating point type. + /// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and + /// conversions. 
It is implicitly convertible to single-precision floating point, which makes artihmetic expressions and + /// functions with mixed-type operands to be of the most precise operand type. Additionally all arithmetic operations + /// (and many mathematical functions) are carried out in single-precision internally. All conversions from single- to + /// half-precision are done using the library's default rounding mode, but temporary results inside chained arithmetic + /// expressions are kept in single-precision as long as possible (while of course still maintaining a strong half-precision type). + /// + /// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and + /// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which + /// means it can be standard-conformantly copied using raw binary copies. But in this context some more words about the + /// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not neccessarily have to be of + /// exactly 16-bits size. But on any reasonable implementation the actual binary representation of this type will most + /// probably not ivolve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit + /// IEEE number, even if not strictly guaranteed by the standard. But even then it only has an actual size of 16 bits if + /// your C++ implementation supports an unsigned integer type of exactly 16 bits width. But this should be the case on + /// nearly any reasonable platform. + /// + /// So if your C++ implementation is not totally exotic or imposes special alignment requirements, it is a reasonable + /// assumption that the data of a half is just comprised of the 2 bytes of the underlying IEEE representation. 
+ #if defined(__clang__) + /* this is a WAR, after nvcc's process, + * `friend class std::numeric_limits;` in the following code, becomes + * `friend class numeric_limits;`, namespsace `std` is removed, which results compilation error in clang. + * tested on nvcc V10.0.95, and clang 5.0.300080 in ndk 16b + */ + using std::numeric_limits; + using std::hash; + #endif + class half + { + friend struct detail::functions; + friend struct detail::unary_specialized; + friend struct detail::binary_specialized; + template friend struct detail::half_caster; + friend class std::numeric_limits; + #if HALF_ENABLE_CPP11_HASH + friend struct std::hash; + #endif + #if HALF_ENABLE_CPP11_USER_LITERALS + friend half literal::operator "" _h(long double); + #endif + + public: + /// Default constructor. + /// This initializes the half to 0. Although this does not match the builtin types' default-initialization semantics + /// and may be less efficient than no initialization, it is needed to provide proper value-initialization semantics. + HALF_CONSTEXPR half() HALF_NOEXCEPT : data_() {} + + /// Copy constructor. + /// \tparam T type of concrete half expression + /// \param rhs half expression to copy from + half(detail::expr rhs) : data_(detail::float2half(static_cast(rhs))) {} + + /// Conversion constructor. + /// \param rhs float to convert + explicit half(float rhs) : data_(detail::float2half(rhs)) {} + + /// Conversion to single-precision. + /// \return single precision value representing expression value + operator float() const { return detail::half2float(data_); } + + /// Assignment operator. + /// \tparam T type of concrete half expression + /// \param rhs half expression to copy from + /// \return reference to this half + half& operator=(detail::expr rhs) { return *this = static_cast(rhs); } + + /// Arithmetic assignment. 
+ /// \tparam T type of concrete half expression + /// \param rhs half expression to add + /// \return reference to this half + template typename detail::enable::type operator+=(T rhs) { return *this += static_cast(rhs); } + + /// Arithmetic assignment. + /// \tparam T type of concrete half expression + /// \param rhs half expression to subtract + /// \return reference to this half + template typename detail::enable::type operator-=(T rhs) { return *this -= static_cast(rhs); } + + /// Arithmetic assignment. + /// \tparam T type of concrete half expression + /// \param rhs half expression to multiply with + /// \return reference to this half + template typename detail::enable::type operator*=(T rhs) { return *this *= static_cast(rhs); } + + /// Arithmetic assignment. + /// \tparam T type of concrete half expression + /// \param rhs half expression to divide by + /// \return reference to this half + template typename detail::enable::type operator/=(T rhs) { return *this /= static_cast(rhs); } + + /// Assignment operator. + /// \param rhs single-precision value to copy from + /// \return reference to this half + half& operator=(float rhs) { data_ = detail::float2half(rhs); return *this; } + + /// Arithmetic assignment. + /// \param rhs single-precision value to add + /// \return reference to this half + half& operator+=(float rhs) { data_ = detail::float2half(detail::half2float(data_)+rhs); return *this; } + + /// Arithmetic assignment. + /// \param rhs single-precision value to subtract + /// \return reference to this half + half& operator-=(float rhs) { data_ = detail::float2half(detail::half2float(data_)-rhs); return *this; } + + /// Arithmetic assignment. + /// \param rhs single-precision value to multiply with + /// \return reference to this half + half& operator*=(float rhs) { data_ = detail::float2half(detail::half2float(data_)*rhs); return *this; } + + /// Arithmetic assignment. 
+ /// \param rhs single-precision value to divide by + /// \return reference to this half + half& operator/=(float rhs) { data_ = detail::float2half(detail::half2float(data_)/rhs); return *this; } + + /// Prefix increment. + /// \return incremented half value + half& operator++() { return *this += 1.0f; } + + /// Prefix decrement. + /// \return decremented half value + half& operator--() { return *this -= 1.0f; } + + /// Postfix increment. + /// \return non-incremented half value + half operator++(int) { half out(*this); ++*this; return out; } + + /// Postfix decrement. + /// \return non-decremented half value + half operator--(int) { half out(*this); --*this; return out; } + + private: + /// Rounding mode to use + static const std::float_round_style round_style = (std::float_round_style)(HALF_ROUND_STYLE); + + /// Constructor. + /// \param bits binary representation to set half to + HALF_CONSTEXPR half(detail::binary_t, detail::uint16 bits) HALF_NOEXCEPT : data_(bits) {} + + /// Internal binary representation + detail::uint16 data_; + }; + +#if HALF_ENABLE_CPP11_USER_LITERALS + namespace literal + { + /// Half literal. + /// While this returns an actual half-precision value, half literals can unfortunately not be constant expressions due + /// to rather involved conversions. + /// \param value literal value + /// \return half with given value (if representable) + inline half operator "" _h(long double value) { return half(detail::binary, detail::float2half(value)); } + } +#endif + + namespace detail + { + /// Wrapper implementing unspecialized half-precision functions. + struct functions + { + /// Addition implementation. + /// \param x first operand + /// \param y second operand + /// \return Half-precision sum stored in single-precision + static expr plus(float x, float y) { return expr(x+y); } + + /// Subtraction implementation. 
+ /// \param x first operand + /// \param y second operand + /// \return Half-precision difference stored in single-precision + static expr minus(float x, float y) { return expr(x-y); } + + /// Multiplication implementation. + /// \param x first operand + /// \param y second operand + /// \return Half-precision product stored in single-precision + static expr multiplies(float x, float y) { return expr(x*y); } + + /// Division implementation. + /// \param x first operand + /// \param y second operand + /// \return Half-precision quotient stored in single-precision + static expr divides(float x, float y) { return expr(x/y); } + + /// Output implementation. + /// \param out stream to write to + /// \param arg value to write + /// \return reference to stream + template static std::basic_ostream& write(std::basic_ostream &out, float arg) { return out << arg; } + + /// Input implementation. + /// \param in stream to read from + /// \param arg half to read into + /// \return reference to stream + template static std::basic_istream& read(std::basic_istream &in, half &arg) + { + float f; + if(in >> f) + arg = f; + return in; + } + + /// Modulo implementation. + /// \param x first operand + /// \param y second operand + /// \return Half-precision division remainder stored in single-precision + static expr fmod(float x, float y) { return expr(std::fmod(x, y)); } + + /// Remainder implementation. + /// \param x first operand + /// \param y second operand + /// \return Half-precision division remainder stored in single-precision + static expr remainder(float x, float y) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::remainder(x, y)); + #else + if(builtin_isnan(x) || builtin_isnan(y)) + return expr(std::numeric_limits::quiet_NaN()); + float ax = std::fabs(x), ay = std::fabs(y); + if(ax >= 65536.0f || ay < std::ldexp(1.0f, -24)) + return expr(std::numeric_limits::quiet_NaN()); + if(ay >= 65536.0f) + return expr(x); + if(ax == ay) + return expr(builtin_signbit(x) ? 
-0.0f : 0.0f); + ax = std::fmod(ax, ay+ay); + float y2 = 0.5f * ay; + if(ax > y2) + { + ax -= ay; + if(ax >= y2) + ax -= ay; + } + return expr(builtin_signbit(x) ? -ax : ax); + #endif + } + + /// Remainder implementation. + /// \param x first operand + /// \param y second operand + /// \param quo address to store quotient bits at + /// \return Half-precision division remainder stored in single-precision + static expr remquo(float x, float y, int *quo) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::remquo(x, y, quo)); + #else + if(builtin_isnan(x) || builtin_isnan(y)) + return expr(std::numeric_limits::quiet_NaN()); + bool sign = builtin_signbit(x), qsign = static_cast(sign^builtin_signbit(y)); + float ax = std::fabs(x), ay = std::fabs(y); + if(ax >= 65536.0f || ay < std::ldexp(1.0f, -24)) + return expr(std::numeric_limits::quiet_NaN()); + if(ay >= 65536.0f) + return expr(x); + if(ax == ay) + return *quo = qsign ? -1 : 1, expr(sign ? -0.0f : 0.0f); + ax = std::fmod(ax, 8.0f*ay); + int cquo = 0; + if(ax >= 4.0f * ay) + { + ax -= 4.0f * ay; + cquo += 4; + } + if(ax >= 2.0f * ay) + { + ax -= 2.0f * ay; + cquo += 2; + } + float y2 = 0.5f * ay; + if(ax > y2) + { + ax -= ay; + ++cquo; + if(ax >= y2) + { + ax -= ay; + ++cquo; + } + } + return *quo = qsign ? -cquo : cquo, expr(sign ? -ax : ax); + #endif + } + + /// Positive difference implementation. + /// \param x first operand + /// \param y second operand + /// \return Positive difference stored in single-precision + static expr fdim(float x, float y) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::fdim(x, y)); + #else + return expr((x<=y) ? 0.0f : (x-y)); + #endif + } + + /// Fused multiply-add implementation. 
+ /// \param x first operand + /// \param y second operand + /// \param z third operand + /// \return \a x * \a y + \a z stored in single-precision + static expr fma(float x, float y, float z) + { + #if HALF_ENABLE_CPP11_CMATH && defined(FP_FAST_FMAF) + return expr(std::fma(x, y, z)); + #else + return expr(x*y+z); + #endif + } + + /// Get NaN. + /// \return Half-precision quiet NaN + static half nanh() { return half(binary, 0x7FFF); } + + /// Exponential implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr exp(float arg) { return expr(std::exp(arg)); } + + /// Exponential implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr expm1(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::expm1(arg)); + #else + return expr(static_cast(std::exp(static_cast(arg))-1.0)); + #endif + } + + /// Binary exponential implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr exp2(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::exp2(arg)); + #else + return expr(static_cast(std::exp(arg*0.69314718055994530941723212145818))); + #endif + } + + /// Logarithm implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr log(float arg) { return expr(std::log(arg)); } + + /// Common logarithm implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr log10(float arg) { return expr(std::log10(arg)); } + + /// Logarithm implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr log1p(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::log1p(arg)); + #else + return expr(static_cast(std::log(1.0+arg))); + #endif + } + + /// Binary logarithm implementation. 
+ /// \param arg function argument + /// \return function value stored in single-preicision + static expr log2(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::log2(arg)); + #else + return expr(static_cast(std::log(static_cast(arg))*1.4426950408889634073599246810019)); + #endif + } + + /// Square root implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr sqrt(float arg) { return expr(std::sqrt(arg)); } + + /// Cubic root implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr cbrt(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::cbrt(arg)); + #else + if(builtin_isnan(arg) || builtin_isinf(arg)) + return expr(arg); + return expr(builtin_signbit(arg) ? -static_cast(std::pow(-static_cast(arg), 1.0/3.0)) : + static_cast(std::pow(static_cast(arg), 1.0/3.0))); + #endif + } + + /// Hypotenuse implementation. + /// \param x first argument + /// \param y second argument + /// \return function value stored in single-preicision + static expr hypot(float x, float y) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::hypot(x, y)); + #else + return expr((builtin_isinf(x) || builtin_isinf(y)) ? std::numeric_limits::infinity() : + static_cast(std::sqrt(static_cast(x)*x+static_cast(y)*y))); + #endif + } + + /// Power implementation. + /// \param base value to exponentiate + /// \param exp power to expontiate to + /// \return function value stored in single-preicision + static expr pow(float base, float exp) { return expr(std::pow(base, exp)); } + + /// Sine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr sin(float arg) { return expr(std::sin(arg)); } + + /// Cosine implementation. 
+ /// \param arg function argument + /// \return function value stored in single-preicision + static expr cos(float arg) { return expr(std::cos(arg)); } + + /// Tan implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr tan(float arg) { return expr(std::tan(arg)); } + + /// Arc sine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr asin(float arg) { return expr(std::asin(arg)); } + + /// Arc cosine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr acos(float arg) { return expr(std::acos(arg)); } + + /// Arc tangent implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr atan(float arg) { return expr(std::atan(arg)); } + + /// Arc tangent implementation. + /// \param x first argument + /// \param y second argument + /// \return function value stored in single-preicision + static expr atan2(float x, float y) { return expr(std::atan2(x, y)); } + + /// Hyperbolic sine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr sinh(float arg) { return expr(std::sinh(arg)); } + + /// Hyperbolic cosine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr cosh(float arg) { return expr(std::cosh(arg)); } + + /// Hyperbolic tangent implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr tanh(float arg) { return expr(std::tanh(arg)); } + + /// Hyperbolic area sine implementation. 
+ /// \param arg function argument + /// \return function value stored in single-preicision + static expr asinh(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::asinh(arg)); + #else + return expr((arg==-std::numeric_limits::infinity()) ? arg : static_cast(std::log(arg+std::sqrt(arg*arg+1.0)))); + #endif + } + + /// Hyperbolic area cosine implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr acosh(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::acosh(arg)); + #else + return expr((arg<-1.0f) ? std::numeric_limits::quiet_NaN() : static_cast(std::log(arg+std::sqrt(arg*arg-1.0)))); + #endif + } + + /// Hyperbolic area tangent implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr atanh(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::atanh(arg)); + #else + return expr(static_cast(0.5*std::log((1.0+arg)/(1.0-arg)))); + #endif + } + + /// Error function implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr erf(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::erf(arg)); + #else + return expr(static_cast(erf(static_cast(arg)))); + #endif + } + + /// Complementary implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr erfc(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::erfc(arg)); + #else + return expr(static_cast(1.0-erf(static_cast(arg)))); + #endif + } + + /// Gamma logarithm implementation. 
+ /// \param arg function argument + /// \return function value stored in single-preicision + static expr lgamma(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::lgamma(arg)); + #else + if(builtin_isinf(arg)) + return expr(std::numeric_limits::infinity()); + if(arg < 0.0f) + { + float i, f = std::modf(-arg, &i); + if(f == 0.0f) + return expr(std::numeric_limits::infinity()); + return expr(static_cast(1.1447298858494001741434273513531- + std::log(std::abs(std::sin(3.1415926535897932384626433832795*f)))-lgamma(1.0-arg))); + } + return expr(static_cast(lgamma(static_cast(arg)))); + #endif + } + + /// Gamma implementation. + /// \param arg function argument + /// \return function value stored in single-preicision + static expr tgamma(float arg) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::tgamma(arg)); + #else + if(arg == 0.0f) + return builtin_signbit(arg) ? expr(-std::numeric_limits::infinity()) : expr(std::numeric_limits::infinity()); + if(arg < 0.0f) + { + float i, f = std::modf(-arg, &i); + if(f == 0.0f) + return expr(std::numeric_limits::quiet_NaN()); + double value = 3.1415926535897932384626433832795 / (std::sin(3.1415926535897932384626433832795*f)*std::exp(lgamma(1.0-arg))); + return expr(static_cast((std::fmod(i, 2.0f)==0.0f) ? -value : value)); + } + if(builtin_isinf(arg)) + return expr(arg); + return expr(static_cast(std::exp(lgamma(static_cast(arg))))); + #endif + } + + /// Floor implementation. + /// \param arg value to round + /// \return rounded value + static half floor(half arg) { return half(binary, round_half(arg.data_)); } + + /// Ceiling implementation. + /// \param arg value to round + /// \return rounded value + static half ceil(half arg) { return half(binary, round_half(arg.data_)); } + + /// Truncation implementation. + /// \param arg value to round + /// \return rounded value + static half trunc(half arg) { return half(binary, round_half(arg.data_)); } + + /// Nearest integer implementation. 
+ /// \param arg value to round + /// \return rounded value + static half round(half arg) { return half(binary, round_half_up(arg.data_)); } + + /// Nearest integer implementation. + /// \param arg value to round + /// \return rounded value + static long lround(half arg) { return detail::half2int_up(arg.data_); } + + /// Nearest integer implementation. + /// \param arg value to round + /// \return rounded value + static half rint(half arg) { return half(binary, round_half(arg.data_)); } + + /// Nearest integer implementation. + /// \param arg value to round + /// \return rounded value + static long lrint(half arg) { return detail::half2int(arg.data_); } + + #if HALF_ENABLE_CPP11_LONG_LONG + /// Nearest integer implementation. + /// \param arg value to round + /// \return rounded value + static long long llround(half arg) { return detail::half2int_up(arg.data_); } + + /// Nearest integer implementation. + /// \param arg value to round + /// \return rounded value + static long long llrint(half arg) { return detail::half2int(arg.data_); } + #endif + + /// Decompression implementation. + /// \param arg number to decompress + /// \param exp address to store exponent at + /// \return normalized significant + static half frexp(half arg, int *exp) + { + int m = arg.data_ & 0x7FFF, e = -14; + if(m >= 0x7C00 || !m) + return *exp = 0, arg; + for(; m<0x400; m<<=1,--e) ; + return *exp = e+(m>>10), half(binary, (arg.data_&0x8000)|0x3800|(m&0x3FF)); + } + + /// Decompression implementation. 
+ /// \param arg number to decompress + /// \param iptr address to store integer part at + /// \return fractional part + static half modf(half arg, half *iptr) + { + unsigned int e = arg.data_ & 0x7FFF; + if(e >= 0x6400) + return *iptr = arg, half(binary, arg.data_&(0x8000U|-(e>0x7C00))); + if(e < 0x3C00) + return iptr->data_ = arg.data_ & 0x8000, arg; + e >>= 10; + unsigned int mask = (1<<(25-e)) - 1, m = arg.data_ & mask; + iptr->data_ = arg.data_ & ~mask; + if(!m) + return half(binary, arg.data_&0x8000); + for(; m<0x400; m<<=1,--e) ; + return half(binary, static_cast((arg.data_&0x8000)|(e<<10)|(m&0x3FF))); + } + + /// Scaling implementation. + /// \param arg number to scale + /// \param exp power of two to scale by + /// \return scaled number + static half scalbln(half arg, long exp) + { + unsigned int m = arg.data_ & 0x7FFF; + if(m >= 0x7C00 || !m) + return arg; + for(; m<0x400; m<<=1,--exp) ; + exp += m >> 10; + uint16 value = arg.data_ & 0x8000; + if(exp > 30) + { + if(half::round_style == std::round_toward_zero) + value |= 0x7BFF; + else if(half::round_style == std::round_toward_infinity) + value |= 0x7C00 - (value>>15); + else if(half::round_style == std::round_toward_neg_infinity) + value |= 0x7BFF + (value>>15); + else + value |= 0x7C00; + } + else if(exp > 0) + value |= (exp<<10) | (m&0x3FF); + else if(exp > -11) + { + m = (m&0x3FF) | 0x400; + if(half::round_style == std::round_to_nearest) + { + m += 1 << -exp; + #if HALF_ROUND_TIES_TO_EVEN + m -= (m>>(1-exp)) & 1; + #endif + } + else if(half::round_style == std::round_toward_infinity) + m += ((value>>15)-1) & ((1<<(1-exp))-1U); + else if(half::round_style == std::round_toward_neg_infinity) + m += -(value>>15) & ((1<<(1-exp))-1U); + value |= m >> (1-exp); + } + else if(half::round_style == std::round_toward_infinity) + value -= (value>>15) - 1; + else if(half::round_style == std::round_toward_neg_infinity) + value += value >> 15; + return half(binary, value); + } + + /// Exponent implementation. 
+ /// \param arg number to query + /// \return floating point exponent + static int ilogb(half arg) + { + int abs = arg.data_ & 0x7FFF; + if(!abs) + return FP_ILOGB0; + if(abs < 0x7C00) + { + int exp = (abs>>10) - 15; + if(abs < 0x400) + for(; abs<0x200; abs<<=1,--exp) ; + return exp; + } + if(abs > 0x7C00) + return FP_ILOGBNAN; + return INT_MAX; + } + + /// Exponent implementation. + /// \param arg number to query + /// \return floating point exponent + static half logb(half arg) + { + int abs = arg.data_ & 0x7FFF; + if(!abs) + return half(binary, 0xFC00); + if(abs < 0x7C00) + { + int exp = (abs>>10) - 15; + if(abs < 0x400) + for(; abs<0x200; abs<<=1,--exp) ; + uint16 bits = (exp<0) << 15; + if(exp) + { + unsigned int m = std::abs(exp) << 6, e = 18; + for(; m<0x400; m<<=1,--e) ; + bits |= (e<<10) + m; + } + return half(binary, bits); + } + if(abs > 0x7C00) + return arg; + return half(binary, 0x7C00); + } + + /// Enumeration implementation. + /// \param from number to increase/decrease + /// \param to direction to enumerate into + /// \return next representable number + static half nextafter(half from, half to) + { + uint16 fabs = from.data_ & 0x7FFF, tabs = to.data_ & 0x7FFF; + if(fabs > 0x7C00) + return from; + if(tabs > 0x7C00 || from.data_ == to.data_ || !(fabs|tabs)) + return to; + if(!fabs) + return half(binary, (to.data_&0x8000)+1); + bool lt = ((fabs==from.data_) ? static_cast(fabs) : -static_cast(fabs)) < + ((tabs==to.data_) ? static_cast(tabs) : -static_cast(tabs)); + return half(binary, from.data_+(((from.data_>>15)^static_cast(lt))<<1)-1); + } + + /// Enumeration implementation. 
+ /// \param from number to increase/decrease + /// \param to direction to enumerate into + /// \return next representable number + static half nexttoward(half from, long double to) + { + if(isnan(from)) + return from; + auto lfrom = static_cast(from); + if(builtin_isnan(to) || lfrom == to) + return half(static_cast(to)); + if(!(from.data_&0x7FFF)) + return half(binary, (static_cast(builtin_signbit(to))<<15)+1); + return half(binary, from.data_+(((from.data_>>15)^static_cast(lfrom0x3FF) ? ((abs>=0x7C00) ? ((abs>0x7C00) ? FP_NAN : FP_INFINITE) : FP_NORMAL) :FP_SUBNORMAL) : FP_ZERO; + } + + /// Classification implementation. + /// \param arg value to classify + /// \retval true if finite number + /// \retval false else + static bool isfinite(half arg) { return (arg.data_&0x7C00) != 0x7C00; } + + /// Classification implementation. + /// \param arg value to classify + /// \retval true if infinite number + /// \retval false else + static bool isinf(half arg) { return (arg.data_&0x7FFF) == 0x7C00; } + + /// Classification implementation. + /// \param arg value to classify + /// \retval true if not a number + /// \retval false else + static bool isnan(half arg) { return (arg.data_&0x7FFF) > 0x7C00; } + + /// Classification implementation. + /// \param arg value to classify + /// \retval true if normal number + /// \retval false else + static bool isnormal(half arg) { return ((arg.data_&0x7C00)!=0) & ((arg.data_&0x7C00)!=0x7C00); } + + /// Sign bit implementation. + /// \param arg value to check + /// \retval true if signed + /// \retval false if unsigned + static bool signbit(half arg) { return (arg.data_&0x8000) != 0; } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if operands equal + /// \retval false else + static bool isequal(half x, half y) { return (x.data_==y.data_ || !((x.data_|y.data_)&0x7FFF)) && !isnan(x); } + + /// Comparison implementation. 
+ /// \param x first operand + /// \param y second operand + /// \retval true if operands not equal + /// \retval false else + static bool isnotequal(half x, half y) { return (x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF)) || isnan(x); } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x > \a y + /// \retval false else + static bool isgreater(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) > ((yabs==y.data_) ? yabs : -yabs)); + } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x >= \a y + /// \retval false else + static bool isgreaterequal(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) >= ((yabs==y.data_) ? yabs : -yabs)); + } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x < \a y + /// \retval false else + static bool isless(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) < ((yabs==y.data_) ? yabs : -yabs)); + } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x <= \a y + /// \retval false else + static bool islessequal(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) <= ((yabs==y.data_) ? yabs : -yabs)); + } + + /// Comparison implementation. 
+ /// \param x first operand + /// \param y second operand + /// \retval true if either \a x > \a y nor \a x < \a y + /// \retval false else + static bool islessgreater(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + if(xabs > 0x7C00 || yabs > 0x7C00) + return false; + int a = (xabs==x.data_) ? xabs : -xabs, b = (yabs==y.data_) ? yabs : -yabs; + return a < b || a > b; + } + + /// Comparison implementation. + /// \param x first operand + /// \param y second operand + /// \retval true if operand unordered + /// \retval false else + static bool isunordered(half x, half y) { return isnan(x) || isnan(y); } + + private: + static double erf(double arg) + { + if(builtin_isinf(arg)) + return (arg<0.0) ? -1.0 : 1.0; + double x2 = arg * arg, ax2 = 0.147 * x2, value = std::sqrt(1.0-std::exp(-x2*(1.2732395447351626861510701069801+ax2)/(1.0+ax2))); + return builtin_signbit(arg) ? -value : value; + } + + static double lgamma(double arg) + { + double v = 1.0; + for(; arg<8.0; ++arg) v *= arg; + double w = 1.0 / (arg*arg); + return (((((((-0.02955065359477124183006535947712*w+0.00641025641025641025641025641026)*w+ + -0.00191752691752691752691752691753)*w+8.4175084175084175084175084175084e-4)*w+ + -5.952380952380952380952380952381e-4)*w+7.9365079365079365079365079365079e-4)*w+ + -0.00277777777777777777777777777778)*w+0.08333333333333333333333333333333)/arg + + 0.91893853320467274178032973640562 - std::log(v) - arg + (arg-0.5) * std::log(arg); + } + }; + + /// Wrapper for unary half-precision functions needing specialization for individual argument types. + /// \tparam T argument type + template struct unary_specialized + { + /// Negation implementation. + /// \param arg value to negate + /// \return negated value + static HALF_CONSTEXPR half negate(half arg) { return half(binary, arg.data_^0x8000); } + + /// Absolute value implementation. 
+ /// \param arg function argument + /// \return absolute value + static half fabs(half arg) { return half(binary, arg.data_&0x7FFF); } + }; + template<> struct unary_specialized + { + static HALF_CONSTEXPR expr negate(float arg) { return expr(-arg); } + static expr fabs(float arg) { return expr(std::fabs(arg)); } + }; + + /// Wrapper for binary half-precision functions needing specialization for individual argument types. + /// \tparam T first argument type + /// \tparam U first argument type + template struct binary_specialized + { + /// Minimum implementation. + /// \param x first operand + /// \param y second operand + /// \return minimum value + static expr fmin(float x, float y) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::fmin(x, y)); + #else + if(builtin_isnan(x)) + return expr(y); + if(builtin_isnan(y)) + return expr(x); + return expr(std::min(x, y)); + #endif + } + + /// Maximum implementation. + /// \param x first operand + /// \param y second operand + /// \return maximum value + static expr fmax(float x, float y) + { + #if HALF_ENABLE_CPP11_CMATH + return expr(std::fmax(x, y)); + #else + if(builtin_isnan(x)) + return expr(y); + if(builtin_isnan(y)) + return expr(x); + return expr(std::max(x, y)); + #endif + } + }; + template<> struct binary_specialized + { + static half fmin(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + if(xabs > 0x7C00) + return y; + if(yabs > 0x7C00) + return x; + return (((xabs==x.data_) ? xabs : -xabs) > ((yabs==y.data_) ? yabs : -yabs)) ? y : x; + } + static half fmax(half x, half y) + { + int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; + if(xabs > 0x7C00) + return y; + if(yabs > 0x7C00) + return x; + return (((xabs==x.data_) ? xabs : -xabs) < ((yabs==y.data_) ? yabs : -yabs)) ? y : x; + } + }; + + /// Helper class for half casts. 
+ /// This class template has to be specialized for all valid cast argument to define an appropriate static `cast` member + /// function and a corresponding `type` member denoting its return type. + /// \tparam T destination type + /// \tparam U source type + /// \tparam R rounding mode to use + template struct half_caster {}; + template struct half_caster + { + #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS + static_assert(std::is_arithmetic::value, "half_cast from non-arithmetic type unsupported"); + #endif + + static half cast(U arg) { return cast_impl(arg, is_float()); }; + + private: + static half cast_impl(U arg, true_type) { return half(binary, float2half(arg)); } + static half cast_impl(U arg, false_type) { return half(binary, int2half(arg)); } + }; + template struct half_caster + { + #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS + static_assert(std::is_arithmetic::value, "half_cast to non-arithmetic type unsupported"); + #endif + + static T cast(half arg) { return cast_impl(arg, is_float()); } + + private: + static T cast_impl(half arg, true_type) { return half2float(arg.data_); } + static T cast_impl(half arg, false_type) { return half2int(arg.data_); } + }; + template struct half_caster + { + #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS + static_assert(std::is_arithmetic::value, "half_cast to non-arithmetic type unsupported"); + #endif + + static T cast(expr arg) { return cast_impl(arg, is_float()); } + + private: + static T cast_impl(float arg, true_type) { return static_cast(arg); } + static T cast_impl(half arg, false_type) { return half2int(arg.data_); } + }; + template struct half_caster + { + static half cast(half arg) { return arg; } + }; + template struct half_caster : half_caster {}; + + /// \name Comparison operators + /// \{ + + /// Comparison for equality. 
+ /// \param x first operand + /// \param y second operand + /// \retval true if operands equal + /// \retval false else + template typename enable::type operator==(T x, U y) { return functions::isequal(x, y); } + + /// Comparison for inequality. + /// \param x first operand + /// \param y second operand + /// \retval true if operands not equal + /// \retval false else + template typename enable::type operator!=(T x, U y) { return functions::isnotequal(x, y); } + + /// Comparison for less than. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x less than \a y + /// \retval false else + template typename enable::type operator<(T x, U y) { return functions::isless(x, y); } + + /// Comparison for greater than. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x greater than \a y + /// \retval false else + template typename enable::type operator>(T x, U y) { return functions::isgreater(x, y); } + + /// Comparison for less equal. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x less equal \a y + /// \retval false else + template typename enable::type operator<=(T x, U y) { return functions::islessequal(x, y); } + + /// Comparison for greater equal. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x greater equal \a y + /// \retval false else + template typename enable::type operator>=(T x, U y) { return functions::isgreaterequal(x, y); } + + /// \} + /// \name Arithmetic operators + /// \{ + + /// Add halfs. + /// \param x left operand + /// \param y right operand + /// \return sum of half expressions + template typename enable::type operator+(T x, U y) { return functions::plus(x, y); } + + /// Subtract halfs. + /// \param x left operand + /// \param y right operand + /// \return difference of half expressions + template typename enable::type operator-(T x, U y) { return functions::minus(x, y); } + + /// Multiply halfs. 
+ /// \param x left operand + /// \param y right operand + /// \return product of half expressions + template typename enable::type operator*(T x, U y) { return functions::multiplies(x, y); } + + /// Divide halfs. + /// \param x left operand + /// \param y right operand + /// \return quotient of half expressions + template typename enable::type operator/(T x, U y) { return functions::divides(x, y); } + + /// Identity. + /// \param arg operand + /// \return uncahnged operand + template HALF_CONSTEXPR typename enable::type operator+(T arg) { return arg; } + + /// Negation. + /// \param arg operand + /// \return negated operand + template HALF_CONSTEXPR typename enable::type operator-(T arg) { return unary_specialized::negate(arg); } + + /// \} + /// \name Input and output + /// \{ + + /// Output operator. + /// \param out output stream to write into + /// \param arg half expression to write + /// \return reference to output stream + template typename enable&,T>::type + operator<<(std::basic_ostream &out, T arg) { return functions::write(out, arg); } + + /// Input operator. + /// \param in input stream to read from + /// \param arg half to read into + /// \return reference to input stream + template std::basic_istream& + operator>>(std::basic_istream &in, half &arg) { return functions::read(in, arg); } + + /// \} + /// \name Basic mathematical operations + /// \{ + + /// Absolute value. + /// \param arg operand + /// \return absolute value of \a arg +// template typename enable::type abs(T arg) { return unary_specialized::fabs(arg); } + inline half abs(half arg) { return unary_specialized::fabs(arg); } + inline expr abs(expr arg) { return unary_specialized::fabs(arg); } + + /// Absolute value. 
+ /// \param arg operand + /// \return absolute value of \a arg +// template typename enable::type fabs(T arg) { return unary_specialized::fabs(arg); } + inline half fabs(half arg) { return unary_specialized::fabs(arg); } + inline expr fabs(expr arg) { return unary_specialized::fabs(arg); } + + /// Remainder of division. + /// \param x first operand + /// \param y second operand + /// \return remainder of floating point division. +// template typename enable::type fmod(T x, U y) { return functions::fmod(x, y); } + inline expr fmod(half x, half y) { return functions::fmod(x, y); } + inline expr fmod(half x, expr y) { return functions::fmod(x, y); } + inline expr fmod(expr x, half y) { return functions::fmod(x, y); } + inline expr fmod(expr x, expr y) { return functions::fmod(x, y); } + + /// Remainder of division. + /// \param x first operand + /// \param y second operand + /// \return remainder of floating point division. +// template typename enable::type remainder(T x, U y) { return functions::remainder(x, y); } + inline expr remainder(half x, half y) { return functions::remainder(x, y); } + inline expr remainder(half x, expr y) { return functions::remainder(x, y); } + inline expr remainder(expr x, half y) { return functions::remainder(x, y); } + inline expr remainder(expr x, expr y) { return functions::remainder(x, y); } + + /// Remainder of division. + /// \param x first operand + /// \param y second operand + /// \param quo address to store some bits of quotient at + /// \return remainder of floating point division. 
+// template typename enable::type remquo(T x, U y, int *quo) { return functions::remquo(x, y, quo); } + inline expr remquo(half x, half y, int *quo) { return functions::remquo(x, y, quo); } + inline expr remquo(half x, expr y, int *quo) { return functions::remquo(x, y, quo); } + inline expr remquo(expr x, half y, int *quo) { return functions::remquo(x, y, quo); } + inline expr remquo(expr x, expr y, int *quo) { return functions::remquo(x, y, quo); } + + /// Fused multiply add. + /// \param x first operand + /// \param y second operand + /// \param z third operand + /// \return ( \a x * \a y ) + \a z rounded as one operation. +// template typename enable::type fma(T x, U y, V z) { return functions::fma(x, y, z); } + inline expr fma(half x, half y, half z) { return functions::fma(x, y, z); } + inline expr fma(half x, half y, expr z) { return functions::fma(x, y, z); } + inline expr fma(half x, expr y, half z) { return functions::fma(x, y, z); } + inline expr fma(half x, expr y, expr z) { return functions::fma(x, y, z); } + inline expr fma(expr x, half y, half z) { return functions::fma(x, y, z); } + inline expr fma(expr x, half y, expr z) { return functions::fma(x, y, z); } + inline expr fma(expr x, expr y, half z) { return functions::fma(x, y, z); } + inline expr fma(expr x, expr y, expr z) { return functions::fma(x, y, z); } + + /// Maximum of half expressions. + /// \param x first operand + /// \param y second operand + /// \return maximum of operands +// template typename result::type fmax(T x, U y) { return binary_specialized::fmax(x, y); } + inline half fmax(half x, half y) { return binary_specialized::fmax(x, y); } + inline expr fmax(half x, expr y) { return binary_specialized::fmax(x, y); } + inline expr fmax(expr x, half y) { return binary_specialized::fmax(x, y); } + inline expr fmax(expr x, expr y) { return binary_specialized::fmax(x, y); } + + /// Minimum of half expressions. 
+ /// \param x first operand + /// \param y second operand + /// \return minimum of operands +// template typename result::type fmin(T x, U y) { return binary_specialized::fmin(x, y); } + inline half fmin(half x, half y) { return binary_specialized::fmin(x, y); } + inline expr fmin(half x, expr y) { return binary_specialized::fmin(x, y); } + inline expr fmin(expr x, half y) { return binary_specialized::fmin(x, y); } + inline expr fmin(expr x, expr y) { return binary_specialized::fmin(x, y); } + + /// Positive difference. + /// \param x first operand + /// \param y second operand + /// \return \a x - \a y or 0 if difference negative +// template typename enable::type fdim(T x, U y) { return functions::fdim(x, y); } + inline expr fdim(half x, half y) { return functions::fdim(x, y); } + inline expr fdim(half x, expr y) { return functions::fdim(x, y); } + inline expr fdim(expr x, half y) { return functions::fdim(x, y); } + inline expr fdim(expr x, expr y) { return functions::fdim(x, y); } + + /// Get NaN value. + /// \return quiet NaN + inline half nanh(const char*) { return functions::nanh(); } + + /// \} + /// \name Exponential functions + /// \{ + + /// Exponential function. + /// \param arg function argument + /// \return e raised to \a arg +// template typename enable::type exp(T arg) { return functions::exp(arg); } + inline expr exp(half arg) { return functions::exp(arg); } + inline expr exp(expr arg) { return functions::exp(arg); } + + /// Exponential minus one. + /// \param arg function argument + /// \return e raised to \a arg subtracted by 1 +// template typename enable::type expm1(T arg) { return functions::expm1(arg); } + inline expr expm1(half arg) { return functions::expm1(arg); } + inline expr expm1(expr arg) { return functions::expm1(arg); } + + /// Binary exponential. 
+ /// \param arg function argument + /// \return 2 raised to \a arg +// template typename enable::type exp2(T arg) { return functions::exp2(arg); } + inline expr exp2(half arg) { return functions::exp2(arg); } + inline expr exp2(expr arg) { return functions::exp2(arg); } + + /// Natural logorithm. + /// \param arg function argument + /// \return logarithm of \a arg to base e +// template typename enable::type log(T arg) { return functions::log(arg); } + inline expr log(half arg) { return functions::log(arg); } + inline expr log(expr arg) { return functions::log(arg); } + + /// Common logorithm. + /// \param arg function argument + /// \return logarithm of \a arg to base 10 +// template typename enable::type log10(T arg) { return functions::log10(arg); } + inline expr log10(half arg) { return functions::log10(arg); } + inline expr log10(expr arg) { return functions::log10(arg); } + + /// Natural logorithm. + /// \param arg function argument + /// \return logarithm of \a arg plus 1 to base e +// template typename enable::type log1p(T arg) { return functions::log1p(arg); } + inline expr log1p(half arg) { return functions::log1p(arg); } + inline expr log1p(expr arg) { return functions::log1p(arg); } + + /// Binary logorithm. + /// \param arg function argument + /// \return logarithm of \a arg to base 2 +// template typename enable::type log2(T arg) { return functions::log2(arg); } + inline expr log2(half arg) { return functions::log2(arg); } + inline expr log2(expr arg) { return functions::log2(arg); } + + /// \} + /// \name Power functions + /// \{ + + /// Square root. + /// \param arg function argument + /// \return square root of \a arg +// template typename enable::type sqrt(T arg) { return functions::sqrt(arg); } + inline expr sqrt(half arg) { return functions::sqrt(arg); } + inline expr sqrt(expr arg) { return functions::sqrt(arg); } + + /// Cubic root. 
+ /// \param arg function argument + /// \return cubic root of \a arg +// template typename enable::type cbrt(T arg) { return functions::cbrt(arg); } + inline expr cbrt(half arg) { return functions::cbrt(arg); } + inline expr cbrt(expr arg) { return functions::cbrt(arg); } + + /// Hypotenuse function. + /// \param x first argument + /// \param y second argument + /// \return square root of sum of squares without internal over- or underflows +// template typename enable::type hypot(T x, U y) { return functions::hypot(x, y); } + inline expr hypot(half x, half y) { return functions::hypot(x, y); } + inline expr hypot(half x, expr y) { return functions::hypot(x, y); } + inline expr hypot(expr x, half y) { return functions::hypot(x, y); } + inline expr hypot(expr x, expr y) { return functions::hypot(x, y); } + + /// Power function. + /// \param base first argument + /// \param exp second argument + /// \return \a base raised to \a exp +// template typename enable::type pow(T base, U exp) { return functions::pow(base, exp); } + inline expr pow(half base, half exp) { return functions::pow(base, exp); } + inline expr pow(half base, expr exp) { return functions::pow(base, exp); } + inline expr pow(expr base, half exp) { return functions::pow(base, exp); } + inline expr pow(expr base, expr exp) { return functions::pow(base, exp); } + + /// \} + /// \name Trigonometric functions + /// \{ + + /// Sine function. + /// \param arg function argument + /// \return sine value of \a arg +// template typename enable::type sin(T arg) { return functions::sin(arg); } + inline expr sin(half arg) { return functions::sin(arg); } + inline expr sin(expr arg) { return functions::sin(arg); } + + /// Cosine function. 
+ /// \param arg function argument + /// \return cosine value of \a arg +// template typename enable::type cos(T arg) { return functions::cos(arg); } + inline expr cos(half arg) { return functions::cos(arg); } + inline expr cos(expr arg) { return functions::cos(arg); } + + /// Tangent function. + /// \param arg function argument + /// \return tangent value of \a arg +// template typename enable::type tan(T arg) { return functions::tan(arg); } + inline expr tan(half arg) { return functions::tan(arg); } + inline expr tan(expr arg) { return functions::tan(arg); } + + /// Arc sine. + /// \param arg function argument + /// \return arc sine value of \a arg +// template typename enable::type asin(T arg) { return functions::asin(arg); } + inline expr asin(half arg) { return functions::asin(arg); } + inline expr asin(expr arg) { return functions::asin(arg); } + + /// Arc cosine function. + /// \param arg function argument + /// \return arc cosine value of \a arg +// template typename enable::type acos(T arg) { return functions::acos(arg); } + inline expr acos(half arg) { return functions::acos(arg); } + inline expr acos(expr arg) { return functions::acos(arg); } + + /// Arc tangent function. + /// \param arg function argument + /// \return arc tangent value of \a arg +// template typename enable::type atan(T arg) { return functions::atan(arg); } + inline expr atan(half arg) { return functions::atan(arg); } + inline expr atan(expr arg) { return functions::atan(arg); } + + /// Arc tangent function. 
+ /// \param x first argument + /// \param y second argument + /// \return arc tangent value +// template typename enable::type atan2(T x, U y) { return functions::atan2(x, y); } + inline expr atan2(half x, half y) { return functions::atan2(x, y); } + inline expr atan2(half x, expr y) { return functions::atan2(x, y); } + inline expr atan2(expr x, half y) { return functions::atan2(x, y); } + inline expr atan2(expr x, expr y) { return functions::atan2(x, y); } + + /// \} + /// \name Hyperbolic functions + /// \{ + + /// Hyperbolic sine. + /// \param arg function argument + /// \return hyperbolic sine value of \a arg +// template typename enable::type sinh(T arg) { return functions::sinh(arg); } + inline expr sinh(half arg) { return functions::sinh(arg); } + inline expr sinh(expr arg) { return functions::sinh(arg); } + + /// Hyperbolic cosine. + /// \param arg function argument + /// \return hyperbolic cosine value of \a arg +// template typename enable::type cosh(T arg) { return functions::cosh(arg); } + inline expr cosh(half arg) { return functions::cosh(arg); } + inline expr cosh(expr arg) { return functions::cosh(arg); } + + /// Hyperbolic tangent. + /// \param arg function argument + /// \return hyperbolic tangent value of \a arg +// template typename enable::type tanh(T arg) { return functions::tanh(arg); } + inline expr tanh(half arg) { return functions::tanh(arg); } + inline expr tanh(expr arg) { return functions::tanh(arg); } + + /// Hyperbolic area sine. + /// \param arg function argument + /// \return area sine value of \a arg +// template typename enable::type asinh(T arg) { return functions::asinh(arg); } + inline expr asinh(half arg) { return functions::asinh(arg); } + inline expr asinh(expr arg) { return functions::asinh(arg); } + + /// Hyperbolic area cosine. 
+ /// \param arg function argument + /// \return area cosine value of \a arg +// template typename enable::type acosh(T arg) { return functions::acosh(arg); } + inline expr acosh(half arg) { return functions::acosh(arg); } + inline expr acosh(expr arg) { return functions::acosh(arg); } + + /// Hyperbolic area tangent. + /// \param arg function argument + /// \return area tangent value of \a arg +// template typename enable::type atanh(T arg) { return functions::atanh(arg); } + inline expr atanh(half arg) { return functions::atanh(arg); } + inline expr atanh(expr arg) { return functions::atanh(arg); } + + /// \} + /// \name Error and gamma functions + /// \{ + + /// Error function. + /// \param arg function argument + /// \return error function value of \a arg +// template typename enable::type erf(T arg) { return functions::erf(arg); } + inline expr erf(half arg) { return functions::erf(arg); } + inline expr erf(expr arg) { return functions::erf(arg); } + + /// Complementary error function. + /// \param arg function argument + /// \return 1 minus error function value of \a arg +// template typename enable::type erfc(T arg) { return functions::erfc(arg); } + inline expr erfc(half arg) { return functions::erfc(arg); } + inline expr erfc(expr arg) { return functions::erfc(arg); } + + /// Natural logarithm of gamma function. + /// \param arg function argument + /// \return natural logarith of gamma function for \a arg +// template typename enable::type lgamma(T arg) { return functions::lgamma(arg); } + inline expr lgamma(half arg) { return functions::lgamma(arg); } + inline expr lgamma(expr arg) { return functions::lgamma(arg); } + + /// Gamma function. 
+ /// \param arg function argument + /// \return gamma function value of \a arg +// template typename enable::type tgamma(T arg) { return functions::tgamma(arg); } + inline expr tgamma(half arg) { return functions::tgamma(arg); } + inline expr tgamma(expr arg) { return functions::tgamma(arg); } + + /// \} + /// \name Rounding + /// \{ + + /// Nearest integer not less than half value. + /// \param arg half to round + /// \return nearest integer not less than \a arg +// template typename enable::type ceil(T arg) { return functions::ceil(arg); } + inline half ceil(half arg) { return functions::ceil(arg); } + inline half ceil(expr arg) { return functions::ceil(arg); } + + /// Nearest integer not greater than half value. + /// \param arg half to round + /// \return nearest integer not greater than \a arg +// template typename enable::type floor(T arg) { return functions::floor(arg); } + inline half floor(half arg) { return functions::floor(arg); } + inline half floor(expr arg) { return functions::floor(arg); } + + /// Nearest integer not greater in magnitude than half value. + /// \param arg half to round + /// \return nearest integer not greater in magnitude than \a arg +// template typename enable::type trunc(T arg) { return functions::trunc(arg); } + inline half trunc(half arg) { return functions::trunc(arg); } + inline half trunc(expr arg) { return functions::trunc(arg); } + + /// Nearest integer. + /// \param arg half to round + /// \return nearest integer, rounded away from zero in half-way cases +// template typename enable::type round(T arg) { return functions::round(arg); } + inline half round(half arg) { return functions::round(arg); } + inline half round(expr arg) { return functions::round(arg); } + + /// Nearest integer. 
+ /// \param arg half to round + /// \return nearest integer, rounded away from zero in half-way cases +// template typename enable::type lround(T arg) { return functions::lround(arg); } + inline long lround(half arg) { return functions::lround(arg); } + inline long lround(expr arg) { return functions::lround(arg); } + + /// Nearest integer using half's internal rounding mode. + /// \param arg half expression to round + /// \return nearest integer using default rounding mode +// template typename enable::type nearbyint(T arg) { return functions::nearbyint(arg); } + inline half nearbyint(half arg) { return functions::rint(arg); } + inline half nearbyint(expr arg) { return functions::rint(arg); } + + /// Nearest integer using half's internal rounding mode. + /// \param arg half expression to round + /// \return nearest integer using default rounding mode +// template typename enable::type rint(T arg) { return functions::rint(arg); } + inline half rint(half arg) { return functions::rint(arg); } + inline half rint(expr arg) { return functions::rint(arg); } + + /// Nearest integer using half's internal rounding mode. + /// \param arg half expression to round + /// \return nearest integer using default rounding mode +// template typename enable::type lrint(T arg) { return functions::lrint(arg); } + inline long lrint(half arg) { return functions::lrint(arg); } + inline long lrint(expr arg) { return functions::lrint(arg); } + #if HALF_ENABLE_CPP11_LONG_LONG + /// Nearest integer. + /// \param arg half to round + /// \return nearest integer, rounded away from zero in half-way cases +// template typename enable::type llround(T arg) { return functions::llround(arg); } + inline long long llround(half arg) { return functions::llround(arg); } + inline long long llround(expr arg) { return functions::llround(arg); } + + /// Nearest integer using half's internal rounding mode. 
+ /// \param arg half expression to round + /// \return nearest integer using default rounding mode +// template typename enable::type llrint(T arg) { return functions::llrint(arg); } + inline long long llrint(half arg) { return functions::llrint(arg); } + inline long long llrint(expr arg) { return functions::llrint(arg); } + #endif + + /// \} + /// \name Floating point manipulation + /// \{ + + /// Decompress floating point number. + /// \param arg number to decompress + /// \param exp address to store exponent at + /// \return significant in range [0.5, 1) +// template typename enable::type frexp(T arg, int *exp) { return functions::frexp(arg, exp); } + inline half frexp(half arg, int *exp) { return functions::frexp(arg, exp); } + inline half frexp(expr arg, int *exp) { return functions::frexp(arg, exp); } + + /// Multiply by power of two. + /// \param arg number to modify + /// \param exp power of two to multiply with + /// \return \a arg multplied by 2 raised to \a exp +// template typename enable::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp); } + inline half ldexp(half arg, int exp) { return functions::scalbln(arg, exp); } + inline half ldexp(expr arg, int exp) { return functions::scalbln(arg, exp); } + + /// Extract integer and fractional parts. + /// \param arg number to decompress + /// \param iptr address to store integer part at + /// \return fractional part +// template typename enable::type modf(T arg, half *iptr) { return functions::modf(arg, iptr); } + inline half modf(half arg, half *iptr) { return functions::modf(arg, iptr); } + inline half modf(expr arg, half *iptr) { return functions::modf(arg, iptr); } + + /// Multiply by power of two. 
+ /// \param arg number to modify + /// \param exp power of two to multiply with + /// \return \a arg multplied by 2 raised to \a exp +// template typename enable::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp); } + inline half scalbn(half arg, int exp) { return functions::scalbln(arg, exp); } + inline half scalbn(expr arg, int exp) { return functions::scalbln(arg, exp); } + + /// Multiply by power of two. + /// \param arg number to modify + /// \param exp power of two to multiply with + /// \return \a arg multplied by 2 raised to \a exp +// template typename enable::type scalbln(T arg, long exp) { return functions::scalbln(arg, exp); } + inline half scalbln(half arg, long exp) { return functions::scalbln(arg, exp); } + inline half scalbln(expr arg, long exp) { return functions::scalbln(arg, exp); } + + /// Extract exponent. + /// \param arg number to query + /// \return floating point exponent + /// \retval FP_ILOGB0 for zero + /// \retval FP_ILOGBNAN for NaN + /// \retval MAX_INT for infinity +// template typename enable::type ilogb(T arg) { return functions::ilogb(arg); } + inline int ilogb(half arg) { return functions::ilogb(arg); } + inline int ilogb(expr arg) { return functions::ilogb(arg); } + + /// Extract exponent. + /// \param arg number to query + /// \return floating point exponent +// template typename enable::type logb(T arg) { return functions::logb(arg); } + inline half logb(half arg) { return functions::logb(arg); } + inline half logb(expr arg) { return functions::logb(arg); } + + /// Next representable value. 
+ /// \param from value to compute next representable value for + /// \param to direction towards which to compute next value + /// \return next representable value after \a from in direction towards \a to +// template typename enable::type nextafter(T from, U to) { return functions::nextafter(from, to); } + inline half nextafter(half from, half to) { return functions::nextafter(from, to); } + inline half nextafter(half from, expr to) { return functions::nextafter(from, to); } + inline half nextafter(expr from, half to) { return functions::nextafter(from, to); } + inline half nextafter(expr from, expr to) { return functions::nextafter(from, to); } + + /// Next representable value. + /// \param from value to compute next representable value for + /// \param to direction towards which to compute next value + /// \return next representable value after \a from in direction towards \a to +// template typename enable::type nexttoward(T from, long double to) { return functions::nexttoward(from, to); } + inline half nexttoward(half from, long double to) { return functions::nexttoward(from, to); } + inline half nexttoward(expr from, long double to) { return functions::nexttoward(from, to); } + + /// Take sign. + /// \param x value to change sign for + /// \param y value to take sign from + /// \return value equal to \a x in magnitude and to \a y in sign +// template typename enable::type copysign(T x, U y) { return functions::copysign(x, y); } + inline half copysign(half x, half y) { return functions::copysign(x, y); } + inline half copysign(half x, expr y) { return functions::copysign(x, y); } + inline half copysign(expr x, half y) { return functions::copysign(x, y); } + inline half copysign(expr x, expr y) { return functions::copysign(x, y); } + + /// \} + /// \name Floating point classification + /// \{ + + + /// Classify floating point value. 
+ /// \param arg number to classify + /// \retval FP_ZERO for positive and negative zero + /// \retval FP_SUBNORMAL for subnormal numbers + /// \retval FP_INFINITY for positive and negative infinity + /// \retval FP_NAN for NaNs + /// \retval FP_NORMAL for all other (normal) values +// template typename enable::type fpclassify(T arg) { return functions::fpclassify(arg); } + inline int fpclassify(half arg) { return functions::fpclassify(arg); } + inline int fpclassify(expr arg) { return functions::fpclassify(arg); } + + /// Check if finite number. + /// \param arg number to check + /// \retval true if neither infinity nor NaN + /// \retval false else +// template typename enable::type isfinite(T arg) { return functions::isfinite(arg); } + inline bool isfinite(half arg) { return functions::isfinite(arg); } + inline bool isfinite(expr arg) { return functions::isfinite(arg); } + + /// Check for infinity. + /// \param arg number to check + /// \retval true for positive or negative infinity + /// \retval false else +// template typename enable::type isinf(T arg) { return functions::isinf(arg); } + inline bool isinf(half arg) { return functions::isinf(arg); } + inline bool isinf(expr arg) { return functions::isinf(arg); } + + /// Check for NaN. + /// \param arg number to check + /// \retval true for NaNs + /// \retval false else +// template typename enable::type isnan(T arg) { return functions::isnan(arg); } + inline bool isnan(half arg) { return functions::isnan(arg); } + inline bool isnan(expr arg) { return functions::isnan(arg); } + + /// Check if normal number. + /// \param arg number to check + /// \retval true if normal number + /// \retval false if either subnormal, zero, infinity or NaN +// template typename enable::type isnormal(T arg) { return functions::isnormal(arg); } + inline bool isnormal(half arg) { return functions::isnormal(arg); } + inline bool isnormal(expr arg) { return functions::isnormal(arg); } + + /// Check sign. 
+ /// \param arg number to check + /// \retval true for negative number + /// \retval false for positive number +// template typename enable::type signbit(T arg) { return functions::signbit(arg); } + inline bool signbit(half arg) { return functions::signbit(arg); } + inline bool signbit(expr arg) { return functions::signbit(arg); } + + /// \} + /// \name Comparison + /// \{ + + /// Comparison for greater than. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x greater than \a y + /// \retval false else +// template typename enable::type isgreater(T x, U y) { return functions::isgreater(x, y); } + inline bool isgreater(half x, half y) { return functions::isgreater(x, y); } + inline bool isgreater(half x, expr y) { return functions::isgreater(x, y); } + inline bool isgreater(expr x, half y) { return functions::isgreater(x, y); } + inline bool isgreater(expr x, expr y) { return functions::isgreater(x, y); } + + /// Comparison for greater equal. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x greater equal \a y + /// \retval false else +// template typename enable::type isgreaterequal(T x, U y) { return functions::isgreaterequal(x, y); } + inline bool isgreaterequal(half x, half y) { return functions::isgreaterequal(x, y); } + inline bool isgreaterequal(half x, expr y) { return functions::isgreaterequal(x, y); } + inline bool isgreaterequal(expr x, half y) { return functions::isgreaterequal(x, y); } + inline bool isgreaterequal(expr x, expr y) { return functions::isgreaterequal(x, y); } + + /// Comparison for less than. 
+ /// \param x first operand + /// \param y second operand + /// \retval true if \a x less than \a y + /// \retval false else +// template typename enable::type isless(T x, U y) { return functions::isless(x, y); } + inline bool isless(half x, half y) { return functions::isless(x, y); } + inline bool isless(half x, expr y) { return functions::isless(x, y); } + inline bool isless(expr x, half y) { return functions::isless(x, y); } + inline bool isless(expr x, expr y) { return functions::isless(x, y); } + + /// Comparison for less equal. + /// \param x first operand + /// \param y second operand + /// \retval true if \a x less equal \a y + /// \retval false else +// template typename enable::type islessequal(T x, U y) { return functions::islessequal(x, y); } + inline bool islessequal(half x, half y) { return functions::islessequal(x, y); } + inline bool islessequal(half x, expr y) { return functions::islessequal(x, y); } + inline bool islessequal(expr x, half y) { return functions::islessequal(x, y); } + inline bool islessequal(expr x, expr y) { return functions::islessequal(x, y); } + + /// Comarison for less or greater. + /// \param x first operand + /// \param y second operand + /// \retval true if either less or greater + /// \retval false else +// template typename enable::type islessgreater(T x, U y) { return functions::islessgreater(x, y); } + inline bool islessgreater(half x, half y) { return functions::islessgreater(x, y); } + inline bool islessgreater(half x, expr y) { return functions::islessgreater(x, y); } + inline bool islessgreater(expr x, half y) { return functions::islessgreater(x, y); } + inline bool islessgreater(expr x, expr y) { return functions::islessgreater(x, y); } + + /// Check if unordered. 
+ /// \param x first operand + /// \param y second operand + /// \retval true if unordered (one or two NaN operands) + /// \retval false else +// template typename enable::type isunordered(T x, U y) { return functions::isunordered(x, y); } + inline bool isunordered(half x, half y) { return functions::isunordered(x, y); } + inline bool isunordered(half x, expr y) { return functions::isunordered(x, y); } + inline bool isunordered(expr x, half y) { return functions::isunordered(x, y); } + inline bool isunordered(expr x, expr y) { return functions::isunordered(x, y); } + + /// \name Casting + /// \{ + + /// Cast to or from half-precision floating point number. + /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted + /// directly using the given rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do. + /// It uses the default rounding mode. + /// + /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types + /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler + /// error and casting between [half](\ref half_float::half)s is just a no-op. + /// \tparam T destination type (half or built-in arithmetic type) + /// \tparam U source type (half or built-in arithmetic type) + /// \param arg value to cast + /// \return \a arg converted to destination type + template T half_cast(U arg) { return half_caster::cast(arg); } + + /// Cast to or from half-precision floating point number. + /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted + /// directly using the given rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do. 
+ /// + /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types + /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler + /// error and casting between [half](\ref half_float::half)s is just a no-op. + /// \tparam T destination type (half or built-in arithmetic type) + /// \tparam R rounding mode to use. + /// \tparam U source type (half or built-in arithmetic type) + /// \param arg value to cast + /// \return \a arg converted to destination type + template T half_cast(U arg) { return half_caster::cast(arg); } + /// \} + } + + using detail::operator==; + using detail::operator!=; + using detail::operator<; + using detail::operator>; + using detail::operator<=; + using detail::operator>=; + using detail::operator+; + using detail::operator-; + using detail::operator*; + using detail::operator/; + using detail::operator<<; + using detail::operator>>; + + using detail::abs; + using detail::fabs; + using detail::fmod; + using detail::remainder; + using detail::remquo; + using detail::fma; + using detail::fmax; + using detail::fmin; + using detail::fdim; + using detail::nanh; + using detail::exp; + using detail::expm1; + using detail::exp2; + using detail::log; + using detail::log10; + using detail::log1p; + using detail::log2; + using detail::sqrt; + using detail::cbrt; + using detail::hypot; + using detail::pow; + using detail::sin; + using detail::cos; + using detail::tan; + using detail::asin; + using detail::acos; + using detail::atan; + using detail::atan2; + using detail::sinh; + using detail::cosh; + using detail::tanh; + using detail::asinh; + using detail::acosh; + using detail::atanh; + using detail::erf; + using detail::erfc; + using detail::lgamma; + using detail::tgamma; + using detail::ceil; + using detail::floor; + using detail::trunc; + using detail::round; + using detail::lround; + using detail::nearbyint; + using detail::rint; + 
using detail::lrint; +#if HALF_ENABLE_CPP11_LONG_LONG + using detail::llround; + using detail::llrint; +#endif + using detail::frexp; + using detail::ldexp; + using detail::modf; + using detail::scalbn; + using detail::scalbln; + using detail::ilogb; + using detail::logb; + using detail::nextafter; + using detail::nexttoward; + using detail::copysign; + using detail::fpclassify; + using detail::isfinite; + using detail::isinf; + using detail::isnan; + using detail::isnormal; + using detail::signbit; + using detail::isgreater; + using detail::isgreaterequal; + using detail::isless; + using detail::islessequal; + using detail::islessgreater; + using detail::isunordered; + + using detail::half_cast; +} + + +/// Extensions to the C++ standard library. +namespace std +{ + /// Numeric limits for half-precision floats. + /// Because of the underlying single-precision implementation of many operations, it inherits some properties from + /// `std::numeric_limits`. + template<> class numeric_limits : public numeric_limits + { + public: + /// Supports signed values. + static HALF_CONSTEXPR_CONST bool is_signed = true; + + /// Is not exact. + static HALF_CONSTEXPR_CONST bool is_exact = false; + + /// Doesn't provide modulo arithmetic. + static HALF_CONSTEXPR_CONST bool is_modulo = false; + + /// IEEE conformant. + static HALF_CONSTEXPR_CONST bool is_iec559 = true; + + /// Supports infinity. + static HALF_CONSTEXPR_CONST bool has_infinity = true; + + /// Supports quiet NaNs. + static HALF_CONSTEXPR_CONST bool has_quiet_NaN = true; + + /// Supports subnormal values. + static HALF_CONSTEXPR_CONST float_denorm_style has_denorm = denorm_present; + + /// Rounding mode. 
+ /// Due to the mix of internal single-precision computations (using the rounding mode of the underlying + /// single-precision implementation) with the rounding mode of the single-to-half conversions, the actual rounding + /// mode might be `std::round_indeterminate` if the default half-precision rounding mode doesn't match the + /// single-precision rounding mode. + static HALF_CONSTEXPR_CONST float_round_style round_style = (std::numeric_limits::round_style== + half_float::half::round_style) ? half_float::half::round_style : round_indeterminate; + + /// Significant digits. + static HALF_CONSTEXPR_CONST int digits = 11; + + /// Significant decimal digits. + static HALF_CONSTEXPR_CONST int digits10 = 3; + + /// Required decimal digits to represent all possible values. + static HALF_CONSTEXPR_CONST int max_digits10 = 5; + + /// Number base. + static HALF_CONSTEXPR_CONST int radix = 2; + + /// One more than smallest exponent. + static HALF_CONSTEXPR_CONST int min_exponent = -13; + + /// Smallest normalized representable power of 10. + static HALF_CONSTEXPR_CONST int min_exponent10 = -4; + + /// One more than largest exponent + static HALF_CONSTEXPR_CONST int max_exponent = 16; + + /// Largest finitely representable power of 10. + static HALF_CONSTEXPR_CONST int max_exponent10 = 4; + + /// Smallest positive normal value. + static HALF_CONSTEXPR half_float::half min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0400); } + + /// Smallest finite value. + static HALF_CONSTEXPR half_float::half lowest() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0xFBFF); } + + /// Largest finite value. + static HALF_CONSTEXPR half_float::half max() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7BFF); } + + /// Difference between one and next representable value. 
+ static HALF_CONSTEXPR half_float::half epsilon() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x1400); } + + /// Maximum rounding error. + static HALF_CONSTEXPR half_float::half round_error() HALF_NOTHROW + { return half_float::half(half_float::detail::binary, (round_style==std::round_to_nearest) ? 0x3800 : 0x3C00); } + + /// Positive infinity. + static HALF_CONSTEXPR half_float::half infinity() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7C00); } + + /// Quiet NaN. + static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); } + + /// Signalling NaN. + static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); } + + /// Smallest positive subnormal value. + static HALF_CONSTEXPR half_float::half denorm_min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0001); } + }; + +#if HALF_ENABLE_CPP11_HASH + /// Hash function for half-precision floats. + /// This is only defined if C++11 `std::hash` is supported and enabled. + template<> struct hash //: unary_function + { + /// Type of function argument. + typedef half_float::half argument_type; + + /// Function return type. + typedef size_t result_type; + + /// Compute hash function. 
+ /// \param arg half to hash + /// \return hash value + result_type operator()(argument_type arg) const + { return hash()(static_cast(arg.data_)&-(arg.data_!=0x8000)); } + }; +#endif +} + + +#undef HALF_CONSTEXPR +#undef HALF_CONSTEXPR_CONST +#undef HALF_NOEXCEPT +#undef HALF_NOTHROW +#ifdef HALF_POP_WARNINGS + #pragma warning(pop) + #undef HALF_POP_WARNINGS +#endif + +#endif diff --git a/onnx2trt.hpp b/onnx2trt.hpp index 680ef900..3a2a7352 100644 --- a/onnx2trt.hpp +++ b/onnx2trt.hpp @@ -54,11 +54,15 @@ class IImporterContext virtual nvinfer1::ILogger& logger() = 0; virtual bool hasError() const = 0; virtual nvinfer1::IErrorRecorder* getErrorRecorder() const = 0; + virtual nvinfer1::IConstantLayer* getConstantLayer(const char* name) const = 0; + //! Push a new scope for base names (ONNX names). + virtual void pushBaseNameScope() = 0; + + //! Revert actions of registerTensor for names in the top scope and pop it. + virtual void popBaseNameScope() = 0; protected: - virtual ~IImporterContext() - { - } + virtual ~IImporterContext() {} }; } // namespace onnx2trt diff --git a/onnx2trt_common.hpp b/onnx2trt_common.hpp index 88427129..3a071a33 100644 --- a/onnx2trt_common.hpp +++ b/onnx2trt_common.hpp @@ -15,8 +15,7 @@ enum class PluginFormat : uint8_t { kNCHW = 0, //!< NCHW kNC2HW2 = 1, //!< NCHW with 2-element packed channels - kNHWC8 = 2 //!< NHWC with 8-element packed channels (C - //! 
must be a multiple of 8) + kNHWC8 = 2 //!< NHWC with 8-element packed channels (C must be a multiple of 8) }; // from NvInfer.h class IPluginExt : public IPlugin diff --git a/onnx2trt_utils.cpp b/onnx2trt_utils.cpp index 593c450a..fa8fde33 100644 --- a/onnx2trt_utils.cpp +++ b/onnx2trt_utils.cpp @@ -3,7 +3,6 @@ */ #include "onnx2trt_utils.hpp" -#include "NvInferSafeRuntime.h" #include "OnnxAttrs.hpp" #include @@ -543,9 +542,12 @@ bool convertOnnxWeights( { void* dataPtr{nullptr}; size_t nbytes{0}; - nvinfer1::Dims shape; auto onnxDtype = onnxTensor.data_type(); + nvinfer1::Dims shape{}; + shape.nbDims = onnxTensor.dims().size(); + std::copy_n(onnxTensor.dims().begin(), shape.nbDims, shape.d); + // ONNX weight values can be stored in either the TensorProto itself, or in an external file in the case // of large models. Check for this here. auto dataLocation = onnxTensor.data_location(); @@ -592,8 +594,6 @@ bool convertOnnxWeights( { return false; } - shape.nbDims = onnxTensor.dims().size(); - std::copy(onnxTensor.dims().begin(), onnxTensor.dims().end(), shape.d); // For weights parsed from external files, createTempWeights is necessary to keep them in scope ShapedWeights externalWeights; @@ -636,17 +636,8 @@ bool convertOnnxWeights( *weights = externalWeights; return true; } - // Weights information is within the TensorProto itself - // Pass through for optional (empty) initializers for unused attributes. - if (isOnnxTensorEmpty(onnxTensor)) - { - auto empty = onnx2trt::ShapedWeights::empty(::ONNX_NAMESPACE::TensorProto::FLOAT); - *weights = empty; - return true; - } - shape.nbDims = onnxTensor.dims().size(); - std::copy(onnxTensor.dims().begin(), onnxTensor.dims().end(), shape.d); + // Weights information is within the TensorProto itself // Cast non-native TRT types to their corresponding proxy types if (onnxDtype == ::ONNX_NAMESPACE::TensorProto::INT64) @@ -706,9 +697,10 @@ bool convertOnnxWeights( { switch (onnxDtype) { - // Import INT32 and FP16 weights as is. 
- case ::ONNX_NAMESPACE::TensorProto::INT32: - case ::ONNX_NAMESPACE::TensorProto::FLOAT16: dataPtr = (void*) (onnxTensor.int32_data().data()); break; + case ::ONNX_NAMESPACE::TensorProto::INT32: dataPtr = (void*) (onnxTensor.int32_data().data()); break; + case ::ONNX_NAMESPACE::TensorProto::FLOAT16: + dataPtr = convertINT32Data(onnxTensor.int32_data().data(), shape, onnxDtype, ctx); + break; case ::ONNX_NAMESPACE::TensorProto::INT8: dataPtr = convertINT32Data(onnxTensor.int32_data().data(), shape, onnxDtype, ctx); break; @@ -780,6 +772,12 @@ nvinfer1::ITensor& convertToTensor(TensorOrWeights& input, IImporterContext* ctx } // Handle non-tensor indices input by adding a new constant layer to the network. ShapedWeights& weights = input.weights(); + + auto const existingConstantLayer = ctx->getConstantLayer(weights.getName()); + if (existingConstantLayer != nullptr) + { + return *(existingConstantLayer->getOutput(0)); + } // Note the TRT doesn't natively handle boolean weights. First create an INT32 weights copy of the boolean weights, // then cast it back to bool within TRT. 
if (weights.type == ::ONNX_NAMESPACE::TensorProto::BOOL) @@ -1175,13 +1173,18 @@ nvinfer1::IPluginCreator* importPluginCreator( nvinfer1::IPluginCreator* creator = nullptr; #if ENABLE_STD_PLUGIN - creator = getPluginRegistry()->getPluginCreator(pluginName.c_str(), pluginVersion.c_str(), pluginNamespace.c_str()); + auto pluginRegistry = nvinfer1::getBuilderPluginRegistry(nvinfer1::EngineCapability::kSTANDARD); + if (pluginRegistry != nullptr) + { + creator = pluginRegistry->getPluginCreator(pluginName.c_str(), pluginVersion.c_str(), pluginNamespace.c_str()); + } #endif // ENABLE_STD_PLUGIN #if ENABLE_SAFE_PLUGIN - if (creator == nullptr && nvinfer1::safe::getSafePluginRegistry() != nullptr) + auto safetyPluginRegistry = nvinfer1::getBuilderPluginRegistry(nvinfer1::EngineCapability::kSAFETY); + if (creator == nullptr && safetyPluginRegistry != nullptr) { - creator = nvinfer1::safe::getSafePluginRegistry()->getPluginCreator( + creator = safetyPluginRegistry->getPluginCreator( pluginName.c_str(), pluginVersion.c_str(), pluginNamespace.c_str()); } #endif // ENABLE_SAFE_PLUGIN @@ -1209,13 +1212,6 @@ bool isDynamic(const nvinfer1::Dims& shape) return std::any_of(shape.d, shape.d + shape.nbDims, [](int dim) { return dim < 0; }); } -bool isOnnxTensorEmpty(const ::ONNX_NAMESPACE::TensorProto& onnxTensor) -{ - return onnxTensor.raw_data().empty() && onnxTensor.double_data().empty() && onnxTensor.float_data().empty() - && onnxTensor.int32_data().empty() && onnxTensor.int64_data().empty() && onnxTensor.string_data().empty() - && onnxTensor.uint64_data().empty(); -} - bool isTransposeRequired(nvinfer1::Dims const& shape, nvinfer1::Permutation const& perm) { int ndim = shape.nbDims; @@ -1312,7 +1308,7 @@ NodeImportResult lstmLegacyImporter( auto* source = reinterpret_cast(weights.values); std::vector buffer; buffer.resize(len); - for (int i = 0; i < num_directions; i++) + for (int d = 0; d < num_directions; d++) { for (int j = 0; j < batch_size; j++) { @@ -1320,10 +1316,10 @@ 
NodeImportResult lstmLegacyImporter( { for (int b = 0; b < dtype_size; b++) { - int src_idx = i * batch_size * hidden_size * dtype_size + j * hidden_size * dtype_size + int src_idx = d * batch_size * hidden_size * dtype_size + j * hidden_size * dtype_size + k * dtype_size + b; int buf_idx = j * num_directions * hidden_size * dtype_size - + i * hidden_size * dtype_size + k * dtype_size + b; + + d * hidden_size * dtype_size + k * dtype_size + b; buffer.at(buf_idx) = source[src_idx]; } } @@ -1558,6 +1554,11 @@ bool parseExternalWeights(IImporterContext* ctx, std::string file, std::string p // The weight paths in the ONNX model are relative paths to the main ONNX file. #ifdef _MSC_VER size_t slash = path.rfind("\\"); + // When using WSL path can have "\" or "/". Need to check both options here. + if (slash == std::string::npos) + { + slash = path.rfind("/"); + } #else size_t slash = path.rfind("/"); #endif @@ -1622,7 +1623,6 @@ NodeImportResult poolingHelper(IImporterContext* ctx, ::ONNX_NAMESPACE::NodeProt bool ceilMode(false); if (ctx->getOpsetVersion() >= 10) { - OnnxAttrs attrs(node, ctx); ceilMode = static_cast(attrs.get("ceil_mode", 0)); const auto dilations = attrs.get>("dilations", std::vector(2, 1)); for (size_t i = 0; i < dilations.size(); i++) @@ -1833,96 +1833,6 @@ nvinfer1::ITensor* sliceAcrossAxis( return output; } -bool supportsShapeTensor(nvinfer1::LayerType type, nvinfer1::ElementWiseOperation eleOp, - nvinfer1::ReduceOperation redOp, nvinfer1::FillOperation fillOp) -{ - switch (type) - { - // Layers that allow shape tensor output - case nvinfer1::LayerType::kCONCATENATION: - case nvinfer1::LayerType::kCONDITION: - case nvinfer1::LayerType::kCONDITIONAL_INPUT: - case nvinfer1::LayerType::kCONDITIONAL_OUTPUT: - case nvinfer1::LayerType::kCONSTANT: - case nvinfer1::LayerType::kGATHER: - case nvinfer1::LayerType::kIDENTITY: - case nvinfer1::LayerType::kPADDING: - case nvinfer1::LayerType::kSCATTER: - case nvinfer1::LayerType::kSELECT: - case 
nvinfer1::LayerType::kSHAPE: - case nvinfer1::LayerType::kSHUFFLE: - case nvinfer1::LayerType::kSLICE: return true; - // Layers that do not allow shape tensor output - case nvinfer1::LayerType::kACTIVATION: - case nvinfer1::LayerType::kASSERTION: - case nvinfer1::LayerType::kCONVOLUTION: - case nvinfer1::LayerType::kDECONVOLUTION: - case nvinfer1::LayerType::kDEQUANTIZE: - case nvinfer1::LayerType::kEINSUM: - case nvinfer1::LayerType::kFULLY_CONNECTED: - case nvinfer1::LayerType::kITERATOR: - case nvinfer1::LayerType::kLOOP_OUTPUT: - case nvinfer1::LayerType::kLRN: - case nvinfer1::LayerType::kMATRIX_MULTIPLY: - case nvinfer1::LayerType::kPARAMETRIC_RELU: - case nvinfer1::LayerType::kPLUGIN: - case nvinfer1::LayerType::kPLUGIN_V2: - case nvinfer1::LayerType::kPOOLING: - case nvinfer1::LayerType::kQUANTIZE: - case nvinfer1::LayerType::kRAGGED_SOFTMAX: - case nvinfer1::LayerType::kRECURRENCE: - case nvinfer1::LayerType::kRESIZE: - case nvinfer1::LayerType::kRNN_V2: - case nvinfer1::LayerType::kSCALE: - case nvinfer1::LayerType::kSOFTMAX: - case nvinfer1::LayerType::kTRIP_LIMIT: - case nvinfer1::LayerType::kTOPK: - case nvinfer1::LayerType::kUNARY: return false; - // Layers that have partial shape tensor output support - case nvinfer1::LayerType::kELEMENTWISE: - switch (eleOp) - { - // Supported elementwise operations - case nvinfer1::ElementWiseOperation::kAND: - case nvinfer1::ElementWiseOperation::kDIV: - case nvinfer1::ElementWiseOperation::kEQUAL: - case nvinfer1::ElementWiseOperation::kFLOOR_DIV: - case nvinfer1::ElementWiseOperation::kGREATER: - case nvinfer1::ElementWiseOperation::kLESS: - case nvinfer1::ElementWiseOperation::kMAX: - case nvinfer1::ElementWiseOperation::kMIN: - case nvinfer1::ElementWiseOperation::kOR: - case nvinfer1::ElementWiseOperation::kPROD: - case nvinfer1::ElementWiseOperation::kSUB: - case nvinfer1::ElementWiseOperation::kSUM: - case nvinfer1::ElementWiseOperation::kXOR: return true; - // Unsupported elementwise operations - case 
nvinfer1::ElementWiseOperation::kPOW: return false; - } - return false; - case nvinfer1::LayerType::kREDUCE: - switch (redOp) - { - // Supported reduce operations - case nvinfer1::ReduceOperation::kSUM: - case nvinfer1::ReduceOperation::kMAX: - case nvinfer1::ReduceOperation::kMIN: - case nvinfer1::ReduceOperation::kPROD: return true; - // Unsupported reduce operations - case nvinfer1::ReduceOperation::kAVG: return false; - } - return false; - case nvinfer1::LayerType::kFILL: - switch (fillOp) - { - case nvinfer1::FillOperation::kLINSPACE: return true; - case nvinfer1::FillOperation::kRANDOM_UNIFORM: return false; - } - return false; - } - return false; -} - nvinfer1::ITensor* squeezeTensor(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ITensor& tensor, const std::vector& axes, bool regLayer) { @@ -2201,6 +2111,27 @@ nvinfer1::ITensor* unsqueezeTensor(IImporterContext* ctx, const ::ONNX_NAMESPACE return unsqueezeLayer->getOutput(0); } +nvinfer1::ITensor* resizeShapeTensor(IImporterContext* ctx, nvinfer1::ITensor& input, TensorOrWeights& scales) +{ + // Create below subnetwork for processing resize scale tensor or weights. 
+ // clang-format off + // scale weights (convert to tensor) or scale tensor -> elementwise mul -> transformation(floor, ceil, round) -> identity (cast to int) -> resize shape tensor + // input -> shapeof -> identity (cast to float) -> + // clang-format on + auto* floatCast = ctx->network()->addIdentity(shapeOf(input).tensor(ctx)); + floatCast->setOutputType(0, nvinfer1::DataType::kFLOAT); + auto* inputShapeTensor = floatCast->getOutput(0); + + auto& scaleTensor = convertToTensor(scales, ctx); + auto* prod = ctx->network() + ->addElementWise(scaleTensor, *inputShapeTensor, nvinfer1::ElementWiseOperation::kPROD) + ->getOutput(0); + auto* floor = ctx->network()->addUnary(*prod, nvinfer1::UnaryOperation::kFLOOR)->getOutput(0); + auto* intCast = ctx->network()->addIdentity(*floor); + intCast->setOutputType(0, nvinfer1::DataType::kINT32); + return intCast->getOutput(0); +} + int64_t volume(const nvinfer1::Dims& dims) { std::for_each( diff --git a/onnx2trt_utils.hpp b/onnx2trt_utils.hpp index 0a9e12c0..11faaeaa 100644 --- a/onnx2trt_utils.hpp +++ b/onnx2trt_utils.hpp @@ -14,15 +14,18 @@ #include // For std::memcpy #include +#include #include #include -#include +#include +#include #define LOG(msg, severity) \ do \ { \ std::stringstream ss{}; \ - if (severity <= nvinfer1::ILogger::Severity::kWARNING) ss << __FILENAME__ << ":" << __LINE__ << ": "; \ + if (severity <= nvinfer1::ILogger::Severity::kWARNING) \ + ss << __FILENAME__ << ":" << __LINE__ << ": "; \ ss << msg; \ ctx->logger().log(severity, ss.str().c_str()); \ } while (0) @@ -98,7 +101,9 @@ inline nvinfer1::IConstantLayer* addConstantScalar( assert(volume(shape) == 1 && "Cannot add constant scalar with a shape that has volume > 1"); ShapedWeights scalarWeights = ctx->createTempWeights(type, shape); static_cast(scalarWeights.values)[0] = static_cast(scalar); - return ctx->network()->addConstant(scalarWeights.shape, scalarWeights); + nvinfer1::IConstantLayer* l = ctx->network()->addConstant(scalarWeights.shape, 
scalarWeights); + ctx->network()->setWeightsName(scalarWeights, scalarWeights.getName()); + return l; } // Helper function to create a tensor given a vector of values and a shape. @@ -110,7 +115,9 @@ inline nvinfer1::IConstantLayer* addConstant( assert(sizeof(ScalarType) == getDtypeSize(type) && "ONNX dtype does not have the same size as the value type"); ShapedWeights weights = ctx->createTempWeights(type, shape); std::memcpy(weights.values, values.data(), values.size() * sizeof(ScalarType)); - return ctx->network()->addConstant(weights.shape, weights); + nvinfer1::IConstantLayer* l = ctx->network()->addConstant(weights.shape, weights); + ctx->network()->setWeightsName(weights, weights.getName()); + return l; } enum ScaleOp @@ -148,12 +155,12 @@ Status isBroadcastValid(IImporterContext* ctx, const nvinfer1::Dims& firstShape, std::vector calculateBias( const nvinfer1::Dims& daDims, const nvinfer1::Dims& idxDims, const std::vector& pitches, int32_t axis); -// Helper function to calculate and return a vector representation of the pitches of a given shape -std::vector calculatePitches(const nvinfer1::Dims& inputDims); - // Helper function to check that linear resize can be used bool canUseLinearResize(const size_t scaleSize, const float* scaleFactors); +// Helper function to calculate and return a vector representation of the pitches of a given shape +std::vector calculatePitches(const nvinfer1::Dims& inputDims); + // Helper function to add a Cast layer in the network nvinfer1::ITensor* castHelper(IImporterContext* ctx, nvinfer1::ITensor* input, nvinfer1::DataType dtype); @@ -254,9 +261,6 @@ nvinfer1::ITensor* globalPoolingHelper(IImporterContext* ctx, ::ONNX_NAMESPACE:: // Helper function to determine if a shape contains dynamic dimensions bool isDynamic(const nvinfer1::Dims& shape); -// Helper function to determine if a ONNX tensor is empty -bool isOnnxTensorEmpty(const ::ONNX_NAMESPACE::TensorProto& onnxTensor); - // Helper function to load a creator from the 
registry nvinfer1::IPluginCreator* importPluginCreator( const std::string& pluginName, const std::string& pluginVersion, const std::string& pluginNamespace = ""); @@ -307,10 +311,6 @@ void setAttr( nvinfer1::ITensor* sliceAcrossAxis( IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ITensor* data, const int axis); -// Helper function to filter out shape tensor outputs for layers that do not support it -bool supportsShapeTensor(nvinfer1::LayerType type, nvinfer1::ElementWiseOperation eleOp, - nvinfer1::ReduceOperation redOp, nvinfer1::FillOperation fillOp); - // Helper function to squeeze a tensor on a given set of axes nvinfer1::ITensor* squeezeTensor(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ITensor& tensor, const std::vector& axes, bool regLayer = false); @@ -326,6 +326,10 @@ NodeImportResult unaryHelper(IImporterContext* ctx, const ::ONNX_NAMESPACE::Node nvinfer1::ITensor* unsqueezeTensor(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ITensor& tensor, const std::vector& axes, bool regLayer = false); +// Helper function to calculate and return the expected output shape of a resize given the resize scale weights or scale +// tensor. 
+nvinfer1::ITensor* resizeShapeTensor(IImporterContext* ctx, nvinfer1::ITensor& input, TensorOrWeights& scales); + // Helper function to convert a ShapedWeights object into a vector template Status weightsToVector(TensorOrWeights weights, std::vector* weightVector) @@ -354,6 +358,40 @@ Status weightsToVector(TensorOrWeights weights, std::vector* weightV return Status(ErrorCode::kSUCCESS); } +template +ShapedWeights::DataType getShapeWeightsDataType() +{ + static const std::unordered_map tMap( + {{std::type_index(typeid(bool)), ::ONNX_NAMESPACE::TensorProto::BOOL}, + {std::type_index(typeid(int8_t)), ::ONNX_NAMESPACE::TensorProto::INT8}, + {std::type_index(typeid(uint8_t)), ::ONNX_NAMESPACE::TensorProto::UINT8}, + {std::type_index(typeid(int16_t)), ::ONNX_NAMESPACE::TensorProto::INT16}, + {std::type_index(typeid(uint16_t)), ::ONNX_NAMESPACE::TensorProto::UINT16}, + {std::type_index(typeid(int32_t)), ::ONNX_NAMESPACE::TensorProto::INT32}, + {std::type_index(typeid(uint32_t)), ::ONNX_NAMESPACE::TensorProto::UINT32}, + {std::type_index(typeid(int64_t)), ::ONNX_NAMESPACE::TensorProto::INT64}, + {std::type_index(typeid(uint64_t)), ::ONNX_NAMESPACE::TensorProto::UINT64}, + {std::type_index(typeid(float)), ::ONNX_NAMESPACE::TensorProto::FLOAT}, + {std::type_index(typeid(double)), ::ONNX_NAMESPACE::TensorProto::DOUBLE}}); + + if (tMap.find(std::type_index(typeid(T))) != tMap.end()) + { + return tMap.at(std::type_index(typeid(T))); + } + return ::ONNX_NAMESPACE::TensorProto::UNDEFINED; +} + +// Helper function to convert a vector object into a ShapedWeights object +template +Status vectorToWeights(std::vector& weightVector, TensorOrWeights* weights) +{ + nvinfer1::Dims shape{1, {static_cast(weightVector.size())}}; + ShapedWeights::DataType dtype = getShapeWeightsDataType(); + ASSERT(dtype != ::ONNX_NAMESPACE::TensorProto::UNDEFINED && "Unsupported datatype", ErrorCode::kINVALID_VALUE); + *weights = ShapedWeights(dtype, weightVector.data(), shape); + return 
Status(ErrorCode::kSUCCESS); +} + // Helper function to convert ONNX node name. If no node name, using name of first output. const std::string getNodeName(const ::ONNX_NAMESPACE::NodeProto& node); @@ -372,8 +410,25 @@ ShapeTensor axesToInterlaceSubscripts(const ShapeTensor& axes, int nbDims); //! Helper function to add SoftMax layer. nvinfer1::ITensor* addSoftmax(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ITensor& input); -// Helper function to import ONNX scatter nodes into TRT +//! Helper function to import ONNX scatter nodes into TRT NodeImportResult addScatterLayer( IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, std::vector& inputs, nvinfer1::ScatterMode mode, int32_t axis = 0); +//! RAII wrapper for IImporterContext::pushBaseNameScope() and popBaseNameScope(). +class NameScope +{ +public: + NameScope(IImporterContext& context) + : mContext(context) + { + mContext.pushBaseNameScope(); + } + ~NameScope() + { + mContext.popBaseNameScope(); + } +private: + IImporterContext& mContext; +}; + } // namespace onnx2trt diff --git a/onnx_tensorrt/__init__.py b/onnx_tensorrt/__init__.py index e241103d..6c2aa2a7 100644 --- a/onnx_tensorrt/__init__.py +++ b/onnx_tensorrt/__init__.py @@ -4,4 +4,4 @@ from . import backend -__version__ = "8.2.1" +__version__ = "8.4.1" diff --git a/onnx_utils.hpp b/onnx_utils.hpp index 715e0e15..67550230 100644 --- a/onnx_utils.hpp +++ b/onnx_utils.hpp @@ -1,4 +1,4 @@ -/* + /* * SPDX-License-Identifier: Apache-2.0 */ @@ -151,13 +151,13 @@ inline bool ParseFromFile_WAR(google::protobuf::Message* msg, const char* filena google::protobuf::io::IstreamInputStream rawInput(&stream); google::protobuf::io::CodedInputStream coded_input(&rawInput); - #if GOOGLE_PROTOBUF_VERSION >= 3011000 +#if GOOGLE_PROTOBUF_VERSION >= 3011000 // Starting Protobuf 3.11 accepts only single parameter. 
coded_input.SetTotalBytesLimit(std::numeric_limits::max()); - #else +#else // Note: This WARs the very low default size limit (64MB) coded_input.SetTotalBytesLimit(std::numeric_limits::max(), std::numeric_limits::max() / 4); - #endif +#endif return msg->ParseFromCodedStream(&coded_input); }