/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file provides general C++ utility functions in TFLite.
// For example: Converting between `TfLiteIntArray`, `std::vector` and
// Flatbuffer vectors. These functions can't live in `context.h` since it's pure
// C.
#ifndef TENSORFLOW_LITE_UTIL_H_
#define TENSORFLOW_LITE_UTIL_H_
#include <stddef.h>
#include <cstdlib>  // for free(), used by TfLiteTensorDeleter below.
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
// Memory allocation parameter used by ArenaPlanner.
// Clients (such as delegates) might look at this to ensure interop between
// TFLite memory & hardware buffers.
// NOTE: This only holds for tensors allocated on the arena.
constexpr int kDefaultTensorAlignment = 64;
// The prefix of Flex op custom code.
// This will be matched against the `custom_code` field in `OperatorCode`
// Flatbuffer Table.
// WARNING: This is an experimental API and subject to change.
constexpr char kFlexCustomCodePrefix[] = "Flex";
// Checks whether the prefix of the custom name indicates that the operation is
// a Flex operation.
bool IsFlexOp(const char* custom_name);
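//
// Illustrative sketch (hypothetical op names), relying only on the "Flex"
// prefix match described above:
//
//   IsFlexOp("FlexAdd");     // true: name starts with kFlexCustomCodePrefix.
//   IsFlexOp("MyCustomOp");  // false: no "Flex" prefix.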
// Converts a `std::vector` to a `TfLiteIntArray`. The caller takes ownership
// of the returned pointer.
TfLiteIntArray* ConvertVectorToTfLiteIntArray(const std::vector<int>& input);
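//
// Usage sketch (hypothetical shape values); the caller owns and must free the
// returned array:
//
//   std::vector<int> shape = {1, 224, 224, 3};
//   TfLiteIntArray* array = ConvertVectorToTfLiteIntArray(shape);
//   // ... use array->size and array->data ...
//   TfLiteIntArrayFree(array);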
// Converts an array (of the given size) to a `TfLiteIntArray`. The caller
// takes ownership of the returned pointer, and must make sure 'dims' has at
// least 'ndims' elements.
TfLiteIntArray* ConvertArrayToTfLiteIntArray(int ndims, const int* dims);
// Checks whether a `TfLiteIntArray` and an int array have matching elements.
// The caller must guarantee that 'b' has at least 'b_size' elements.
bool EqualArrayAndTfLiteIntArray(const TfLiteIntArray* a, int b_size,
const int* b);
// Combines the given hash values into a single hash.
size_t CombineHashes(std::initializer_list<size_t> hashes);
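//
// Usage sketch (arbitrary example hash values):
//
//   size_t h1 = 0x9e3779b9, h2 = 0x85ebca6b;
//   size_t combined = CombineHashes({h1, h2});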
// Populates the size in bytes of a type into `bytes`. Returns kTfLiteOk for
// valid types, and kTfLiteError otherwise.
TfLiteStatus GetSizeOfType(TfLiteContext* context, const TfLiteType type,
size_t* bytes);
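//
// Usage sketch (assumes `context` is a valid `TfLiteContext*`, used only for
// error reporting):
//
//   size_t type_size = 0;
//   if (GetSizeOfType(context, kTfLiteFloat32, &type_size) == kTfLiteOk) {
//     // type_size == sizeof(float)
//   }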
// Creates a stub TfLiteRegistration instance with the provided
// `custom_op_name`. The op will fail if invoked, and is useful as a
// placeholder to defer op resolution.
// Note that `custom_op_name` must remain valid for the returned op's lifetime.
TfLiteRegistration CreateUnresolvedCustomOp(const char* custom_op_name);
// Checks whether the provided op is an unresolved custom op.
bool IsUnresolvedCustomOp(const TfLiteRegistration& registration);
// Returns a descriptive name for the op in the given TfLiteRegistration.
std::string GetOpNameByRegistration(const TfLiteRegistration& registration);
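//
// Usage sketch (hypothetical custom op name; the string literal outlives the
// registration):
//
//   TfLiteRegistration reg = CreateUnresolvedCustomOp("MyCustomOp");
//   bool unresolved = IsUnresolvedCustomOp(reg);      // true
//   std::string name = GetOpNameByRegistration(reg);  // contains "MyCustomOp"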
// The prefix of a validation subgraph name.
// WARNING: This is an experimental API and subject to change.
constexpr char kValidationSubgraphNamePrefix[] = "VALIDATION:";
// Checks whether the prefix of the subgraph name indicates the subgraph is a
// validation subgraph.
bool IsValidationSubgraph(const char* name);
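//
// Illustrative sketch (hypothetical subgraph names), relying only on the
// "VALIDATION:" prefix match described above:
//
//   IsValidationSubgraph("VALIDATION:main");  // true
//   IsValidationSubgraph("main");             // false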
// Multiplies two sizes and writes the result to `product`. Returns
// kTfLiteError if the multiplication overflowed, and kTfLiteOk otherwise.
// This is based on tensorflow/overflow.h but is simpler since we already
// have unsigned numbers. It is also generalized to work where sizeof(size_t)
// is not 8.
TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product);
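//
// Usage sketch, e.g. when computing a buffer size from an element count
// (`num_elements` is a hypothetical size_t value):
//
//   size_t bytes = 0;
//   if (MultiplyAndCheckOverflow(num_elements, sizeof(float), &bytes) !=
//       kTfLiteOk) {
//     // Handle overflow rather than allocating a wrapped-around size.
//   }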
// Returns whether the TfLiteTensor is a resource or variant tensor.
inline bool IsResourceOrVariant(const TfLiteTensor* tensor) {
return tensor->type == kTfLiteResource || tensor->type == kTfLiteVariant;
}
// Computes the number of bytes required to represent a tensor with the
// dimensions specified by the array `dims` (of length `dims_size`).
// Populates `bytes` on success and returns the resulting status code.
TfLiteStatus BytesRequired(TfLiteType type, const int* dims, size_t dims_size,
size_t* bytes, TfLiteContext* context);
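//
// Usage sketch (hypothetical shape; assumes `context` is a valid
// `TfLiteContext*` used for error reporting):
//
//   const int dims[] = {1, 224, 224, 3};
//   size_t bytes = 0;
//   if (BytesRequired(kTfLiteFloat32, dims, 4, &bytes, context) == kTfLiteOk) {
//     // bytes == 1 * 224 * 224 * 3 * sizeof(float)
//   }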
/// UNIQUE PTR WRAPPERS ///
struct TfLiteIntArrayDeleter {
  void operator()(TfLiteIntArray* a) {
    if (a) {
      TfLiteIntArrayFree(a);
    }
  }
};
struct TfLiteTensorDeleter {
  void operator()(TfLiteTensor* t) {
    if (t) {
      TfLiteTensorFree(t);
    }
    free(t);
  }
};
// `unique_ptr` wrapper for `TfLiteIntArray`s.
using IntArrayUniquePtr =
std::unique_ptr<TfLiteIntArray, TfLiteIntArrayDeleter>;
// Creates a `TfLiteIntArray` holding the values in `data`, managed by the
// returned `unique_ptr`.
IntArrayUniquePtr BuildTfLiteIntArray(const std::vector<int>& data);
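//
// Usage sketch (hypothetical shape); the deleter frees the array
// automatically when the `unique_ptr` goes out of scope:
//
//   IntArrayUniquePtr shape = BuildTfLiteIntArray({1, 2, 3});
//   // shape->size == 3, shape->data[0] == 1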
// `unique_ptr` wrapper for `TfLiteTensor`s.
using TensorUniquePtr = std::unique_ptr<TfLiteTensor, TfLiteTensorDeleter>;
// Creates a new `TfLiteTensor` owned by the returned `unique_ptr`.
TensorUniquePtr BuildTfLiteTensor();
// Creates a `TfLiteTensor` with the given type, shape and allocation type,
// owned by the returned `unique_ptr`.
TensorUniquePtr BuildTfLiteTensor(TfLiteType type, const std::vector<int>& dims,
                                  TfLiteAllocationType allocation_type);
// Same as above, but takes ownership of `dims` rather than copying a vector.
TensorUniquePtr BuildTfLiteTensor(TfLiteType type, IntArrayUniquePtr dims,
                                  TfLiteAllocationType allocation_type);
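//
// Usage sketch (hypothetical type, shape and allocation type); the returned
// `unique_ptr` releases the tensor's buffers and the tensor struct itself:
//
//   TensorUniquePtr tensor =
//       BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);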
} // namespace tflite
#endif // TENSORFLOW_LITE_UTIL_H_