lite/c/c_api.h
C API for TensorFlow Lite.
Summary
The API leans towards simplicity and uniformity instead of convenience, as most usage will be by language-specific wrappers. It provides largely the same set of functionality as that of the C++ TensorFlow Lite Interpreter
API, but is useful for shared libraries where having a stable ABI boundary is important.
Conventions:
- We use the prefix TfLite for everything in the API.
- size_t is used to represent byte sizes of objects that are materialized in the address space of the calling process.
- int is used as an index into arrays.
Usage:
```c
// Create the model and interpreter options.
TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsSetNumThreads(options, 2);

// Create the interpreter.
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);

// Allocate tensors and populate the input tensor data.
TfLiteInterpreterAllocateTensors(interpreter);
TfLiteTensor* input_tensor =
    TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
                           input.size() * sizeof(float));

// Execute inference.
TfLiteInterpreterInvoke(interpreter);

// Extract the output tensor data.
const TfLiteTensor* output_tensor =
    TfLiteInterpreterGetOutputTensor(interpreter, 0);
TfLiteTensorCopyToBuffer(output_tensor, output.data(),
                         output.size() * sizeof(float));

// Dispose of the model and interpreter objects.
TfLiteInterpreterDelete(interpreter);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
```
Include lite/c/c_api.h to access the APIs documented on this page.
Typedefs

| Name | Declaration | Description |
|---|---|---|
| `TfLiteInterpreter` | `typedef struct TfLiteInterpreter` | TfLiteInterpreter provides inference from a provided model. |
| `TfLiteInterpreterOptions` | `typedef struct TfLiteInterpreterOptions` | TfLiteInterpreterOptions allows customized interpreter configuration. |
| `TfLiteModel` | `typedef struct TfLiteModel` | TfLiteModel wraps a loaded TensorFlow Lite model. |
| `TfLiteSignatureRunner` | `typedef struct TfLiteSignatureRunner` | TfLiteSignatureRunner is used to run inference on a signature. |
| `TfLiteTensor` | `typedef struct TfLiteTensor` | A tensor in the interpreter system which is a wrapper around a buffer of data including a dimensionality (or NULL if not currently defined). |
Functions

| Function | Returns | Description |
|---|---|---|
| `TfLiteExtensionApisVersion(void)` | `TFL_CAPI_EXPORT const char *` | The TensorFlow Lite Extension APIs version. |
| `TfLiteInterpreterAllocateTensors(TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT TfLiteStatus` | Updates allocations for all tensors, resizing dependent tensors using the specified input tensor dimensionality. |
| `TfLiteInterpreterCancel(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT TfLiteStatus` | Tries to cancel any in-flight invocation. |
| `TfLiteInterpreterCreate(const TfLiteModel *model, const TfLiteInterpreterOptions *optional_options)` | `TFL_CAPI_EXPORT TfLiteInterpreter *` | Returns a new interpreter using the provided model and options, or null on failure. |
| `TfLiteInterpreterDelete(TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT void` | Destroys the interpreter. |
| `TfLiteInterpreterGetInputTensor(const TfLiteInterpreter *interpreter, int32_t input_index)` | `TFL_CAPI_EXPORT TfLiteTensor *` | Returns the tensor associated with the input index. |
| `TfLiteInterpreterGetInputTensorCount(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT int32_t` | Returns the number of input tensors associated with the model. |
| `TfLiteInterpreterGetOutputTensor(const TfLiteInterpreter *interpreter, int32_t output_index)` | `TFL_CAPI_EXPORT const TfLiteTensor *` | Returns the tensor associated with the output index. |
| `TfLiteInterpreterGetOutputTensorCount(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT int32_t` | Returns the number of output tensors associated with the model. |
| `TfLiteInterpreterGetSignatureCount(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT int32_t` | Returns the number of signatures defined in the model (see the SignatureRunner APIs below). |
| `TfLiteInterpreterGetSignatureKey(const TfLiteInterpreter *interpreter, int32_t signature_index)` | `TFL_CAPI_EXPORT const char *` | Returns the key of the Nth signature in the model, where N is specified as signature_index. |
| `TfLiteInterpreterGetSignatureRunner(const TfLiteInterpreter *interpreter, const char *signature_key)` | `TFL_CAPI_EXPORT TfLiteSignatureRunner *` | Returns a new signature runner using the provided interpreter and signature key, or nullptr on failure. |
| `TfLiteInterpreterGetTensor(const TfLiteInterpreter *interpreter, int index)` | `TFL_CAPI_EXPORT TfLiteTensor *` | Returns modifiable access to the tensor that corresponds to the specified index and is associated with the provided interpreter. |
| `TfLiteInterpreterInputTensorIndices(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT const int *` | Returns a pointer to an array of input tensor indices. |
| `TfLiteInterpreterInvoke(TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT TfLiteStatus` | Runs inference for the loaded graph. |
| `TfLiteInterpreterOptionsAddDelegate(TfLiteInterpreterOptions *options, TfLiteOpaqueDelegate *delegate)` | `TFL_CAPI_EXPORT void` | Adds a delegate to be applied during TfLiteInterpreter creation. |
| `TfLiteInterpreterOptionsAddRegistrationExternal(TfLiteInterpreterOptions *options, TfLiteRegistrationExternal *registration)` | `TFL_CAPI_EXPORT void` | Adds an op registration to be applied during TfLiteInterpreter creation. |
| `TfLiteInterpreterOptionsCopy(const TfLiteInterpreterOptions *from)` | `TFL_CAPI_EXPORT TfLiteInterpreterOptions *` | Creates and returns a shallow copy of an options object. |
| `TfLiteInterpreterOptionsCreate()` | `TFL_CAPI_EXPORT TfLiteInterpreterOptions *` | Returns a new interpreter options instance. |
| `TfLiteInterpreterOptionsDelete(TfLiteInterpreterOptions *options)` | `TFL_CAPI_EXPORT void` | Destroys the interpreter options instance. |
| `TfLiteInterpreterOptionsEnableCancellation(TfLiteInterpreterOptions *options, bool enable)` | `TFL_CAPI_EXPORT TfLiteStatus` | Enables users to cancel in-flight invocations with TfLiteInterpreterCancel. |
| `TfLiteInterpreterOptionsSetErrorReporter(TfLiteInterpreterOptions *options, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)` | `TFL_CAPI_EXPORT void` | Sets a custom error reporter for interpreter execution. |
| `TfLiteInterpreterOptionsSetNumThreads(TfLiteInterpreterOptions *options, int32_t num_threads)` | `TFL_CAPI_EXPORT void` | Sets the number of CPU threads to use for the interpreter. |
| `TfLiteInterpreterOutputTensorIndices(const TfLiteInterpreter *interpreter)` | `TFL_CAPI_EXPORT const int *` | Returns a pointer to an array of output tensor indices. |
| `TfLiteInterpreterResizeInputTensor(TfLiteInterpreter *interpreter, int32_t input_index, const int *input_dims, int32_t input_dims_size)` | `TFL_CAPI_EXPORT TfLiteStatus` | Resizes the specified input tensor. |
| `TfLiteModelCreate(const void *model_data, size_t model_size)` | `TFL_CAPI_EXPORT TfLiteModel *` | Returns a model from the provided buffer, or null on failure. |
| `TfLiteModelCreateFromFile(const char *model_path)` | `TFL_CAPI_EXPORT TfLiteModel *` | Returns a model from the provided file, or null on failure. |
| `TfLiteModelCreateFromFileWithErrorReporter(const char *model_path, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)` | `TFL_CAPI_EXPORT TfLiteModel *` | Same as TfLiteModelCreateFromFile, with a customizable error reporter. |
| `TfLiteModelCreateWithErrorReporter(const void *model_data, size_t model_size, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)` | `TFL_CAPI_EXPORT TfLiteModel *` | Same as TfLiteModelCreate, with a customizable error reporter. |
| `TfLiteModelDelete(TfLiteModel *model)` | `TFL_CAPI_EXPORT void` | Destroys the model instance. |
| `TfLiteSchemaVersion(void)` | `TFL_CAPI_EXPORT int` | The supported TensorFlow Lite model file Schema version. |
| `TfLiteSignatureRunnerAllocateTensors(TfLiteSignatureRunner *signature_runner)` | `TFL_CAPI_EXPORT TfLiteStatus` | Updates allocations for tensors associated with a signature, resizing dependent tensors using the specified input tensor dimensionality. |
| `TfLiteSignatureRunnerDelete(TfLiteSignatureRunner *signature_runner)` | `TFL_CAPI_EXPORT void` | Destroys the signature runner. |
| `TfLiteSignatureRunnerGetInputCount(const TfLiteSignatureRunner *signature_runner)` | `TFL_CAPI_EXPORT size_t` | Returns the number of inputs associated with a signature. |
| `TfLiteSignatureRunnerGetInputName(const TfLiteSignatureRunner *signature_runner, int32_t input_index)` | `TFL_CAPI_EXPORT const char *` | Returns the (null-terminated) name of the Nth input in a signature, where N is specified as input_index. |
| `TfLiteSignatureRunnerGetInputTensor(TfLiteSignatureRunner *signature_runner, const char *input_name)` | `TFL_CAPI_EXPORT TfLiteTensor *` | Returns the input tensor identified by input_name in the given signature. |
| `TfLiteSignatureRunnerGetOutputCount(const TfLiteSignatureRunner *signature_runner)` | `TFL_CAPI_EXPORT size_t` | Returns the number of output tensors associated with the signature. |
| `TfLiteSignatureRunnerGetOutputName(const TfLiteSignatureRunner *signature_runner, int32_t output_index)` | `TFL_CAPI_EXPORT const char *` | Returns the (null-terminated) name of the Nth output in a signature, where N is specified as output_index. |
| `TfLiteSignatureRunnerGetOutputTensor(const TfLiteSignatureRunner *signature_runner, const char *output_name)` | `TFL_CAPI_EXPORT const TfLiteTensor *` | Returns the output tensor identified by output_name in the given signature. |
| `TfLiteSignatureRunnerInvoke(TfLiteSignatureRunner *signature_runner)` | `TFL_CAPI_EXPORT TfLiteStatus` | Runs inference on a given signature. |
| `TfLiteSignatureRunnerResizeInputTensor(TfLiteSignatureRunner *signature_runner, const char *input_name, const int *input_dims, int32_t input_dims_size)` | `TFL_CAPI_EXPORT TfLiteStatus` | Resizes the input tensor identified as input_name to the dimensions specified by input_dims and input_dims_size. |
| `TfLiteTensorByteSize(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT size_t` | Returns the size of the underlying data in bytes. |
| `TfLiteTensorCopyFromBuffer(TfLiteTensor *tensor, const void *input_data, size_t input_data_size)` | `TFL_CAPI_EXPORT TfLiteStatus` | Copies from the provided input buffer into the tensor's buffer. |
| `TfLiteTensorCopyToBuffer(const TfLiteTensor *output_tensor, void *output_data, size_t output_data_size)` | `TFL_CAPI_EXPORT TfLiteStatus` | Copies to the provided output buffer from the tensor's buffer. |
| `TfLiteTensorData(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT void *` | Returns a pointer to the underlying data buffer. |
| `TfLiteTensorDim(const TfLiteTensor *tensor, int32_t dim_index)` | `TFL_CAPI_EXPORT int32_t` | Returns the length of the tensor in the "dim_index" dimension. |
| `TfLiteTensorName(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT const char *` | Returns the (null-terminated) name of the tensor. |
| `TfLiteTensorNumDims(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT int32_t` | Returns the number of dimensions that the tensor has. |
| `TfLiteTensorQuantizationParams(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT TfLiteQuantizationParams` | Returns the parameters for asymmetric quantization. |
| `TfLiteTensorType(const TfLiteTensor *tensor)` | `TFL_CAPI_EXPORT TfLiteType` | Returns the type of a tensor element. |
| `TfLiteVersion(void)` | `TFL_CAPI_EXPORT const char *` | The TensorFlow Lite Runtime version. |
Typedefs
TfLiteInterpreter
struct TfLiteInterpreter TfLiteInterpreter
TfLiteInterpreter provides inference from a provided model.
TfLiteInterpreterOptions
struct TfLiteInterpreterOptions TfLiteInterpreterOptions
TfLiteInterpreterOptions allows customized interpreter configuration.
TfLiteModel
struct TfLiteModel TfLiteModel
TfLiteModel wraps a loaded TensorFlow Lite model.
TfLiteSignatureRunner
struct TfLiteSignatureRunner TfLiteSignatureRunner
TfLiteSignatureRunner is used to run inference on a signature.
Note: A signature is used to define a computation in a TF model. A model can have multiple signatures. Each signature contains three components:
- Signature Key: A unique string to identify a signature
- Inputs: A list of names, each mapped to an input tensor of a signature
- Outputs: A list of names, each mapped to an output tensor of a signature
To learn more about signatures in TFLite, refer to: https://www.tensorflow.org/lite/guide/signatures
Using the TfLiteSignatureRunner, for a particular signature, you can set its inputs, invoke (i.e. execute) the computation, and retrieve its outputs.
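For illustration, here is a minimal sketch of that flow via the C API. The signature key "serving_default" and the tensor names "x" and "y" are hypothetical and depend on the model:

```c
#include <stddef.h>
#include "tensorflow/lite/c/c_api.h"

// Minimal sketch, assuming a model with a signature keyed
// "serving_default" that has one float input "x" and one float
// output "y" of n elements each (all hypothetical names).
void RunSignature(TfLiteInterpreter* interpreter,
                  const float* in, float* out, size_t n) {
  TfLiteSignatureRunner* runner =
      TfLiteInterpreterGetSignatureRunner(interpreter, "serving_default");
  if (!runner) return;  // No signature with that key.

  // Allocate tensors for this signature, then set the input.
  // n * sizeof(float) must match the input tensor's byte size.
  TfLiteSignatureRunnerAllocateTensors(runner);
  TfLiteTensor* input = TfLiteSignatureRunnerGetInputTensor(runner, "x");
  TfLiteTensorCopyFromBuffer(input, in, n * sizeof(float));

  // Invoke (i.e. execute) the computation, then retrieve the output.
  if (TfLiteSignatureRunnerInvoke(runner) == kTfLiteOk) {
    const TfLiteTensor* output =
        TfLiteSignatureRunnerGetOutputTensor(runner, "y");
    TfLiteTensorCopyToBuffer(output, out, n * sizeof(float));
  }

  // Destroy the runner before the interpreter is destroyed.
  TfLiteSignatureRunnerDelete(runner);
}
```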
TfLiteTensor
struct TfLiteTensor TfLiteTensor
A tensor in the interpreter system which is a wrapper around a buffer of data including a dimensionality (or NULL if not currently defined).
Functions
TfLiteExtensionApisVersion
TFL_CAPI_EXPORT const char * TfLiteExtensionApisVersion( void )
The TensorFlow Lite Extension APIs version.
Returns a pointer to a statically allocated string that is the version number of the TF Lite Extension APIs supported by the (potentially dynamically loaded) TF Lite Runtime library. The TF Lite "Extension APIs" are the APIs for extending TF Lite with custom ops and delegates. More specifically, this version number covers the (non-experimental) functionality documented in the following header files:
- lite/c/c_api_opaque.h
- lite/c/common.h
- lite/c/builtin_op_data.h
- lite/builtin_ops.h
This version number uses semantic versioning, and the return value should be in semver 2 format http://semver.org, starting with MAJOR.MINOR.PATCH, e.g. "2.14.0" or "2.15.0-rc2".
TfLiteInterpreterAllocateTensors
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterAllocateTensors( TfLiteInterpreter *interpreter )
Updates allocations for all tensors, resizing dependent tensors using the specified input tensor dimensionality.
This is a relatively expensive operation, and need only be called after creating the graph and/or resizing any inputs.
TfLiteInterpreterCancel
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterCancel( const TfLiteInterpreter *interpreter )
Tries to cancel any in-flight invocation.
Returns kTfLiteError if cancellation is not enabled via TfLiteInterpreterOptionsEnableCancellation.
TfLiteInterpreterCreate
TFL_CAPI_EXPORT TfLiteInterpreter * TfLiteInterpreterCreate( const TfLiteModel *model, const TfLiteInterpreterOptions *optional_options )
Returns a new interpreter using the provided model and options, or null on failure.
model must be a valid model instance. The caller retains ownership of the object, and may destroy it (via TfLiteModelDelete) immediately after creating the interpreter. However, if the TfLiteModel was allocated with TfLiteModelCreate, then the model_data buffer that was passed to TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter object that this function returns, and must not be modified during that time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile, then the contents of the model file must not be modified during the lifetime of the TfLiteInterpreter object that this function returns.

optional_options may be null. The caller retains ownership of the object, and can safely destroy it (via TfLiteInterpreterOptionsDelete) immediately after creating the interpreter.
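A short sketch of these ownership rules, where buffer and buffer_size stand for hypothetical caller-owned model data:

```c
#include <stddef.h>
#include "tensorflow/lite/c/c_api.h"

// Sketch: 'buffer' must outlive the interpreter, but the TfLiteModel
// handle itself may be deleted immediately after interpreter creation.
TfLiteInterpreter* CreateFromBuffer(const void* buffer, size_t buffer_size) {
  TfLiteModel* model = TfLiteModelCreate(buffer, buffer_size);
  if (!model) return NULL;
  TfLiteInterpreter* interpreter =
      TfLiteInterpreterCreate(model, /*optional_options=*/NULL);
  TfLiteModelDelete(model);  // OK: the handle is no longer needed.
  // NOTE: the caller must keep 'buffer' alive and unmodified until
  // TfLiteInterpreterDelete(interpreter) has been called.
  return interpreter;
}
```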
TfLiteInterpreterDelete
TFL_CAPI_EXPORT void TfLiteInterpreterDelete( TfLiteInterpreter *interpreter )
Destroys the interpreter.
TfLiteInterpreterGetInputTensor
TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetInputTensor( const TfLiteInterpreter *interpreter, int32_t input_index )
Returns the tensor associated with the input index.
REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
TfLiteInterpreterGetInputTensorCount
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetInputTensorCount( const TfLiteInterpreter *interpreter )
Returns the number of input tensors associated with the model.
TfLiteInterpreterGetOutputTensor
TFL_CAPI_EXPORT const TfLiteTensor * TfLiteInterpreterGetOutputTensor( const TfLiteInterpreter *interpreter, int32_t output_index )
Returns the tensor associated with the output index.
REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(interpreter)
TfLiteInterpreterGetOutputTensorCount
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetOutputTensorCount( const TfLiteInterpreter *interpreter )
Returns the number of output tensors associated with the model.
TfLiteInterpreterGetSignatureCount
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetSignatureCount( const TfLiteInterpreter *interpreter )
SignatureRunner APIs
You can run inference by either:
(i) (recommended) using the Interpreter to initialize SignatureRunner(s) and then only using SignatureRunner APIs.
(ii) only using Interpreter APIs.
NOTE:
- Only use one of the above options to run inference, i.e. avoid mixing SignatureRunner APIs and Interpreter APIs to run inference, as they share the same underlying data (e.g. updating an input tensor "A" retrieved using the Interpreter APIs will update the state of the input tensor "B" retrieved using the SignatureRunner APIs, if they point to the same underlying tensor in the model; since it is not possible for a user to debug this by analyzing the code, it can lead to undesirable behavior).
- The TfLiteSignatureRunner type is conditionally thread-safe, provided that no two threads attempt to simultaneously access two TfLiteSignatureRunner instances that point to the same underlying signature, or access a TfLiteSignatureRunner and its underlying TfLiteInterpreter, unless all such simultaneous accesses are reads (rather than writes).
- The lifetime of a TfLiteSignatureRunner object ends when TfLiteSignatureRunnerDelete() is called on it (or when the lifetime of the underlying TfLiteInterpreter ends, but you should call TfLiteSignatureRunnerDelete() before that happens in order to avoid resource leaks).
- You can only apply delegates to the interpreter (via TfLiteInterpreterOptions), not to a signature.

Returns the number of signatures defined in the model.
TfLiteInterpreterGetSignatureKey
TFL_CAPI_EXPORT const char * TfLiteInterpreterGetSignatureKey( const TfLiteInterpreter *interpreter, int32_t signature_index )
Returns the key of the Nth signature in the model, where N is specified as signature_index.

NOTE: The lifetime of the returned key is the same as (and depends on) the lifetime of interpreter.
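A brief sketch of enumerating the model's signature keys with this function and TfLiteInterpreterGetSignatureCount:

```c
#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

// Print every signature key in the model. The returned keys are owned
// by the interpreter and remain valid only as long as it does.
void PrintSignatureKeys(const TfLiteInterpreter* interpreter) {
  int32_t count = TfLiteInterpreterGetSignatureCount(interpreter);
  for (int32_t i = 0; i < count; ++i) {
    printf("signature %d: %s\n", i,
           TfLiteInterpreterGetSignatureKey(interpreter, i));
  }
}
```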
TfLiteInterpreterGetSignatureRunner
TFL_CAPI_EXPORT TfLiteSignatureRunner * TfLiteInterpreterGetSignatureRunner( const TfLiteInterpreter *interpreter, const char *signature_key )
Returns a new signature runner using the provided interpreter and signature key, or nullptr on failure.
NOTE: signature_key is a null-terminated C string that must match the key of a signature in the interpreter's model.
NOTE: The returned signature runner should be destroyed, by calling TfLiteSignatureRunnerDelete(), before the interpreter is destroyed.
TfLiteInterpreterGetTensor
TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetTensor( const TfLiteInterpreter *interpreter, int index )
Returns modifiable access to the tensor that corresponds to the specified index and is associated with the provided interpreter.

This requires the index to be between 0 and N - 1, where N is the number of tensors in the model.

Typically the tensors associated with the interpreter would be set during the interpreter initialization, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to AddTensors on the interpreter invalidate the returned pointer.

Note the difference between this function and TfLiteInterpreterGetInputTensor (or TfLiteInterpreterGetOutputTensor for that matter): TfLiteInterpreterGetTensor takes an index into the array of all tensors associated with the interpreter's model, whereas TfLiteInterpreterGetInputTensor takes an index into the array of input tensors.

The ownership of the tensor remains with the TFLite runtime, meaning the caller should not deallocate the pointer.
TfLiteInterpreterInputTensorIndices
TFL_CAPI_EXPORT const int * TfLiteInterpreterInputTensorIndices( const TfLiteInterpreter *interpreter )
Returns a pointer to an array of input tensor indices.
The length of the array can be obtained via a call to TfLiteInterpreterGetInputTensorCount.

Typically the input tensors associated with an interpreter would be set during its initialization, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to SetInputs on the interpreter invalidate the returned pointer.
The ownership of the array remains with the TFLite runtime.
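For example, the indices can be combined with TfLiteInterpreterGetTensor to inspect each input (a sketch; the returned array is owned by the runtime and must not be freed):

```c
#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

// Map each input position to its global tensor index, then inspect it.
void DumpInputs(const TfLiteInterpreter* interpreter) {
  const int* indices = TfLiteInterpreterInputTensorIndices(interpreter);
  int32_t n = TfLiteInterpreterGetInputTensorCount(interpreter);
  for (int32_t i = 0; i < n; ++i) {
    const TfLiteTensor* t =
        TfLiteInterpreterGetTensor(interpreter, indices[i]);
    printf("input %d -> tensor %d (%s), %zu bytes\n", i, indices[i],
           TfLiteTensorName(t), TfLiteTensorByteSize(t));
  }
}
```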
TfLiteInterpreterInvoke
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterInvoke( TfLiteInterpreter *interpreter )
Runs inference for the loaded graph.
Before calling this function, the caller should first invoke TfLiteInterpreterAllocateTensors() and should also set the values for the input tensors. After successfully calling this function, the values for the output tensors will be set.
If the (experimental!) delegate fallback option was enabled in the interpreter options, then the interpreter will automatically fall back to not using any delegates if execution with delegates fails. For details, see TfLiteInterpreterOptionsSetEnableDelegateFallback in c_api_experimental.h.
Returns one of the following status codes:
- kTfLiteOk: Success. Output is valid.
- kTfLiteDelegateError: Execution with delegates failed, due to a problem with the delegate(s). If fallback was not enabled, output is invalid. If fallback was enabled, this return value indicates that fallback succeeded, the output is valid, and all delegates previously applied to the interpreter have been undone.
- kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that the problem was not with the delegate itself, but rather was due to an incompatibility between the delegate(s) and the interpreter or model.
- kTfLiteError: Unexpected/runtime failure. Output is invalid.
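A sketch of dispatching on these status codes after an invocation (whether the delegate-fallback case yields valid output depends on the experimental fallback option described above):

```c
#include <stdbool.h>
#include "tensorflow/lite/c/c_api.h"

// Returns true if the output tensors may be read after this invocation,
// assuming the experimental delegate fallback option is enabled.
bool InvokeAndCheck(TfLiteInterpreter* interpreter) {
  switch (TfLiteInterpreterInvoke(interpreter)) {
    case kTfLiteOk:
      return true;  // Success; output is valid.
    case kTfLiteDelegateError:
      // With fallback enabled, fallback succeeded and output is valid;
      // without it, the output must be treated as invalid.
      return true;
    case kTfLiteApplicationError:  // Delegate/interpreter incompatibility.
    default:                       // kTfLiteError or other runtime failure.
      return false;
  }
}
```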
TfLiteInterpreterOptionsAddDelegate
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddDelegate( TfLiteInterpreterOptions *options, TfLiteOpaqueDelegate *delegate )
Adds a delegate to be applied during TfLiteInterpreter creation.

If delegate application fails, interpreter creation will also fail, with an associated error logged.

If you are NOT using "TensorFlow Lite in Play Services", and NOT building with the TFLITE_WITH_STABLE_ABI or TFLITE_USE_OPAQUE_DELEGATE macros enabled, it is possible to pass a TfLiteDelegate* rather than a TfLiteOpaqueDelegate* to this function, since in those cases TfLiteOpaqueDelegate is just a typedef alias for TfLiteDelegate. This is for compatibility with existing source code and existing delegates. For new delegates, it is recommended to use TfLiteOpaqueDelegate rather than TfLiteDelegate. (See TfLiteOpaqueDelegate in tensorflow/lite/core/c/c_api_types.h.)
TfLiteInterpreterOptionsAddRegistrationExternal
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddRegistrationExternal( TfLiteInterpreterOptions *options, TfLiteRegistrationExternal *registration )
Adds an op registration to be applied during TfLiteInterpreter creation.

The TfLiteRegistrationExternal object is needed to implement custom ops for the TFLite Interpreter via the C API. Calling this function ensures that any TfLiteInterpreter created with the specified options can execute models that use the custom operator specified in registration. Please refer to https://www.tensorflow.org/lite/guide/ops_custom for custom op support. This is an experimental API and subject to change.
TfLiteInterpreterOptionsCopy
TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCopy( const TfLiteInterpreterOptions *from )
Creates and returns a shallow copy of an options object.
The caller is responsible for calling TfLiteInterpreterOptionsDelete to deallocate the object pointed to by the returned pointer.
TfLiteInterpreterOptionsCreate
TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCreate()
Returns a new interpreter options instance.
TfLiteInterpreterOptionsDelete
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsDelete( TfLiteInterpreterOptions *options )
Destroys the interpreter options instance.
TfLiteInterpreterOptionsEnableCancellation
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterOptionsEnableCancellation( TfLiteInterpreterOptions *options, bool enable )
Enables users to cancel in-flight invocations with TfLiteInterpreterCancel.

By default this is disabled, and calling TfLiteInterpreterCancel will return kTfLiteError. See TfLiteInterpreterCancel.
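A sketch of both halves of the cancellation flow; in practice the cancel call would come from a different thread than the invocation:

```c
#include <stdbool.h>
#include "tensorflow/lite/c/c_api.h"

// Setup: opt in to cancellation before creating the interpreter.
TfLiteInterpreter* CreateCancellable(const TfLiteModel* model) {
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  TfLiteInterpreterOptionsEnableCancellation(options, true);
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
  TfLiteInterpreterOptionsDelete(options);  // Safe after creation.
  return interpreter;
}

// Later, typically from another thread while Invoke() is running.
// Returns kTfLiteError if cancellation was not enabled above.
void CancelFromOtherThread(const TfLiteInterpreter* interpreter) {
  TfLiteInterpreterCancel(interpreter);
}
```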
TfLiteInterpreterOptionsSetErrorReporter
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetErrorReporter( TfLiteInterpreterOptions *options, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Sets a custom error reporter for interpreter execution.
reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).

user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.
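A sketch of a conforming reporter callback that forwards messages to stderr via user_data:

```c
#include <stdarg.h>
#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

// Writes each interpreter error message to the FILE* passed as user_data.
static void FileReporter(void* user_data, const char* format, va_list args) {
  vfprintf((FILE*)user_data, format, args);
  fputc('\n', (FILE*)user_data);
}

// Registration: stderr is the client-owned user_data object here; it
// trivially outlives the interpreter.
void InstallReporter(TfLiteInterpreterOptions* options) {
  TfLiteInterpreterOptionsSetErrorReporter(options, FileReporter, stderr);
}
```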
TfLiteInterpreterOptionsSetNumThreads
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetNumThreads( TfLiteInterpreterOptions *options, int32_t num_threads )
Sets the number of CPU threads to use for the interpreter.
TfLiteInterpreterOutputTensorIndices
TFL_CAPI_EXPORT const int * TfLiteInterpreterOutputTensorIndices( const TfLiteInterpreter *interpreter )
Returns a pointer to an array of output tensor indices.
The length of the array can be obtained via a call to TfLiteInterpreterGetOutputTensorCount.

Typically the output tensors associated with an interpreter would be set during its initialization, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to SetOutputs on the interpreter invalidate the returned pointer.
The ownership of the array remains with the TFLite runtime.
TfLiteInterpreterResizeInputTensor
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterResizeInputTensor( TfLiteInterpreter *interpreter, int32_t input_index, const int *input_dims, int32_t input_dims_size )
Resizes the specified input tensor.
REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)

This function makes a copy of the input dimensions, so the client can safely deallocate input_dims immediately after this function returns.
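A sketch of resizing input 0 to a hypothetical [1, 224, 224, 3] shape and then re-allocating:

```c
#include "tensorflow/lite/c/c_api.h"

// Sketch: resize input 0 to a hypothetical new shape. The dims array
// is copied by the call, so stack storage is fine.
TfLiteStatus ResizeInput0(TfLiteInterpreter* interpreter) {
  const int dims[4] = {1, 224, 224, 3};
  TfLiteStatus s =
      TfLiteInterpreterResizeInputTensor(interpreter, 0, dims, 4);
  if (s == kTfLiteOk) {
    // Re-run allocation so dependent tensors pick up the new shape.
    s = TfLiteInterpreterAllocateTensors(interpreter);
  }
  return s;
}
```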
TfLiteModelCreate
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreate( const void *model_data, size_t model_size )
Returns a model from the provided buffer, or null on failure.
TfLiteModelCreateFromFile
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFile( const char *model_path )
Returns a model from the provided file, or null on failure.
TfLiteModelCreateFromFileWithErrorReporter
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFileWithErrorReporter( const char *model_path, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Same as TfLiteModelCreateFromFile, with a customizable error reporter.

reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).

user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.
TfLiteModelCreateWithErrorReporter
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateWithErrorReporter( const void *model_data, size_t model_size, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Same as TfLiteModelCreate, with a customizable error reporter.

reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).

user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.
TfLiteModelDelete
TFL_CAPI_EXPORT void TfLiteModelDelete( TfLiteModel *model )
Destroys the model instance.
TfLiteSchemaVersion
TFL_CAPI_EXPORT int TfLiteSchemaVersion( void )
The supported TensorFlow Lite model file Schema version.
Returns the (major) version number of the Schema used for model files that is supported by the (potentially dynamically loaded) TensorFlow Lite Runtime.
Model files using schema versions different from this one may not be supported by the current version of the TF Lite Runtime.
TfLiteSignatureRunnerAllocateTensors
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerAllocateTensors( TfLiteSignatureRunner *signature_runner )
Updates allocations for tensors associated with a signature and resizes dependent tensors using the specified input tensor dimensionality.
This is a relatively expensive operation and hence should only be called after initializing the signature runner object and/or resizing any inputs.
TfLiteSignatureRunnerDelete
TFL_CAPI_EXPORT void TfLiteSignatureRunnerDelete( TfLiteSignatureRunner *signature_runner )
Destroys the signature runner.
TfLiteSignatureRunnerGetInputCount
TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetInputCount( const TfLiteSignatureRunner *signature_runner )
Returns the number of inputs associated with a signature.
TfLiteSignatureRunnerGetInputName
TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetInputName( const TfLiteSignatureRunner *signature_runner, int32_t input_index )
Returns the (null-terminated) name of the Nth input in a signature, where N is specified as input_index.

NOTE: The lifetime of the returned name is the same as (and depends on) the lifetime of signature_runner.
TfLiteSignatureRunnerGetInputTensor
TFL_CAPI_EXPORT TfLiteTensor * TfLiteSignatureRunnerGetInputTensor( TfLiteSignatureRunner *signature_runner, const char *input_name )
Returns the input tensor identified by input_name in the given signature.

Returns nullptr if the given name is not valid.

NOTE: The lifetime of the returned tensor is the same as (and depends on) the lifetime of signature_runner.
TfLiteSignatureRunnerGetOutputCount
TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetOutputCount( const TfLiteSignatureRunner *signature_runner )
Returns the number of output tensors associated with the signature.
TfLiteSignatureRunnerGetOutputName
TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetOutputName( const TfLiteSignatureRunner *signature_runner, int32_t output_index )
Returns the (null-terminated) name of the Nth output in a signature, where N is specified as output_index.

NOTE: The lifetime of the returned name is the same as (and depends on) the lifetime of signature_runner.
TfLiteSignatureRunnerGetOutputTensor
TFL_CAPI_EXPORT const TfLiteTensor * TfLiteSignatureRunnerGetOutputTensor( const TfLiteSignatureRunner *signature_runner, const char *output_name )
Returns the output tensor identified by output_name in the given signature.

Returns nullptr if the given name is not valid.

NOTE: The lifetime of the returned tensor is the same as (and depends on) the lifetime of signature_runner.
TfLiteSignatureRunnerInvoke
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerInvoke( TfLiteSignatureRunner *signature_runner )
Runs inference on a given signature.
Before calling this function, the caller should first invoke TfLiteSignatureRunnerAllocateTensors() and should also set the values for the input tensors. After successfully calling this function, the values for the output tensors will be set.
TfLiteSignatureRunnerResizeInputTensor
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerResizeInputTensor( TfLiteSignatureRunner *signature_runner, const char *input_name, const int *input_dims, int32_t input_dims_size )
Resizes the input tensor identified as input_name to be the dimensions specified by input_dims and input_dims_size.

Only unknown dimensions can be resized with this function. Unknown dimensions are indicated as -1 in the dims_signature attribute of a TfLiteTensor.

Returns status of failure or success. Note that this doesn't actually resize any existing buffers. A call to TfLiteSignatureRunnerAllocateTensors() is required to change the tensor input buffer.

NOTE: This function is similar to TfLiteInterpreterResizeInputTensorStrict() and not TfLiteInterpreterResizeInputTensor().

NOTE: input_name must match the name of an input in the signature.

NOTE: This function makes a copy of the input dimensions, so the caller can safely deallocate input_dims immediately after this function returns.
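A sketch under these constraints, assuming a hypothetical input "x" whose dims_signature is [-1, 128]:

```c
#include "tensorflow/lite/c/c_api.h"

// Resize the batch dimension of input "x" (hypothetical name), then
// re-allocate before copying in new data.
TfLiteStatus ResizeBatch(TfLiteSignatureRunner* runner, int batch_size) {
  const int dims[2] = {batch_size, 128};
  TfLiteStatus s =
      TfLiteSignatureRunnerResizeInputTensor(runner, "x", dims, 2);
  if (s == kTfLiteOk) s = TfLiteSignatureRunnerAllocateTensors(runner);
  return s;
}
```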
TfLiteTensorByteSize
TFL_CAPI_EXPORT size_t TfLiteTensorByteSize( const TfLiteTensor *tensor )
Returns the size of the underlying data in bytes.
TfLiteTensorCopyFromBuffer
TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyFromBuffer( TfLiteTensor *tensor, const void *input_data, size_t input_data_size )
Copies from the provided input buffer into the tensor's buffer.
REQUIRES: input_data_size == TfLiteTensorByteSize(tensor)
TfLiteTensorCopyToBuffer
TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyToBuffer( const TfLiteTensor *output_tensor, void *output_data, size_t output_data_size )
Copies to the provided output buffer from the tensor's buffer.
REQUIRES: output_data_size == TfLiteTensorByteSize(tensor)
TfLiteTensorData
TFL_CAPI_EXPORT void * TfLiteTensorData( const TfLiteTensor *tensor )
Returns a pointer to the underlying data buffer.
TfLiteTensorDim
TFL_CAPI_EXPORT int32_t TfLiteTensorDim( const TfLiteTensor *tensor, int32_t dim_index )
Returns the length of the tensor in the "dim_index" dimension.
REQUIRES: 0 <= dim_index < TfLiteTensorNumDims(tensor)
TfLiteTensorName
TFL_CAPI_EXPORT const char * TfLiteTensorName( const TfLiteTensor *tensor )
Returns the (null-terminated) name of the tensor.
TfLiteTensorNumDims
TFL_CAPI_EXPORT int32_t TfLiteTensorNumDims( const TfLiteTensor *tensor )
Returns the number of dimensions that the tensor has.
Returns -1 if the tensor does not have its dimensions property set.
TfLiteTensorQuantizationParams
TFL_CAPI_EXPORT TfLiteQuantizationParams TfLiteTensorQuantizationParams( const TfLiteTensor *tensor )
Returns the parameters for asymmetric quantization.
The quantization parameters are only valid when the tensor type is kTfLiteUInt8 and the scale != 0. Quantized values can be converted back to float using: real_value = scale * (quantized_value - zero_point);
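As a sketch, applying that formula to dequantize a kTfLiteUInt8 tensor into a caller-provided float buffer (the out parameter is hypothetical):

```c
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/c/c_api.h"

// Dequantizes a kTfLiteUInt8 tensor into 'out' (caller-allocated, one
// float per element), using real_value = scale * (quantized - zero_point).
void DequantizeUInt8(const TfLiteTensor* tensor, float* out) {
  if (TfLiteTensorType(tensor) != kTfLiteUInt8) return;
  TfLiteQuantizationParams q = TfLiteTensorQuantizationParams(tensor);
  const uint8_t* data = (const uint8_t*)TfLiteTensorData(tensor);
  size_t n = TfLiteTensorByteSize(tensor);  // One byte per uint8 element.
  for (size_t i = 0; i < n; ++i) {
    out[i] = q.scale * ((int32_t)data[i] - q.zero_point);
  }
}
```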
TfLiteTensorType
TFL_CAPI_EXPORT TfLiteType TfLiteTensorType( const TfLiteTensor *tensor )
Returns the type of a tensor element.
TfLiteVersion
TFL_CAPI_EXPORT const char * TfLiteVersion( void )
The TensorFlow Lite Runtime version.
Returns a pointer to a statically allocated string that is the version number of the (potentially dynamically loaded) TF Lite Runtime library. TensorFlow Lite uses semantic versioning, and the return value should be in semver 2 format http://semver.org, starting with MAJOR.MINOR.PATCH, e.g. "2.12.0" or "2.13.0-rc2".
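For example, a quick runtime check combining this with TfLiteSchemaVersion:

```c
#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

// Report which runtime was actually loaded (useful with dynamic loading).
void PrintRuntimeInfo(void) {
  printf("TensorFlow Lite runtime %s, model schema v%d\n",
         TfLiteVersion(), TfLiteSchemaVersion());
}
```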