/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/// <amd-module name="@tensorflow/tfjs-converter/dist/executor/graph_model" />
import { InferenceModel, io, ModelPredictConfig, NamedTensorMap, Tensor } from '@tensorflow/tfjs-core';
import { NamedTensorsMap, TensorInfo } from '../data/types';
export declare const TFHUB_SEARCH_PARAM = "?tfjs-format=file";
export declare const DEFAULT_MODEL_NAME = "model.json";
type Url = string | io.IOHandler | io.IOHandlerSync;
type UrlIOHandler<T extends Url> = T extends string ? io.IOHandler : T;
/**
* A `tf.GraphModel` is a directed, acyclic graph built from a
* SavedModel GraphDef and allows inference execution.
*
* A `tf.GraphModel` can only be created from a model converted from a
* [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) with
* the command line converter tool, and then loaded via `tf.loadGraphModel`.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
export declare class GraphModel<ModelURL extends Url = string | io.IOHandler> implements InferenceModel {
private modelUrl;
private loadOptions;
private executor;
private version;
private handler;
private artifacts;
private initializer;
private resourceIdToCapturedInput;
private resourceManager;
private signature;
private initializerSignature;
private structuredOutputKeys;
private readonly io;
get modelVersion(): string;
get inputNodes(): string[];
get outputNodes(): string[];
get inputs(): TensorInfo[];
get outputs(): TensorInfo[];
get weights(): NamedTensorsMap;
get metadata(): {};
get modelSignature(): {};
get modelStructuredOutputKeys(): {};
/**
* @param modelUrl url for the model, or an `io.IOHandler`.
* @param loadOptions options for loading the model, such as request options
* for sending credentials and custom headers, and an optional `onProgress`
* callback fired periodically before the load is completed.
*/
constructor(modelUrl: ModelURL, loadOptions?: io.LoadOptions, tfio?: typeof io);
private findIOHandler;
/**
* Loads the model and weight files, constructs the in-memory weight map and
* compiles the inference graph.
*/
load(): UrlIOHandler<ModelURL> extends io.IOHandlerSync ? boolean : Promise<boolean>;
/**
* Synchronously constructs the in-memory weight map and
* compiles the inference graph.
*
* @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
*/
loadSync(artifacts: io.ModelArtifacts): boolean;
private loadStreaming;
private loadWithWeightMap;
/**
* Save the configuration and/or weights of the GraphModel.
*
* An `IOHandler` is an object that has a `save` method of the proper
* signature defined. The `save` method manages the storing or
* transmission of serialized data ("artifacts") that represent the
* model's topology and weights onto or via a specific medium, such as
* file downloads, local storage, IndexedDB in the web browser and HTTP
* requests to a server. TensorFlow.js provides `IOHandler`
* implementations for a number of frequently used saving mediums, such as
* `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io`
* for more details.
*
* This method also allows you to refer to certain types of `IOHandler`s
* as URL-like string shortcuts, such as 'localstorage://' and
* 'indexeddb://'.
*
* Example 1: Save `model`'s topology and weights to browser [local
* storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);
* then load it back.
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const zeros = tf.zeros([1, 224, 224, 3]);
* model.predict(zeros).print();
*
* const saveResults = await model.save('localstorage://my-model-1');
*
* const loadedModel = await tf.loadGraphModel('localstorage://my-model-1');
* console.log('Prediction from loaded model:');
* loadedModel.predict(zeros).print();
* ```
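*
* Example 2 (a brief additional sketch): the same flow works with the
* 'indexeddb://' scheme mentioned above. This reuses `model` and `zeros`
* from Example 1 and an assumed key name:
*
* ```js
* const indexedDbResults = await model.save('indexeddb://my-model-1');
* const fromIndexedDb = await tf.loadGraphModel('indexeddb://my-model-1');
* fromIndexedDb.predict(zeros).print();
* ```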
*
* @param handlerOrURL An instance of `IOHandler` or a URL-like,
* scheme-based string shortcut for `IOHandler`.
* @param config Options for saving the model.
* @returns A `Promise` of `SaveResult`, which summarizes the result of
* the saving, such as byte sizes of the saved artifacts for the model's
* topology and weight values.
*
* @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
*/
save(handlerOrURL: io.IOHandler | string, config?: io.SaveConfig): Promise<io.SaveResult>;
private addStructuredOutputNames;
/**
* Execute the inference for the input tensors.
*
* @param input The input tensors. When the model has a single input, the
* inputs param should be a `tf.Tensor`. For models with multiple inputs,
* the inputs param should be a `tf.Tensor[]` if the input order is fixed,
* or otherwise a NamedTensorMap.
*
* For models with multiple inputs, we recommend using NamedTensorMap as the
* input type. If you use `tf.Tensor[]`, the order of the array needs to
* follow the order of the inputNodes array. @see {@link GraphModel.inputNodes}
*
* You can also feed any intermediate nodes using the NamedTensorMap as the
* input type. For example, given the graph
* InputNode => Intermediate => OutputNode,
* you can execute the subgraph Intermediate => OutputNode by calling
* model.execute({'IntermediateNode': tf.tensor(...)});
*
* This is useful for models that use tf.dynamic_rnn, where the intermediate
* state needs to be fed manually.
*
* For batch inference execution, the tensors for each input need to be
* concatenated together. For example with mobilenet, the required input shape
* is [1, 224, 224, 3], which represents the [batch, height, width, channel].
* If we provide a batched dataset of 100 images, the input tensor should be
* in the shape of [100, 224, 224, 3].
*
* @param config Prediction configuration for specifying the batch size.
* Currently the batch size option is ignored for graph models.
*
* @returns Inference result tensors. If the model is converted and it
* originally had structured_outputs in tensorflow, then a NamedTensorMap
* will be returned matching the structured_outputs. If no structured_outputs
* are present, the output will be a single `tf.Tensor` if the model has a
* single output node, or a `Tensor[]` otherwise.
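*
* A minimal usage sketch, using the same MobileNet URL as the `save`
* example above. The commented-out NamedTensorMap call uses a hypothetical
* input node name purely to illustrate the map form; look up real names via
* `model.inputNodes`:
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const input = tf.zeros([1, 224, 224, 3]);
*
* // Single input: pass the tensor directly.
* model.predict(input).print();
*
* // Equivalent call keyed by input node name (the name is model specific):
* // model.predict({'input_node': input});
* ```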
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
predict(inputs: Tensor | Tensor[] | NamedTensorMap, config?: ModelPredictConfig): Tensor | Tensor[] | NamedTensorMap;
/**
* Execute the inference for the input tensors asynchronously. Use this
* method when your model contains control flow ops.
*
* @param input The input tensors. When the model has a single input, the
* inputs param should be a `tf.Tensor`. For models with multiple inputs,
* the inputs param should be a `tf.Tensor[]` if the input order is fixed,
* or otherwise a NamedTensorMap.
*
* For models with multiple inputs, we recommend using NamedTensorMap as the
* input type. If you use `tf.Tensor[]`, the order of the array needs to
* follow the order of the inputNodes array. @see {@link GraphModel.inputNodes}
*
* You can also feed any intermediate nodes using the NamedTensorMap as the
* input type. For example, given the graph
* InputNode => Intermediate => OutputNode,
* you can execute the subgraph Intermediate => OutputNode by calling
* model.execute({'IntermediateNode': tf.tensor(...)});
*
* This is useful for models that use tf.dynamic_rnn, where the intermediate
* state needs to be fed manually.
*
* For batch inference execution, the tensors for each input need to be
* concatenated together. For example with mobilenet, the required input shape
* is [1, 224, 224, 3], which represents the [batch, height, width, channel].
* If we provide a batched dataset of 100 images, the input tensor should be
* in the shape of [100, 224, 224, 3].
*
* @param config Prediction configuration for specifying the batch size.
* Currently the batch size option is ignored for graph models.
*
* @returns A Promise of inference result tensors. If the model is converted
* and it originally had structured_outputs in tensorflow, then a
* NamedTensorMap will be returned matching the structured_outputs. If no
* structured_outputs are present, the output will be a single `tf.Tensor` if
* the model has a single output node, or a `Tensor[]` otherwise.
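*
* A minimal sketch of the async call, reusing the MobileNet URL from the
* examples above. MobileNet has no control flow ops, so this only
* illustrates the call shape; models that do contain control flow ops must
* use this method instead of `predict`:
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const zeros = tf.zeros([1, 224, 224, 3]);
* const result = await model.predictAsync(zeros);
* result.print();
* ```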
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
predictAsync(inputs: Tensor | Tensor[] | NamedTensorMap, config?: ModelPredictConfig): Promise<Tensor | Tensor[] | NamedTensorMap>;
private normalizeInputs;
private normalizeOutputs;
private executeInitializerGraph;
private executeInitializerGraphAsync;
private setResourceIdToCapturedInput;
/**
* Executes inference for the model for the given input tensors.
* @param inputs tensor, tensor array or tensor map of the inputs for the
* model, keyed by the input node names.
* @param outputs output node name(s) from the TensorFlow model. If no
* outputs are specified, the default outputs of the model are used.
* You can inspect intermediate nodes of the model by adding them to the
* outputs array.
*
* @returns A single tensor if a single output is requested, or if no outputs
* are provided and there is only one default output; otherwise a
* tensor array. The order of the tensor array matches the outputs
* if provided, and otherwise the order of the outputNodes attribute of the
* model.
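*
* A minimal sketch using the MobileNet model from the examples above.
* Intermediate node names are model specific; here we simply request the
* model's default output node explicitly via `model.outputNodes`:
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const zeros = tf.zeros([1, 224, 224, 3]);
*
* // Default outputs.
* model.execute(zeros).print();
*
* // Same call with an explicit output node name.
* model.execute(zeros, model.outputNodes[0]).print();
* ```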
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs?: string | string[]): Tensor | Tensor[];
/**
* Executes inference for the model for the given input tensors
* asynchronously. Use this method when your model contains control flow ops.
* @param inputs tensor, tensor array or tensor map of the inputs for the
* model, keyed by the input node names.
* @param outputs output node name(s) from the TensorFlow model. If no outputs
* are specified, the default outputs of the model are used. You can
* inspect intermediate nodes of the model by adding them to the outputs
* array.
*
* @returns A Promise of a single tensor if a single output is requested, or
* if no outputs are provided and there is only one default output;
* otherwise a tensor array.
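*
* A minimal sketch, mirroring the `execute` example above but awaiting the
* returned Promise (the MobileNet URL is the same one used earlier):
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const output = await model.executeAsync(tf.zeros([1, 224, 224, 3]));
* output.print();
* ```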
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
executeAsync(inputs: Tensor | Tensor[] | NamedTensorMap, outputs?: string | string[]): Promise<Tensor | Tensor[]>;
/**
* Gets intermediate tensors for model debugging mode (when the flag
* KEEP_INTERMEDIATE_TENSORS is true).
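*
* A minimal sketch, assuming the KEEP_INTERMEDIATE_TENSORS flag is
* registered in your TensorFlow.js build, and using the MobileNet URL from
* the examples above:
*
* ```js
* // Enable debugging mode before running the model (see flag above).
* tf.env().set('KEEP_INTERMEDIATE_TENSORS', true);
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* model.predict(tf.zeros([1, 224, 224, 3]));
* // Intermediate tensors keyed by node name.
* const intermediates = model.getIntermediateTensors();
* console.log(Object.keys(intermediates));
* // Release them once you are done inspecting.
* model.disposeIntermediateTensors();
* ```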
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
getIntermediateTensors(): NamedTensorsMap;
/**
* Disposes intermediate tensors for model debugging mode (when the flag
* KEEP_INTERMEDIATE_TENSORS is true).
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
disposeIntermediateTensors(): void;
private convertTensorMapToTensorsMap;
/**
* Releases the memory used by the weight tensors and resourceManager.
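*
* A brief sketch of the typical cleanup order, reusing the `model` from the
* examples above:
*
* ```js
* const output = model.predict(tf.zeros([1, 224, 224, 3]));
* tf.dispose(output); // dispose result tensors first
* model.dispose();    // then release the model's weights and resources
* ```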
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
dispose(): void;
}
/**
* Load a graph model given a URL to the model definition.
*
* Example of loading MobileNetV2 from a URL and making a prediction with a
* zeros input:
*
* ```js
* const modelUrl =
*     'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
* const model = await tf.loadGraphModel(modelUrl);
* const zeros = tf.zeros([1, 224, 224, 3]);
* model.predict(zeros).print();
* ```
*
* Example of loading MobileNetV2 from a TF Hub URL and making a prediction
* with a zeros input:
*
* ```js
* const modelUrl =
*     'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2';
* const model = await tf.loadGraphModel(modelUrl, {fromTFHub: true});
* const zeros = tf.zeros([1, 224, 224, 3]);
* model.predict(zeros).print();
* ```
* @param modelUrl The url or an `io.IOHandler` that loads the model.
* @param options Options for the HTTP request, which allows sending
* credentials and custom headers.
*
* @doc {heading: 'Models', subheading: 'Loading'}
*/
export declare function loadGraphModel(modelUrl: string | io.IOHandler, options?: io.LoadOptions, tfio?: typeof io): Promise<GraphModel>;
/**
* Load a graph model given a synchronous IO handler with a 'load' method.
*
* @param modelSource The `io.IOHandlerSync` that loads the model, or the
* `io.ModelArtifacts` that encode the model, or a tuple of
* `[io.ModelJSON, ArrayBuffer]` of which the first element encodes the
* model and the second contains the weights.
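*
* A minimal Node.js sketch of the `[io.ModelJSON, ArrayBuffer]` form,
* assuming a locally converted model with a single weight shard at the
* hypothetical paths './model/model.json' and './model/weights.bin':
*
* ```js
* const fs = require('fs');
* // Hypothetical local paths to a converted model with one weight shard.
* const modelJSON = JSON.parse(fs.readFileSync('./model/model.json', 'utf8'));
* const weightsData = fs.readFileSync('./model/weights.bin');
* // Convert the Node Buffer to a plain ArrayBuffer.
* const weights = weightsData.buffer.slice(
*     weightsData.byteOffset, weightsData.byteOffset + weightsData.byteLength);
* const model = tf.loadGraphModelSync([modelJSON, weights]);
* model.predict(tf.zeros([1, 224, 224, 3])).print();
* ```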
*
* @doc {heading: 'Models', subheading: 'Loading'}
*/
export declare function loadGraphModelSync(modelSource: io.IOHandlerSync | io.ModelArtifacts | [io.ModelJSON, /* Weights */ ArrayBuffer]): GraphModel<io.IOHandlerSync>;
export {};