/**
 * @license
 * Copyright 2018 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
import { CustomCallback, LayersModel } from '@tensorflow/tfjs';

type LogFunction = (message: string) => void;

export declare const progressBarHelper: {
    ProgressBar: any;
    log: LogFunction;
};
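/**
 * NOTE: `progressBarHelper` appears to exist so that the progress-bar and
 * logging primitives can be swapped out (e.g. mocked in tests); this reading
 * is an assumption, not documented behavior. A hypothetical override:
 *
 * ```js
 * const messages = [];
 * // Capture log output instead of printing it to the terminal.
 * progressBarHelper.log = (message) => messages.push(message);
 * ```
 */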
/**
 * Terminal-based progress bar callback for tf.Model.fit().
 */
export declare class ProgbarLogger extends CustomCallback {
    private numTrainBatchesPerEpoch;
    private progressBar;
    private currentEpochBegin;
    private epochDurationMillis;
    private usPerStep;
    private batchesInLatestEpoch;
    private terminalWidth;
    private readonly RENDER_THROTTLE_MS;
    /**
     * Constructor of ProgbarLogger.
     */
    constructor();
    private formatLogsAsMetricsContent;
    private isFieldRelevant;
}
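/**
 * Since `ProgbarLogger` is an ordinary `CustomCallback`, one plausible way to
 * attach it explicitly is via the `callbacks` option of `tf.Model.fit()`; in
 * typical tfjs-node usage the progress bar is wired up automatically when
 * verbose training output is enabled, so treat this as an illustrative sketch
 * rather than the canonical usage:
 *
 * ```js
 * await model.fit(xs, ys, {
 *   epochs: 5,
 *   // Hypothetical explicit registration of the progress-bar callback.
 *   callbacks: [new ProgbarLogger()]
 * });
 * ```
 */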
/**
 * Get a succinct string representation of a number.
 *
 * Uses decimal notation if the number isn't too small.
 * Otherwise, uses engineering notation.
 *
 * @param x Input number.
 * @return Succinct string representing `x`.
 */
export declare function getSuccinctNumberDisplay(x: number): string;
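/**
 * A rough illustration of the intended behavior; the exact thresholds and
 * formatted outputs below are assumptions, not taken from the implementation:
 *
 * ```js
 * // Ordinary magnitudes are shown in plain decimal notation.
 * getSuccinctNumberDisplay(3.14159);    // e.g. '3.142'
 * // Very small magnitudes fall back to engineering notation.
 * getSuccinctNumberDisplay(0.0000123);  // e.g. '12.30e-6'
 * ```
 */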
/**
 * Determine the number of decimal places to display.
 *
 * @param x Number to display.
 * @return Number of decimal places to display for `x`.
 */
export declare function getDisplayDecimalPlaces(x: number): number;
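/**
 * A minimal sketch of how a helper like this might choose precision, under
 * the assumption that smaller magnitudes warrant more decimal places (the
 * real heuristic may differ):
 *
 * ```js
 * function displayDecimalPlacesSketch(x) {
 *   if (x === 0 || Number.isInteger(x) || Math.abs(x) >= 1) {
 *     return 2;
 *   }
 *   // Use more decimal places for smaller magnitudes.
 *   return 2 + Math.ceil(-Math.log10(Math.abs(x)));
 * }
 * ```
 */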
export interface TensorBoardCallbackArgs {
    /**
     * The frequency at which loss and metric values are written to logs.
     *
     * Currently supported options are:
     *
     * - 'batch': Write logs at the end of every batch of training, in addition
     *   to the end of every epoch of training.
     * - 'epoch': Write logs at the end of every epoch of training.
     *
     * Note that writing logs too often slows down the training.
     *
     * Default: 'epoch'.
     */
    updateFreq?: 'batch' | 'epoch';
    /**
     * The frequency (in epochs) at which to compute activation and weight
     * histograms for the layers of the model.
     *
     * If set to 0, histograms won't be computed.
     *
     * Validation data (or split) must be specified for histogram visualizations.
     *
     * Default: 0.
     */
    histogramFreq?: number;
}
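/**
 * For example, a configuration that writes logs after every batch and computes
 * histograms once per epoch (illustrative values only) could look like:
 *
 * ```js
 * const args = {updateFreq: 'batch', histogramFreq: 1};
 * const callback = tf.node.tensorBoard('/tmp/fit_logs_1', args);
 * ```
 */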
/**
 * Callback for logging to TensorBoard during training.
 *
 * Users are expected to access this class through the `tensorBoard()`
 * factory function instead.
 */
export declare class TensorBoardCallback extends CustomCallback {
    readonly logdir: string;
    private model;
    private trainWriter;
    private valWriter;
    private batchesSeen;
    private readonly args;
    constructor(logdir?: string, args?: TensorBoardCallbackArgs);
    setModel(model: LayersModel): void;
    private logMetrics;
    private logWeights;
    private ensureTrainWriterCreated;
    private ensureValWriterCreated;
}
/**
 * Callback for logging to TensorBoard during training.
 *
 * Writes the loss and metric values (if any) to the specified log directory
 * (`logdir`), which can be ingested and visualized by TensorBoard.
 * This callback is usually passed as a callback to `tf.Model.fit()` or
 * `tf.Model.fitDataset()` calls during model training. The frequency at which
 * the values are logged can be controlled with the `updateFreq` field of the
 * configuration object (2nd argument).
 *
 * Usage example:
 * ```js
 * // Construct a toy multilayer-perceptron regressor for demo purposes.
 * const model = tf.sequential();
 * model.add(
 *     tf.layers.dense({units: 100, activation: 'relu', inputShape: [200]}));
 * model.add(tf.layers.dense({units: 1}));
 * model.compile({
 *   loss: 'meanSquaredError',
 *   optimizer: 'sgd',
 *   metrics: ['MAE']
 * });
 *
 * // Generate some random fake data for demo purposes.
 * const xs = tf.randomUniform([10000, 200]);
 * const ys = tf.randomUniform([10000, 1]);
 * const valXs = tf.randomUniform([1000, 200]);
 * const valYs = tf.randomUniform([1000, 1]);
 *
 * // Start the model training process.
 * await model.fit(xs, ys, {
 *   epochs: 100,
 *   validationData: [valXs, valYs],
 *   // Add the tensorBoard callback here.
 *   callbacks: tf.node.tensorBoard('/tmp/fit_logs_1')
 * });
 * ```
 *
 * Then you can use the following commands to point TensorBoard
 * to the logdir:
 *
 * ```sh
 * pip install tensorboard  # Unless you've already installed it.
 * tensorboard --logdir /tmp/fit_logs_1
 * ```
 *
 * @param logdir Directory to which the logs will be written.
 * @param args Optional configuration arguments.
 * @returns An instance of `TensorBoardCallback`, which is a subclass of
 *   `tf.CustomCallback`.
 *
 * @doc {heading: 'TensorBoard', namespace: 'node'}
 */
export declare function tensorBoard(logdir?: string, args?: TensorBoardCallbackArgs): TensorBoardCallback;
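/**
 * The same callback also works with `tf.Model.fitDataset()`. A brief sketch,
 * assuming `trainDataset` and `valDataset` are preexisting `tf.data.Dataset`
 * objects (hypothetical names, not defined in this module):
 *
 * ```js
 * await model.fitDataset(trainDataset, {
 *   epochs: 10,
 *   validationData: valDataset,
 *   // Log after every batch to the (hypothetical) log directory below.
 *   callbacks: tf.node.tensorBoard('/tmp/fit_logs_2', {updateFreq: 'batch'})
 * });
 * ```
 */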
export {};