"use strict";
/**
 * @license
 * Copyright 2020 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
Object.defineProperty(exports, "__esModule", { value: true });
|
exports._fusedMatMulConfig = void 0;
|
var tfjs_1 = require("@tensorflow/tfjs");
|
exports._fusedMatMulConfig = {
|
kernelName: tfjs_1._FusedMatMul,
|
backendName: 'tensorflow',
|
kernelFunc: function (args) {
|
var _a = args.inputs, a = _a.a, b = _a.b, bias = _a.bias, preluActivationWeights = _a.preluActivationWeights;
|
var backend = args.backend;
|
var _b = args.attrs, transposeA = _b.transposeA, transposeB = _b.transposeB, activation = _b.activation, leakyreluAlpha = _b.leakyreluAlpha;
|
// Core TensorFlow does not have a fused BatchMatMul op. Combine calls to
|
// achieve the same results:
|
return (0, tfjs_1.tidy)(function () {
|
var result = (0, tfjs_1.matMul)(a, b, transposeA, transposeB);
|
if (bias != null) {
|
result = (0, tfjs_1.add)(result, bias);
|
}
|
result = backend.applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
|
return result;
|
});
|
}
|
};
|