"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var FaceDetection_1 = require("../classes/FaceDetection");
var env_1 = require("../env");
var createCanvas_1 = require("./createCanvas");
var getContext2dOrThrow_1 = require("./getContext2dOrThrow");
var imageTensorToCanvas_1 = require("./imageTensorToCanvas");
var toNetInput_1 = require("./toNetInput");
/**
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image on which face detection has been performed.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns A canvas with the corresponding image region for each detected face.
 */
function extractFaces(input, detections) {
    return tslib_1.__awaiter(this, void 0, void 0, function () {
        var Canvas, canvas, netInput, tensorOrCanvas, _a, ctx, boxes;
        return tslib_1.__generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    Canvas = env_1.env.getEnv().Canvas;
                    canvas = input;
                    // if the input is already a canvas, skip the conversion steps (jump to label 5)
                    if (!!(input instanceof Canvas)) return [3 /*break*/, 5];
                    // otherwise resolve the input (media element, tensor, ...) to a NetInput first
                    return [4 /*yield*/, toNetInput_1.toNetInput(input)];
                case 1:
                    netInput = _b.sent();
                    // extracting faces from a batch of multiple images is not supported
                    if (netInput.batchSize > 1) {
                        throw new Error('extractFaces - batchSize > 1 not supported');
                    }
                    tensorOrCanvas = netInput.getInput(0);
                    // a canvas input can be used directly, a tensor must be drawn to a canvas first
                    if (!(tensorOrCanvas instanceof Canvas)) return [3 /*break*/, 2];
                    _a = tensorOrCanvas;
                    return [3 /*break*/, 4];
                case 2: return [4 /*yield*/, imageTensorToCanvas_1.imageTensorToCanvas(tensorOrCanvas)];
                case 3:
                    _a = _b.sent();
                    _b.label = 4;
                case 4:
                    canvas = _a;
                    _b.label = 5;
                case 5:
                    ctx = getContext2dOrThrow_1.getContext2dOrThrow(canvas);
                    // normalize detections to pixel-space boxes and clip them to the image borders
                    boxes = detections.map(function (det) { return det instanceof FaceDetection_1.FaceDetection
                        ? det.forSize(canvas.width, canvas.height).box.floor()
                        : det; })
                        .map(function (box) { return box.clipAtImageBorders(canvas.width, canvas.height); });
                    // copy the pixel data of each box region into a new canvas of matching size
                    return [2 /*return*/, boxes.map(function (_a) {
                            var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
                            var faceImg = createCanvas_1.createCanvas({ width: width, height: height });
                            getContext2dOrThrow_1.getContext2dOrThrow(faceImg)
                                .putImageData(ctx.getImageData(x, y, width, height), 0, 0);
                            return faceImg;
                        })];
            }
        });
    });
}
exports.extractFaces = extractFaces;
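// Illustrative usage sketch (commented out, not part of this module): one way
// application code might call extractFaces via the face-api.js namespace export.
// The `faceapi` import, the `inputImg` element id, and a previously loaded
// detection model are assumptions made for this example.
//
//   const img = document.getElementById('inputImg');
//   const detections = await faceapi.detectAllFaces(img);
//   // one canvas per detected face, each holding the cropped image region
//   const faceCanvases = await faceapi.extractFaces(img, detections);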
//# sourceMappingURL=extractFaces.js.map