mirror of
https://github.com/ocogeclub/ocoge.git
synced 2024-11-22 23:59:49 +00:00
6198 lines
681 KiB
JavaScript
|
(function (global, factory) {
|
||
|
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
|
||
|
typeof define === 'function' && define.amd ? define(['exports'], factory) :
|
||
|
(factory((global.faceapi = global.faceapi || {})));
|
||
|
}(this, (function (exports) { 'use strict';
|
||
|
|
||
|
/**
|
||
|
* @license
|
||
|
* Copyright 2018 Google LLC. All Rights Reserved.
|
||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||
|
* you may not use this file except in compliance with the License.
|
||
|
* You may obtain a copy of the License at
|
||
|
*
|
||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||
|
*
|
||
|
* Unless required by applicable law or agreed to in writing, software
|
||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
|
* See the License for the specific language governing permissions and
|
||
|
* limitations under the License.
|
||
|
* =============================================================================
|
||
|
*/
|
||
|
var extendStatics=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t;}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);};function __extends(e,t){function n(){this.constructor=e;}extendStatics(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n);}var __assign=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e};function __awaiter(e,t,n,r){return new(n||(n=Promise))(function(o,a){function i(e){try{u(r.next(e));}catch(e){a(e);}}function s(e){try{u(r.throw(e));}catch(e){a(e);}}function u(e){e.done?o(e.value):new n(function(t){t(e.value);}).then(i,s);}u((r=r.apply(e,t||[])).next());})}function __generator(e,t){var n,r,o,a,i={label:0,sent:function(){if(1&o[0])throw o[1];return o[1]},trys:[],ops:[]};return a={next:s(0),throw:s(1),return:s(2)},"function"==typeof Symbol&&(a[Symbol.iterator]=function(){return this}),a;function s(a){return function(s){return function(a){if(n)throw new TypeError("Generator is already executing.");for(;i;)try{if(n=1,r&&(o=2&a[0]?r.return:a[0]?r.throw||((o=r.return)&&o.call(r),0):r.next)&&!(o=o.call(r,a[1])).done)return o;switch(r=0,o&&(a=[2&a[0],o.value]),a[0]){case 0:case 1:o=a;break;case 4:return i.label++,{value:a[1],done:!1};case 5:i.label++,r=a[1],a=[0];continue;case 7:a=i.ops.pop(),i.trys.pop();continue;default:if(!(o=(o=i.trys).length>0&&o[o.length-1])&&(6===a[0]||2===a[0])){i=0;continue}if(3===a[0]&&(!o||a[1]>o[0]&&a[1]<o[3])){i.label=a[1];break}if(6===a[0]&&i.label<o[1]){i.label=o[1],o=a;break}if(o&&i.label<o[2]){i.label=o[2],i.ops.push(a);break}o[2]&&i.ops.pop(),i.trys.pop();continue}a=t.call(e,i);}catch(e){a=[6,e],r=0;}finally{n=o=0;}if(5&a[0])throw a[1];return {value:a[0]?a[1]:void 0,done:!0}}([a,s])}}}function isMobile(){var e=navigator.userAgent||navigator.vendor||window.opera;return 
/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}function shuffle(e){for(var 
t=e.length,n=0,r=0;t>0;)r=Math.random()*t|0,n=e[--t],e[t]=e[r],e[r]=n;}function clamp(e,t,n){return Math.max(e,Math.min(t,n))}function randUniform(e,t){var n=Math.random();return t*n+(
|
||
|
|
||
|
// Frozen namespace object re-exporting the public API surface of
// @tensorflow/tfjs-core (backend control, ops, optimizers, Tensor classes,
// environment helpers). All referenced bindings are defined earlier in the
// bundle (outside this excerpt). The `/*#__PURE__*/` annotation marks the
// Object.freeze call as side-effect free so bundlers may tree-shake it.
// `get Rank`/`get Reduction` are live getters over enum bindings.
var tfCore_esm = /*#__PURE__*/Object.freeze({
  setBackend: setBackend,
  getBackend: getBackend,
  disposeVariables: disposeVariables,
  memory: memory,
  version_core: version,
  nextFrame: nextFrame,
  environment: environment,
  io: io,
  serialization: serialization,
  test_util: test_util,
  util: util,
  webgl: webgl,
  AdadeltaOptimizer: AdadeltaOptimizer,
  AdagradOptimizer: AdagradOptimizer,
  AdamOptimizer: AdamOptimizer,
  AdamaxOptimizer: AdamaxOptimizer,
  MomentumOptimizer: MomentumOptimizer,
  Optimizer: Optimizer,
  RMSPropOptimizer: RMSPropOptimizer,
  SGDOptimizer: SGDOptimizer,
  Tensor: Tensor,
  TensorBuffer: TensorBuffer,
  variable: variable,
  Variable: Variable,
  get Rank () { return Rank; },
  get Reduction () { return Reduction; },
  ENV: ENV,
  Environment: Environment,
  DataStorage: DataStorage,
  image: image_ops,
  linalg: linalg_ops,
  losses: loss_ops,
  op: op,
  batchNormalization2d: batchNormalization2d,
  batchNormalization3d: batchNormalization3d,
  batchNormalization4d: batchNormalization4d,
  batchNormalization: batchNormalization,
  complex: complex,
  real: real,
  imag: imag,
  concat: concat,
  concat1d: concat1d,
  concat2d: concat2d,
  concat3d: concat3d,
  concat4d: concat4d,
  split: split$1,
  conv1d: conv1d,
  conv2d: conv2d,
  depthwiseConv2d: depthwiseConv2d,
  separableConv2d: separableConv2d,
  conv2dTranspose: conv2dTranspose,
  matMul: matMul,
  dot: dot,
  outerProduct: outerProduct,
  reverse: reverse,
  reverse1d: reverse1d,
  reverse2d: reverse2d,
  reverse3d: reverse3d,
  reverse4d: reverse4d,
  maxPool: maxPool,
  avgPool: avgPool,
  slice: slice,
  slice1d: slice1d,
  slice2d: slice2d,
  slice3d: slice3d,
  slice4d: slice4d,
  abs: abs,
  acos: acos,
  acosh: acosh,
  asin: asin,
  asinh: asinh,
  atan: atan,
  atanh: atanh,
  ceil: ceil,
  clipByValue: clipByValue,
  cos: cos,
  cosh: cosh,
  erf: erf,
  exp: exp,
  expm1: expm1,
  floor: floor,
  log: log$1,
  log1p: log1p,
  logSigmoid: logSigmoid,
  neg: neg,
  reciprocal: reciprocal,
  round: round,
  rsqrt: rsqrt,
  sigmoid: sigmoid,
  sign: sign,
  sin: sin,
  sinh: sinh,
  softplus: softplus,
  sqrt: sqrt,
  square: square,
  step: step,
  tan: tan,
  tanh: tanh$1,
  all: all,
  any: any,
  argMax: argMax,
  argMin: argMin,
  logSumExp: logSumExp,
  max: max,
  mean: mean,
  min: min,
  moments: moments,
  sum: sum,
  equal: equal,
  equalStrict: equalStrict,
  greater: greater,
  greaterEqual: greaterEqual,
  greaterEqualStrict: greaterEqualStrict,
  greaterStrict: greaterStrict,
  less: less,
  lessEqual: lessEqual,
  lessEqualStrict: lessEqualStrict,
  lessStrict: lessStrict,
  notEqual: notEqual,
  notEqualStrict: notEqualStrict,
  add: add,
  addN: addN,
  addStrict: addStrict,
  atan2: atan2,
  div: div,
  divStrict: divStrict,
  floorDiv: floorDiv,
  maximum: maximum,
  maximumStrict: maximumStrict,
  minimum: minimum,
  minimumStrict: minimumStrict,
  mod: mod,
  modStrict: modStrict,
  mul: mul,
  mulStrict: mulStrict,
  pow: pow,
  powStrict: powStrict,
  squaredDifference: squaredDifference,
  squaredDifferenceStrict: squaredDifferenceStrict,
  sub: sub,
  subStrict: subStrict,
  elu: elu,
  leakyRelu: leakyRelu,
  prelu: prelu,
  relu: relu,
  selu: selu,
  logicalAnd: logicalAnd,
  logicalNot: logicalNot,
  logicalOr: logicalOr,
  logicalXor: logicalXor,
  where: where,
  whereAsync: whereAsync,
  buffer: buffer,
  toPixels: toPixels,
  print: print,
  batchToSpaceND: batchToSpaceND,
  cast: cast,
  clone: clone,
  cumsum: cumsum,
  depthToSpace: depthToSpace,
  expandDims: expandDims,
  eye: eye,
  fromPixels: fromPixels,
  multinomial: multinomial,
  oneHot: oneHot,
  pad: pad,
  pad1d: pad1d,
  pad2d: pad2d,
  pad3d: pad3d,
  pad4d: pad4d,
  rand: rand,
  randomNormal: randomNormal,
  randomUniform: randomUniform,
  reshape: reshape,
  spaceToBatchND: spaceToBatchND,
  squeeze: squeeze,
  stack: stack,
  tile: tile,
  truncatedNormal: truncatedNormal,
  unstack: unstack,
  fill: fill,
  linspace: linspace,
  ones: ones$1,
  range: range,
  scalar: scalar,
  tensor: tensor,
  tensor1d: tensor1d,
  tensor2d: tensor2d,
  tensor3d: tensor3d,
  tensor4d: tensor4d,
  tensor5d: tensor5d,
  tensor6d: tensor6d,
  zeros: zeros,
  onesLike: onesLike,
  zerosLike: zerosLike,
  transpose: transpose,
  softmax: softmax,
  localResponseNormalization: localResponseNormalization,
  norm: norm,
  gather: gather,
  unsortedSegmentSum: unsortedSegmentSum,
  basicLSTMCell: basicLSTMCell,
  multiRNNCell: multiRNNCell,
  movingAverage: movingAverage,
  stridedSlice: stridedSlice,
  topk: topk,
  train: train,
  tidy: tidy,
  keep: keep,
  dispose: dispose,
  time: time,
  profile: profile,
  customGrad: customGrad,
  grad: grad,
  grads: grads,
  valueAndGrad: valueAndGrad,
  valueAndGrads: valueAndGrads,
  variableGrads: variableGrads
});
|
||
|
|
||
|
/*! *****************************************************************************
|
||
|
Copyright (c) Microsoft Corporation. All rights reserved.
|
||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||
|
this file except in compliance with the License. You may obtain a copy of the
|
||
|
License at http://www.apache.org/licenses/LICENSE-2.0
|
||
|
|
||
|
THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||
|
KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
|
||
|
WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
|
||
|
MERCHANTABLITY OR NON-INFRINGEMENT.
|
||
|
|
||
|
See the Apache Version 2.0 License for specific language governing permissions
|
||
|
and limitations under the License.
|
||
|
***************************************************************************** */
|
||
|
/* global Reflect, Promise */
|
||
|
|
||
|
// tslib `extendStatics` helper (suffixed `$1` by the bundler to avoid clashing
// with the minified copy earlier in this file): copies static members from
// base class `b` onto derived class `d`. On first call it picks the best
// available strategy (native Object.setPrototypeOf, writable `__proto__`, or
// a manual own-key copy) and rebinds itself to it, so feature detection runs
// only once.
var extendStatics$1 = function(d, b) {
    extendStatics$1 = Object.setPrototypeOf ||
        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
        function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
    return extendStatics$1(d, b);
};
|
||
|
|
||
|
// tslib `__extends` helper: prototypal inheritance for downleveled
// `class D extends B`. Copies statics via extendStatics$1, then chains the
// prototypes using an intermediate constructor (so B's constructor body does
// not run) while restoring `constructor` on the new prototype.
function __extends$1(d, b) {
    extendStatics$1(d, b);
    function __() { this.constructor = d; }
    // `extends null` produces a prototype with no [[Prototype]].
    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
}
|
||
|
|
||
|
// tslib `__assign` helper: shallow-merges the own enumerable properties of
// every source argument into the first argument `t` (later sources win).
// Uses native Object.assign when available, otherwise a manual for-in loop;
// like extendStatics$1 it rebinds itself after the first call so the feature
// check happens once.
var __assign$1 = function() {
    __assign$1 = Object.assign || function __assign(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
        }
        return t;
    };
    return __assign$1.apply(this, arguments);
};
|
||
|
|
||
|
// tslib `__awaiter` helper: drives a downleveled `async` function body
// (`generator`, typically built by __generator$1) to completion and returns a
// Promise of class `P` (defaulting to the global Promise).
function __awaiter$1(thisArg, _arguments, P, generator) {
    return new (P || (P = Promise))(function (resolve, reject) {
        // Resume the generator with the awaited value / rejection reason.
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Settle when done; otherwise wrap the yielded value in a new P so
        // both plain values and thenables are awaited before resuming.
        function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
}
|
||
|
|
||
|
// tslib `__generator` helper: runs a downleveled generator body (`body`) as a
// resumable state machine. `_.label` is the resume point, `_.trys` the stack
// of active try-regions ([try, catch, finally, end] label tuples), `_.ops`
// pending completion records. Opcodes in `op[0]` (per tslib convention):
// 0 = next, 1 = throw, 2 = return, 3 = break-to-label, 4 = yield,
// 5 = yield*, 6 = resume-after-throw, 7 = endfinally.
function __generator$1(thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    // Build the iterator method that feeds opcode `n` into the machine.
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (_) try {
            // While a yield* delegation is active, forward the request to the
            // inner iterator `y` and return its result unless it is done.
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                // next/throw: record so `_.sent()` can surface the value.
                case 0: case 1: t = op; break;
                // yield: advance past the yield point and emit the value.
                case 4: _.label++; return { value: op[1], done: false };
                // yield*: start delegating to the inner iterator.
                case 5: _.label++; y = op[1]; op = [0]; continue;
                // endfinally: pop and replay the suspended completion.
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    // Completion routing through the innermost try-region, if any.
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        // Bit test: opcodes 1 (throw) and 5 propagate as exceptions.
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
}
|
||
|
|
||
|
// Cross-environment global object: browser window, Node global, worker self,
// or an empty object as a last resort.
var commonjsGlobal$1 = (function () {
    if (typeof window !== 'undefined') return window;
    if (typeof global !== 'undefined') return global;
    if (typeof self !== 'undefined') return self;
    return {};
})();

// Evaluate a CommonJS-style module factory (`fn(module, exports)`) against a
// fresh module record and return its `module.exports`.
function createCommonjsModule$1(fn, module) {
    module = { exports: {} };
    fn(module, module.exports);
    return module.exports;
}
|
||
|
|
||
|
// Test whether a value is a Node.js Buffer.
// Fix: `Buffer` is a Node-only global; in browsers (the primary target of
// this bundle) the original `arg instanceof Buffer` threw a ReferenceError.
// Guarding with `typeof` makes the check return false instead where Buffer
// does not exist, and behaves identically where it does.
var isBuffer = function isBuffer(arg) {
  return typeof Buffer !== 'undefined' && arg instanceof Buffer;
};
|
||
|
|
||
|
// Browser fallback for Node's `util.inherits`, wrapped as a CommonJS module.
// Links `ctor.prototype` to `superCtor.prototype` and records the parent on
// `ctor.super_` (Node convention).
var inherits_browser = createCommonjsModule$1(function (module) {
  if (typeof Object.create === 'function') {
    // implementation from standard node.js 'util' module
    module.exports = function inherits(ctor, superCtor) {
      ctor.super_ = superCtor;
      ctor.prototype = Object.create(superCtor.prototype, {
        constructor: {
          value: ctor,
          enumerable: false,
          writable: true,
          configurable: true
        }
      });
    };
  } else {
    // old school shim for old browsers: a throwaway surrogate constructor in
    // place of Object.create (note `constructor` ends up enumerable here).
    module.exports = function inherits(ctor, superCtor) {
      ctor.super_ = superCtor;
      var TempCtor = function () {};
      TempCtor.prototype = superCtor.prototype;
      ctor.prototype = new TempCtor();
      ctor.prototype.constructor = ctor;
    };
  }
});
|
||
|
|
||
|
// Selects the `inherits` implementation: Node's util.inherits when present,
// otherwise the browser shim above.
var inherits = createCommonjsModule$1(function (module) {
  try {
    // NOTE(review): `util$1` is declared with `var` *below* this statement in
    // the bundle, so at execution time it is still `undefined` here; the
    // `.inherits` access then throws, which drops into the catch branch and
    // selects the browser shim. The try/catch is what makes this work.
    var util = util$1;
    if (typeof util.inherits !== 'function') throw '';
    module.exports = util.inherits;
  } catch (e) {
    module.exports = inherits_browser;
  }
});
|
||
|
|
||
|
var util$1 = createCommonjsModule$1(function (module, exports) {
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
|
||
|
// printf-style placeholders recognized by format(): %s, %d, %j, and %%.
var formatRegExp = /%[sdj%]/g;
/**
 * Formats a printf-like string: %s -> String, %d -> Number, %j -> JSON
 * (or '[Circular]' on stringify failure), %% -> literal '%'. If the first
 * argument is not a string, every argument is inspect()ed and space-joined.
 * Surplus arguments beyond the placeholders are appended space-separated,
 * inspect()ing non-null objects.
 *
 * @param {*} f Format string, or any value when no formatting is wanted.
 * @returns {string} The assembled message.
 */
exports.format = function(f) {
  if (!isString(f)) {
    var objects = [];
    for (var i = 0; i < arguments.length; i++) {
      objects.push(inspect(arguments[i]));
    }
    return objects.join(' ');
  }

  // `i` is shared between the replace callback (consuming placeholder
  // arguments) and the trailing loop (appending leftovers).
  var i = 1;
  var args = arguments;
  var len = args.length;
  var str = String(f).replace(formatRegExp, function(x) {
    if (x === '%%') return '%';
    // More placeholders than arguments: leave the placeholder literal.
    if (i >= len) return x;
    switch (x) {
      case '%s': return String(args[i++]);
      case '%d': return Number(args[i++]);
      case '%j':
        try {
          return JSON.stringify(args[i++]);
        } catch (_) {
          return '[Circular]';
        }
      default:
        return x;
    }
  });
  // Append any arguments not consumed by placeholders.
  for (var x = args[i]; i < len; x = args[++i]) {
    if (isNull(x) || !isObject(x)) {
      str += ' ' + x;
    } else {
      str += ' ' + inspect(x);
    }
  }
  return str;
};
|
||
|
|
||
|
|
||
|
// Mark that a method should not be used.
// Returns a modified function which warns once by default.
// If --no-deprecation is set, then it is a no-op.
//
// @param {Function} fn  The deprecated function to wrap.
// @param {string} msg   Message to emit (or throw/trace, per process flags).
// @returns {Function}   Wrapper with the same behavior as `fn`.
exports.deprecate = function(fn, msg) {
  // Allow for deprecating things in the process of starting up. With no
  // `process` global yet (e.g. browsers), defer the decision to call time by
  // re-evaluating on every invocation.
  if (isUndefined(commonjsGlobal$1.process)) {
    return function() {
      return exports.deprecate(fn, msg).apply(this, arguments);
    };
  }

  if (process.noDeprecation === true) {
    return fn;
  }

  // Closure flag: emit the warning only on the first call.
  var warned = false;
  function deprecated() {
    if (!warned) {
      if (process.throwDeprecation) {
        throw new Error(msg);
      } else if (process.traceDeprecation) {
        console.trace(msg);
      } else {
        console.error(msg);
      }
      warned = true;
    }
    return fn.apply(this, arguments);
  }

  return deprecated;
};
|
||
|
|
||
|
|
||
|
// Cache of per-section debug loggers, keyed by uppercased section name.
var debugs = {};
// Lazily-read snapshot of NODE_DEBUG; read once on first debuglog() call.
var debugEnviron;
// Return a logger for `set` that writes "<SET> <pid>: <msg>" to stderr when
// the section appears in NODE_DEBUG, and a no-op otherwise. The chosen
// behavior is cached, so later NODE_DEBUG changes are not observed.
// NOTE(review): reads `process.env` unconditionally — assumes a Node-like
// `process` global exists when this is called; verify for browser use.
exports.debuglog = function(set) {
  if (isUndefined(debugEnviron))
    debugEnviron = process.env.NODE_DEBUG || '';
  set = set.toUpperCase();
  if (!debugs[set]) {
    // Word-boundary match so e.g. NODE_DEBUG=net does not enable "netlog".
    if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) {
      var pid = process.pid;
      debugs[set] = function() {
        var msg = exports.format.apply(exports, arguments);
        console.error('%s %d: %s', set, pid, msg);
      };
    } else {
      debugs[set] = function() {};
    }
  }
  return debugs[set];
};
|
||
|
|
||
|
|
||
|
/**
 * Echoes any JavaScript value, printed in the most readable form available
 * for its type (port of Node's `util.inspect`).
 *
 * @param {*} obj The value to format.
 * @param {Object|boolean} opts Options object ({showHidden, depth, colors,
 *     customInspect}); a boolean is treated as the legacy `showHidden` flag.
 * @returns {string} The formatted representation.
 */
/* legacy: obj, showHidden, depth, colors*/
function inspect(obj, opts) {
  // default options; `seen` is the cycle-detection stack used by formatValue.
  var ctx = {
    seen: [],
    stylize: stylizeNoColor
  };
  // legacy positional signature: inspect(obj, showHidden, depth, colors)
  if (arguments.length >= 3) ctx.depth = arguments[2];
  if (arguments.length >= 4) ctx.colors = arguments[3];
  if (isBoolean(opts)) {
    // legacy...
    ctx.showHidden = opts;
  } else if (opts) {
    // got an "options" object
    exports._extend(ctx, opts);
  }
  // set default options
  if (isUndefined(ctx.showHidden)) ctx.showHidden = false;
  if (isUndefined(ctx.depth)) ctx.depth = 2;
  if (isUndefined(ctx.colors)) ctx.colors = false;
  if (isUndefined(ctx.customInspect)) ctx.customInspect = true;
  if (ctx.colors) ctx.stylize = stylizeWithColor;
  return formatValue(ctx, obj, ctx.depth);
}
exports.inspect = inspect;
|
||
|
|
||
|
|
||
|
// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics
// Each entry is [openCode, closeCode] for the SGR escape sequence used by
// stylizeWithColor.
inspect.colors = {
  'bold' : [1, 22],
  'italic' : [3, 23],
  'underline' : [4, 24],
  'inverse' : [7, 27],
  'white' : [37, 39],
  'grey' : [90, 39],
  'black' : [30, 39],
  'blue' : [34, 39],
  'cyan' : [36, 39],
  'green' : [32, 39],
  'magenta' : [35, 39],
  'red' : [31, 39],
  'yellow' : [33, 39]
};

// Don't use 'blue' not visible on cmd.exe
// Maps value categories (the `styleType` passed to stylize) to color names
// defined in inspect.colors above.
inspect.styles = {
  'special': 'cyan',
  'number': 'yellow',
  'boolean': 'yellow',
  'undefined': 'grey',
  'null': 'bold',
  'string': 'green',
  'date': 'magenta',
  // "name": intentionally not styling
  'regexp': 'red'
};
|
||
|
|
||
|
|
||
|
// Wrap `str` in the ANSI escape codes configured for `styleType`; unknown
// style types pass through unchanged.
function stylizeWithColor(str, styleType) {
  var colorName = inspect.styles[styleType];

  if (!colorName) {
    return str;
  }

  var codes = inspect.colors[colorName];
  return '\u001b[' + codes[0] + 'm' + str + '\u001b[' + codes[1] + 'm';
}
|
||
|
|
||
|
|
||
|
// Identity stylizer used when color output is disabled; `styleType` is kept
// only to match the stylize callback signature.
function stylizeNoColor(str, styleType) { return str; }
|
||
|
|
||
|
|
||
|
// Convert an array of keys into a `{ key: true }` lookup table.
function arrayToHash(array) {
  return array.reduce(function (hash, val) {
    hash[val] = true;
    return hash;
  }, {});
}
|
||
|
|
||
|
|
||
|
// Core recursive formatter behind inspect(): dispatches on the value's type,
// tracks cycles via ctx.seen, and delegates per-entry work to formatArray /
// formatProperty. `recurseTimes` counts down to the depth limit (null means
// unlimited).
function formatValue(ctx, value, recurseTimes) {
  // Provide a hook for user-specified inspect functions.
  // Check that value is an object with an inspect function on it
  if (ctx.customInspect &&
      value &&
      isFunction(value.inspect) &&
      // Filter out the util module, it's inspect function is special
      value.inspect !== exports.inspect &&
      // Also filter out any prototype objects using the circular check.
      !(value.constructor && value.constructor.prototype === value)) {
    var ret = value.inspect(recurseTimes, ctx);
    if (!isString(ret)) {
      ret = formatValue(ctx, ret, recurseTimes);
    }
    return ret;
  }

  // Primitive types cannot have properties
  var primitive = formatPrimitive(ctx, value);
  if (primitive) {
    return primitive;
  }

  // Look up the keys of the object.
  var keys = Object.keys(value);
  var visibleKeys = arrayToHash(keys);

  if (ctx.showHidden) {
    keys = Object.getOwnPropertyNames(value);
  }

  // IE doesn't make error fields non-enumerable
  // http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx
  if (isError(value)
      && (keys.indexOf('message') >= 0 || keys.indexOf('description') >= 0)) {
    return formatError(value);
  }

  // Some type of object without properties can be shortcutted.
  if (keys.length === 0) {
    if (isFunction(value)) {
      var name = value.name ? ': ' + value.name : '';
      return ctx.stylize('[Function' + name + ']', 'special');
    }
    if (isRegExp(value)) {
      return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
    }
    if (isDate(value)) {
      return ctx.stylize(Date.prototype.toString.call(value), 'date');
    }
    if (isError(value)) {
      return formatError(value);
    }
  }

  // `base` is a type-specific prefix (e.g. the function name or regex source)
  // shown before the braces; `braces` pick {} vs [] delimiters.
  var base = '', array = false, braces = ['{', '}'];

  // Make Array say that they are Array
  if (isArray(value)) {
    array = true;
    braces = ['[', ']'];
  }

  // Make functions say that they are functions
  if (isFunction(value)) {
    var n = value.name ? ': ' + value.name : '';
    base = ' [Function' + n + ']';
  }

  // Make RegExps say that they are RegExps
  if (isRegExp(value)) {
    base = ' ' + RegExp.prototype.toString.call(value);
  }

  // Make dates with properties first say the date
  if (isDate(value)) {
    base = ' ' + Date.prototype.toUTCString.call(value);
  }

  // Make error with message first say the error
  if (isError(value)) {
    base = ' ' + formatError(value);
  }

  // Nothing to enumerate (loose == kept as upstream: length may be absent).
  if (keys.length === 0 && (!array || value.length == 0)) {
    return braces[0] + base + braces[1];
  }

  // Depth limit reached: summarize instead of recursing.
  if (recurseTimes < 0) {
    if (isRegExp(value)) {
      return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
    } else {
      return ctx.stylize('[Object]', 'special');
    }
  }

  // Push onto the cycle stack for the duration of the recursion.
  ctx.seen.push(value);

  var output;
  if (array) {
    output = formatArray(ctx, value, recurseTimes, visibleKeys, keys);
  } else {
    output = keys.map(function(key) {
      return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array);
    });
  }

  ctx.seen.pop();

  return reduceToSingleString(output, base, braces);
}
|
||
|
|
||
|
|
||
|
// Format undefined/string/number/boolean/null. Returns undefined (falls
// through) for objects, functions and symbols so formatValue takes over.
function formatPrimitive(ctx, value) {
  if (isUndefined(value))
    return ctx.stylize('undefined', 'undefined');
  if (isString(value)) {
    // JSON-escape, then convert double-quote wrapping to single quotes.
    var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '')
                                             .replace(/'/g, "\\'")
                                             .replace(/\\"/g, '"') + '\'';
    return ctx.stylize(simple, 'string');
  }
  if (isNumber(value))
    return ctx.stylize('' + value, 'number');
  if (isBoolean(value))
    return ctx.stylize('' + value, 'boolean');
  // For some reason typeof null is "object", so special case here.
  if (isNull(value))
    return ctx.stylize('null', 'null');
}
|
||
|
|
||
|
|
||
|
// Render an Error via its native toString, wrapped in brackets,
// e.g. "[TypeError: boom]".
function formatError(value) {
  var rendered = Error.prototype.toString.call(value);
  return '[' + rendered + ']';
}
|
||
|
|
||
|
|
||
|
// Format an array's indexed elements plus any named (non-numeric) own keys,
// one output entry per slot; holes render as ''.
// NOTE(review): `hasOwnProperty(obj, key)` here is presumably a two-argument
// module-local helper defined later in the bundle (outside this excerpt),
// not Object.prototype.hasOwnProperty — verify against the full file.
function formatArray(ctx, value, recurseTimes, visibleKeys, keys) {
  var output = [];
  for (var i = 0, l = value.length; i < l; ++i) {
    if (hasOwnProperty(value, String(i))) {
      output.push(formatProperty(ctx, value, recurseTimes, visibleKeys,
          String(i), true));
    } else {
      output.push('');
    }
  }
  // Append named keys after the indexed elements.
  keys.forEach(function(key) {
    if (!key.match(/^\d+$/)) {
      output.push(formatProperty(ctx, value, recurseTimes, visibleKeys,
          key, true));
    }
  });
  return output;
}
|
||
|
|
||
|
|
||
|
// Format one "name: value" entry for formatValue/formatArray. Handles
// getters/setters, circular references, hidden-key bracketing, multi-line
// indentation, and key quoting.
function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) {
  var name, str, desc;
  desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] };
  // Accessor properties are summarized, never invoked.
  if (desc.get) {
    if (desc.set) {
      str = ctx.stylize('[Getter/Setter]', 'special');
    } else {
      str = ctx.stylize('[Getter]', 'special');
    }
  } else {
    if (desc.set) {
      str = ctx.stylize('[Setter]', 'special');
    }
  }
  // Keys only visible via showHidden are rendered in brackets.
  if (!hasOwnProperty(visibleKeys, key)) {
    name = '[' + key + ']';
  }
  if (!str) {
    if (ctx.seen.indexOf(desc.value) < 0) {
      // null recurseTimes means unlimited depth.
      if (isNull(recurseTimes)) {
        str = formatValue(ctx, desc.value, null);
      } else {
        str = formatValue(ctx, desc.value, recurseTimes - 1);
      }
      // Re-indent multi-line values under their key.
      // NOTE(review): upstream Node indents with two spaces here (matching
      // the .substr(2) below); this copy shows a single space — possibly
      // whitespace-collapsed in transit. Left byte-identical to this copy.
      if (str.indexOf('\n') > -1) {
        if (array) {
          str = str.split('\n').map(function(line) {
            return ' ' + line;
          }).join('\n').substr(2);
        } else {
          str = '\n' + str.split('\n').map(function(line) {
            return ' ' + line;
          }).join('\n');
        }
      }
    } else {
      str = ctx.stylize('[Circular]', 'special');
    }
  }
  if (isUndefined(name)) {
    // Numeric array indices get no name at all.
    if (array && key.match(/^\d+$/)) {
      return str;
    }
    name = JSON.stringify('' + key);
    // Identifier-like keys are shown bare; others single-quoted.
    if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) {
      name = name.substr(1, name.length - 2);
      name = ctx.stylize(name, 'name');
    } else {
      name = name.replace(/'/g, "\\'")
                 .replace(/\\"/g, '"')
                 .replace(/(^"|"$)/g, "'");
      name = ctx.stylize(name, 'string');
    }
  }

  return name + ': ' + str;
}
|
||
|
|
||
|
|
||
|
// Join formatted entries into "{ a, b }" on one line, or a multi-line layout
// when the combined width exceeds 60 columns.
//
// @param {string[]} output Pre-formatted "name: value" entries.
// @param {string} base Type prefix (e.g. " [Function: f]"), '' if none.
// @param {string[]} braces Two-element open/close delimiter pair.
// @returns {string}
function reduceToSingleString(output, base, braces) {
  // Total width: entry lengths with ANSI color codes stripped, plus one
  // separator character per entry.
  // Fix: removed a dead, empty statement — `if (cur.indexOf('\n') >= 0) ;` —
  // a leftover of an upstream line-counting branch stripped by the bundler.
  var length = output.reduce(function(prev, cur) {
    return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1;
  }, 0);

  if (length > 60) {
    return braces[0] +
           (base === '' ? '' : base + '\n ') +
           ' ' +
           output.join(',\n ') +
           ' ' +
           braces[1];
  }

  return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1];
}
|
||
|
|
||
|
|
||
|
// NOTE: These type checking functions intentionally don't use `instanceof`
// because it is fragile and can be easily faked with `Object.create()`
// (the one exception: isError also accepts Error subclass instances).

function isArray(value) { return Array.isArray(value); }
exports.isArray = isArray;

function isBoolean(value) { return typeof value === 'boolean'; }
exports.isBoolean = isBoolean;

function isNull(value) { return value === null; }
exports.isNull = isNull;

// Loose == deliberately matches both null and undefined.
function isNullOrUndefined(value) { return value == null; }
exports.isNullOrUndefined = isNullOrUndefined;

function isNumber(value) { return typeof value === 'number'; }
exports.isNumber = isNumber;

function isString(value) { return typeof value === 'string'; }
exports.isString = isString;

function isSymbol(value) { return typeof value === 'symbol'; }
exports.isSymbol = isSymbol;

function isUndefined(value) { return value === void 0; }
exports.isUndefined = isUndefined;

function isRegExp(value) {
  return isObject(value) && objectToString(value) === '[object RegExp]';
}
exports.isRegExp = isRegExp;

// typeof null is 'object', so null must be excluded explicitly.
function isObject(value) { return typeof value === 'object' && value !== null; }
exports.isObject = isObject;

function isDate(value) {
  return isObject(value) && objectToString(value) === '[object Date]';
}
exports.isDate = isDate;

function isError(value) {
  return isObject(value) &&
         (objectToString(value) === '[object Error]' || value instanceof Error);
}
exports.isError = isError;

function isFunction(value) { return typeof value === 'function'; }
exports.isFunction = isFunction;

function isPrimitive(value) {
  return value === null ||
         typeof value === 'boolean' ||
         typeof value === 'number' ||
         typeof value === 'string' ||
         typeof value === 'symbol' || // ES6 symbol
         typeof value === 'undefined';
}
exports.isPrimitive = isPrimitive;

exports.isBuffer = isBuffer;

// Object.prototype.toString tag, e.g. '[object Date]'.
function objectToString(value) {
  return Object.prototype.toString.call(value);
}
|
||
|
|
||
|
|
||
|
// Left-pads a number below 10 with a leading zero (used for timestamps).
function pad(n) {
  var digits = n.toString(10);
  if (n < 10) {
    digits = '0' + digits;
  }
  return digits;
}
|
||
|
|
||
|
|
||
|
// Month abbreviations used by timestamp().
var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
              'Oct', 'Nov', 'Dec'];

// Formats the current local time like: 26 Feb 16:19:34
function timestamp() {
  var d = new Date();
  var time = [pad(d.getHours()),
              pad(d.getMinutes()),
              pad(d.getSeconds())].join(':');
  return [d.getDate(), months[d.getMonth()], time].join(' ');
}
|
||
|
|
||
|
|
||
|
// log is just a thin wrapper to console.log that prepends a timestamp.
// Remaining arguments are forwarded to util.format (printf-style).
exports.log = function() {
  console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments));
};
|
||
|
|
||
|
|
||
|
/**
 * Inherit the prototype methods from one constructor into another.
 *
 * The Function.prototype.inherits from lang.js rewritten as a standalone
 * function (not on Function.prototype). NOTE: If this file is to be loaded
 * during bootstrapping this function needs to be rewritten using some native
 * functions as prototype setup using normal JavaScript does not work as
 * expected during bootstrapping (see mirror.js in r114903).
 *
 * @param {function} ctor Constructor function which needs to inherit the
 *     prototype.
 * @param {function} superCtor Constructor function to inherit prototype from.
 */
// The `inherits` implementation itself is defined earlier in this module wrapper.
exports.inherits = inherits;
|
||
|
|
||
|
// Shallow, in-place merge: copies own enumerable keys of `add` onto `origin`.
// Returns `origin` (mutated) for chaining.
exports._extend = function(origin, add) {
  // Don't do anything if add isn't an object
  if (!add || !isObject(add)) return origin;

  var keys = Object.keys(add);
  var i = keys.length;
  // Iterate backwards; order is irrelevant for a key-by-key copy.
  while (i--) {
    origin[keys[i]] = add[keys[i]];
  }
  return origin;
};
|
||
|
|
||
|
// Own-property check that is safe even when `obj` shadows hasOwnProperty
// or has a null prototype.
function hasOwnProperty(obj, prop) {
  var nativeHasOwn = Object.prototype.hasOwnProperty;
  return nativeHasOwn.call(obj, prop);
}
|
||
|
});
|
||
|
// Named aliases for the CommonJS `util$1` shim's members so later bundle code
// can reference them directly (e.g. util_9 === util.isNumber).
var util_1 = util$1.format;
var util_2 = util$1.deprecate;
var util_3 = util$1.debuglog;
var util_4 = util$1.inspect;
var util_5 = util$1.isArray;
var util_6 = util$1.isBoolean;
var util_7 = util$1.isNull;
var util_8 = util$1.isNullOrUndefined;
var util_9 = util$1.isNumber;
var util_10 = util$1.isString;
var util_11 = util$1.isSymbol;
var util_12 = util$1.isUndefined;
var util_13 = util$1.isRegExp;
var util_14 = util$1.isObject;
var util_15 = util$1.isDate;
var util_16 = util$1.isError;
var util_17 = util$1.isFunction;
var util_18 = util$1.isPrimitive;
var util_19 = util$1.isBuffer;
var util_20 = util$1.log;
var util_21 = util$1.inherits;
var util_22 = util$1._extend;
|
||
|
|
||
|
// Immutable width/height pair; the constructor validates that both values
// are numbers (util_9 === util.isNumber).
var Dimensions = /** @class */ (function () {
    function Dimensions(width, height) {
        if (!util_9(width) || !util_9(height)) {
            throw new Error("Dimensions.constructor - expected width and height to be valid numbers, instead have " + JSON.stringify({ width: width, height: height }));
        }
        this._width = width;
        this._height = height;
    }
    Object.defineProperty(Dimensions.prototype, "width", {
        get: function () { return this._width; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Dimensions.prototype, "height", {
        get: function () { return this._height; },
        enumerable: true,
        configurable: true
    });
    // Element-wise reciprocal (1/w, 1/h); used to map absolute pixel
    // coordinates back to relative [0, 1] coordinates (see Box.rescale).
    Dimensions.prototype.reverse = function () {
        return new Dimensions(1 / this.width, 1 / this.height);
    };
    return Dimensions;
}());
|
||
|
|
||
|
/**
 * Immutable 2D point with component-wise vector arithmetic helpers.
 * Every operation returns a new Point; the receiver is never mutated.
 */
var Point = /** @class */ (function () {
    function Point(x, y) {
        this._x = x;
        this._y = y;
    }
    Object.defineProperty(Point.prototype, "x", {
        get: function () { return this._x; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Point.prototype, "y", {
        get: function () { return this._y; },
        enumerable: true,
        configurable: true
    });
    // Component-wise sum.
    Point.prototype.add = function (other) {
        var sumX = this.x + other.x;
        var sumY = this.y + other.y;
        return new Point(sumX, sumY);
    };
    // Component-wise difference.
    Point.prototype.sub = function (other) {
        var diffX = this.x - other.x;
        var diffY = this.y - other.y;
        return new Point(diffX, diffY);
    };
    // Component-wise product.
    Point.prototype.mul = function (other) {
        var prodX = this.x * other.x;
        var prodY = this.y * other.y;
        return new Point(prodX, prodY);
    };
    // Component-wise quotient.
    Point.prototype.div = function (other) {
        var quotX = this.x / other.x;
        var quotY = this.y / other.y;
        return new Point(quotX, quotY);
    };
    // Absolute value of both components.
    Point.prototype.abs = function () {
        var absX = Math.abs(this.x);
        var absY = Math.abs(this.y);
        return new Point(absX, absY);
    };
    // Euclidean distance from the origin.
    Point.prototype.magnitude = function () {
        var squaredSum = Math.pow(this.x, 2) + Math.pow(this.y, 2);
        return Math.sqrt(squaredSum);
    };
    // Both components rounded down to integers.
    Point.prototype.floor = function () {
        var flooredX = Math.floor(this.x);
        var flooredY = Math.floor(this.y);
        return new Point(flooredX, flooredY);
    };
    return Point;
}());
|
||
|
|
||
|
// True if the value is a tf.Tensor of exactly `dim` dimensions.
// NOTE(review): `Tensor` is the TensorFlow.js Tensor class brought in by the
// bundle outside this chunk.
function isTensor(tensor$$1, dim) {
    return tensor$$1 instanceof Tensor && tensor$$1.shape.length === dim;
}
// Rank-specific convenience wrappers around isTensor.
function isTensor1D(tensor$$1) {
    return isTensor(tensor$$1, 1);
}
function isTensor2D(tensor$$1) {
    return isTensor(tensor$$1, 2);
}
function isTensor3D(tensor$$1) {
    return isTensor(tensor$$1, 3);
}
function isTensor4D(tensor$$1) {
    return isTensor(tensor$$1, 4);
}
|
||
|
// True if num has a non-zero fractional part (i.e. is not a whole number).
function isFloat(num) {
    var fractionalPart = num % 1;
    return fractionalPart !== 0;
}
|
||
|
// True if num is evenly divisible by two.
function isEven(num) {
    var remainder = num % 2;
    return remainder === 0;
}
|
||
|
// Truncates num to `prec` decimal places (default 2).
// Note: this floors rather than rounds the last kept digit.
function round$1(num, prec) {
    if (prec === void 0) { prec = 2; }
    var factor = Math.pow(10, prec);
    var scaled = Math.floor(num * factor);
    return scaled / factor;
}
|
||
|
// Loose duck-type check for a {width, height} object.
// NOTE(review): uses truthiness, so a width or height of 0 makes this return
// falsy — callers (e.g. Box.rescale) rely on it only to distinguish a
// dimensions object from a plain numeric scale factor.
function isDimensions(obj) {
    return obj && obj.width && obj.height;
}
|
||
|
// Scales {width, height} so the longer side equals `inputSize`, preserving
// aspect ratio; results are rounded to whole pixels.
function computeReshapedDimensions(_a, inputSize) {
    var width = _a.width, height = _a.height;
    var scale = inputSize / Math.max(height, width);
    return new Dimensions(Math.round(width * scale), Math.round(height * scale));
}
|
||
|
// Arithmetic mean of an array of Points (the centroid).
function getCenterPoint(pts) {
    return pts.reduce(function (sum$$1, pt) { return sum$$1.add(pt); }, new Point(0, 0))
        .div(new Point(pts.length, pts.length));
}
|
||
|
// Arithmetic progression of `num` values: start, start + step, ...
function range$1(num, start, step$$1) {
    var zeros = Array(num).fill(0);
    return zeros.map(function (_unused, idx) {
        return start + idx * step$$1;
    });
}
|
||
|
// True for zero and for any truthy value that is finite and not NaN.
// (Note: like the original, a numeric string also passes — there is no
// typeof check.)
function isValidNumber(num) {
    if (num === 0) {
        return true;
    }
    return !!num && num !== Infinity && num !== -Infinity && !isNaN(num);
}
|
||
|
// True if num is a valid number within the closed interval [0, 1].
// (The misspelled name is kept — external callers reference it as-is.)
function isValidProbablitiy(num) {
    return isValidNumber(num) && 0 <= num && num <= 1.0;
}
|
||
|
|
||
|
// Axis-aligned box over an image. Accepts either a rect ({x, y, width, height})
// or a bounding box ({left, top, right, bottom}) and normalizes to x/y/w/h.
var Box = /** @class */ (function () {
    // TODO: MTCNN boxes sometimes have negative widths or heights, figure out why and remove
    // allowNegativeDimensions flag again
    function Box(_box, allowNegativeDimensions) {
        if (allowNegativeDimensions === void 0) { allowNegativeDimensions = false; }
        var box = (_box || {});
        // The input may be given in either of two equivalent forms.
        var isBbox = [box.left, box.top, box.right, box.bottom].every(isValidNumber);
        var isRect = [box.x, box.y, box.width, box.height].every(isValidNumber);
        if (!isRect && !isBbox) {
            throw new Error("Box.constructor - expected box to be IBoundingBox | IRect, instead have " + JSON.stringify(box));
        }
        // Normalize a bounding box (corner coordinates) to rect form.
        var _a = isRect
            ? [box.x, box.y, box.width, box.height]
            : [box.left, box.top, box.right - box.left, box.bottom - box.top], x = _a[0], y = _a[1], width = _a[2], height = _a[3];
        Box.assertIsValidBox({ x: x, y: y, width: width, height: height }, 'Box.constructor', allowNegativeDimensions);
        this._x = x;
        this._y = y;
        this._width = width;
        this._height = height;
    }
    // True if rect has numeric x, y, width and height.
    Box.isRect = function (rect) {
        return !!rect && [rect.x, rect.y, rect.width, rect.height].every(isValidNumber);
    };
    // Throws unless box is a valid rect; negative width/height are rejected
    // unless explicitly allowed (see the MTCNN note above).
    Box.assertIsValidBox = function (box, callee, allowNegativeDimensions) {
        if (allowNegativeDimensions === void 0) { allowNegativeDimensions = false; }
        if (!Box.isRect(box)) {
            throw new Error(callee + " - invalid box: " + JSON.stringify(box) + ", expected object with properties x, y, width, height");
        }
        if (!allowNegativeDimensions && (box.width < 0 || box.height < 0)) {
            throw new Error(callee + " - width (" + box.width + ") and height (" + box.height + ") must be positive numbers");
        }
    };
    Object.defineProperty(Box.prototype, "x", {
        get: function () { return this._x; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "y", {
        get: function () { return this._y; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "width", {
        get: function () { return this._width; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "height", {
        get: function () { return this._height; },
        enumerable: true,
        configurable: true
    });
    // Derived edge coordinates.
    Object.defineProperty(Box.prototype, "left", {
        get: function () { return this.x; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "top", {
        get: function () { return this.y; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "right", {
        get: function () { return this.x + this.width; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "bottom", {
        get: function () { return this.y + this.height; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(Box.prototype, "area", {
        get: function () { return this.width * this.height; },
        enumerable: true,
        configurable: true
    });
    // New box with all four values rounded to the nearest integer.
    Box.prototype.round = function () {
        var _a = [this.x, this.y, this.width, this.height]
            .map(function (val) { return Math.round(val); }), x = _a[0], y = _a[1], width = _a[2], height = _a[3];
        return new Box({ x: x, y: y, width: width, height: height });
    };
    // New box with all four values rounded down.
    Box.prototype.floor = function () {
        var _a = [this.x, this.y, this.width, this.height]
            .map(function (val) { return Math.floor(val); }), x = _a[0], y = _a[1], width = _a[2], height = _a[3];
        return new Box({ x: x, y: y, width: width, height: height });
    };
    // Expands the minor dimension symmetrically so width === height.
    Box.prototype.toSquare = function () {
        var _a = this, x = _a.x, y = _a.y, width = _a.width, height = _a.height;
        var diff = Math.abs(width - height);
        if (width < height) {
            x -= (diff / 2);
            width += diff;
        }
        if (height < width) {
            y -= (diff / 2);
            height += diff;
        }
        return new Box({ x: x, y: y, width: width, height: height });
    };
    // Scales by a single factor, or by separate x/y factors when `s` is a
    // {width, height} object (see isDimensions' truthiness caveat).
    Box.prototype.rescale = function (s) {
        var scaleX = isDimensions(s) ? s.width : s;
        var scaleY = isDimensions(s) ? s.height : s;
        return new Box({
            x: this.x * scaleX,
            y: this.y * scaleY,
            width: this.width * scaleX,
            height: this.height * scaleY
        });
    };
    // Grows the box by padX/padY in total while keeping it centered.
    Box.prototype.pad = function (padX, padY) {
        var _a = [
            this.x - (padX / 2),
            this.y - (padY / 2),
            this.width + padX,
            this.height + padY
        ], x = _a[0], y = _a[1], width = _a[2], height = _a[3];
        return new Box({ x: x, y: y, width: width, height: height });
    };
    // Clamps the box to [0, imgWidth] x [0, imgHeight] and floors the result.
    Box.prototype.clipAtImageBorders = function (imgWidth, imgHeight) {
        var _a = this, x = _a.x, y = _a.y, right = _a.right, bottom = _a.bottom;
        var clippedX = Math.max(x, 0);
        var clippedY = Math.max(y, 0);
        var newWidth = right - clippedX;
        var newHeight = bottom - clippedY;
        var clippedWidth = Math.min(newWidth, imgWidth - clippedX);
        var clippedHeight = Math.min(newHeight, imgHeight - clippedY);
        return (new Box({ x: clippedX, y: clippedY, width: clippedWidth, height: clippedHeight })).floor();
    };
    // MTCNN-style padding bookkeeping (1-based coordinates): computes the
    // source range (x..ex, y..ey) and destination range (dx..edx, dy..edy)
    // for copying this box region out of an image, clamped at the borders.
    Box.prototype.padAtBorders = function (imageHeight, imageWidth) {
        var w = this.width + 1;
        var h = this.height + 1;
        var dx = 1;
        var dy = 1;
        var edx = w;
        var edy = h;
        var x = this.left;
        var y = this.top;
        var ex = this.right;
        var ey = this.bottom;
        if (ex > imageWidth) {
            edx = -ex + imageWidth + w;
            ex = imageWidth;
        }
        if (ey > imageHeight) {
            edy = -ey + imageHeight + h;
            ey = imageHeight;
        }
        if (x < 1) {
            // FIXME(review): suspicious — this assigns edy, but the symmetric
            // `y < 1` branch below also assigns edy. By symmetry with the
            // ex/ey clamping above this looks like it should be `edx = 2 - x`;
            // confirm against the reference MTCNN pad routine before changing.
            edy = 2 - x;
            x = 1;
        }
        if (y < 1) {
            edy = 2 - y;
            y = 1;
        }
        return { dy: dy, edy: edy, dx: dx, edx: edx, y: y, ey: ey, x: x, ex: ex, w: w, h: h };
    };
    // Applies an MTCNN regression offset (relative to this box's size), then
    // squares and rounds the result.
    Box.prototype.calibrate = function (region) {
        return new Box({
            left: this.left + (region.left * this.width),
            top: this.top + (region.top * this.height),
            right: this.right + (region.right * this.width),
            bottom: this.bottom + (region.bottom * this.height)
        }).toSquare().round();
    };
    return Box;
}());
|
||
|
|
||
|
// Box specified by its corner coordinates (left, top, right, bottom).
var BoundingBox = /** @class */ (function (_super) {
    __extends$1(BoundingBox, _super);
    function BoundingBox(left, top, right, bottom) {
        return _super.call(this, { left: left, top: top, right: right, bottom: bottom }) || this;
    }
    return BoundingBox;
}(Box));
|
||
|
|
||
|
// Box carrying a text label, used when drawing detections.
var BoxWithText = /** @class */ (function (_super) {
    __extends$1(BoxWithText, _super);
    function BoxWithText(box, text) {
        var _this = _super.call(this, box) || this;
        _this._text = text;
        return _this;
    }
    Object.defineProperty(BoxWithText.prototype, "text", {
        get: function () { return this._text; },
        enumerable: true,
        configurable: true
    });
    return BoxWithText;
}(Box));
|
||
|
|
||
|
// Box carrying a numeric class label.
var LabeledBox = /** @class */ (function (_super) {
    __extends$1(LabeledBox, _super);
    function LabeledBox(box, label) {
        var _this = _super.call(this, box) || this;
        _this._label = label;
        return _this;
    }
    // Throws unless box is a valid box whose label is a number.
    LabeledBox.assertIsValidLabeledBox = function (box, callee) {
        Box.assertIsValidBox(box, callee);
        if (!isValidNumber(box.label)) {
            throw new Error(callee + " - expected property label (" + box.label + ") to be a number");
        }
    };
    Object.defineProperty(LabeledBox.prototype, "label", {
        get: function () { return this._label; },
        enumerable: true,
        configurable: true
    });
    return LabeledBox;
}(Box));
|
||
|
|
||
|
// Detection result combining detection score, class score/name and the
// detected box, stored in absolute (pixel) coordinates for imageDims.
var ObjectDetection = /** @class */ (function () {
    function ObjectDetection(score, classScore, className, relativeBox, imageDims) {
        this._imageDims = new Dimensions(imageDims.width, imageDims.height);
        this._score = score;
        this._classScore = classScore;
        this._className = className;
        // relativeBox coordinates are relative to the image size; scale to pixels.
        this._box = new Box(relativeBox).rescale(this._imageDims);
    }
    Object.defineProperty(ObjectDetection.prototype, "score", {
        get: function () { return this._score; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "classScore", {
        get: function () { return this._classScore; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "className", {
        get: function () { return this._className; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "box", {
        get: function () { return this._box; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "imageDims", {
        get: function () { return this._imageDims; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "imageWidth", {
        get: function () { return this.imageDims.width; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(ObjectDetection.prototype, "imageHeight", {
        get: function () { return this.imageDims.height; },
        enumerable: true,
        configurable: true
    });
    // Box mapped back to coordinates relative to the image size.
    Object.defineProperty(ObjectDetection.prototype, "relativeBox", {
        get: function () { return new Box(this._box).rescale(this.imageDims.reverse()); },
        enumerable: true,
        configurable: true
    });
    // Re-projects this detection onto a different image size.
    ObjectDetection.prototype.forSize = function (width, height) {
        return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, { width: width, height: height });
    };
    return ObjectDetection;
}());
|
||
|
|
||
|
// LabeledBox extended with a detection score and class score (both in [0, 1]).
var PredictedBox = /** @class */ (function (_super) {
    __extends$1(PredictedBox, _super);
    function PredictedBox(box, label, score, classScore) {
        var _this = _super.call(this, box, label) || this;
        _this._score = score;
        _this._classScore = classScore;
        return _this;
    }
    // Throws unless box is a valid labeled box with score and classScore in [0, 1].
    PredictedBox.assertIsValidPredictedBox = function (box, callee) {
        LabeledBox.assertIsValidLabeledBox(box, callee);
        if (!isValidProbablitiy(box.score)
            || !isValidProbablitiy(box.classScore)) {
            throw new Error(callee + " - expected properties score (" + box.score + ") and (" + box.classScore + ") to be a number between [0, 1]");
        }
    };
    Object.defineProperty(PredictedBox.prototype, "score", {
        get: function () { return this._score; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(PredictedBox.prototype, "classScore", {
        get: function () { return this._classScore; },
        enumerable: true,
        configurable: true
    });
    return PredictedBox;
}(LabeledBox));
|
||
|
|
||
|
// Box specified by its top-left corner plus width and height.
var Rect = /** @class */ (function (_super) {
    __extends$1(Rect, _super);
    function Rect(x, y, width, height) {
        return _super.call(this, { x: x, y: y, width: width, height: height }) || this;
    }
    return Rect;
}(Box));
|
||
|
|
||
|
// Disposes every tensor in weightMap whose path was not recorded in
// paramMappings (i.e. weights the model did not consume).
function disposeUnusedWeightTensors(weightMap, paramMappings) {
    var isMapped = function (path) {
        return paramMappings.some(function (pm) { return pm.originalPath === path; });
    };
    Object.keys(weightMap).forEach(function (path) {
        if (!isMapped(path)) {
            weightMap[path].dispose();
        }
    });
}
|
||
|
|
||
|
// Returns a lookup function that fetches a weight tensor by path, validates
// its rank, and records the (originalPath -> paramPath) mapping as a side
// effect so unused entries can be disposed later.
function extractWeightEntryFactory(weightMap, paramMappings) {
    return function (originalPath, paramRank, mappedPath) {
        var tensor = weightMap[originalPath];
        if (!isTensor(tensor, paramRank)) {
            throw new Error("expected weightMap[" + originalPath + "] to be a Tensor" + paramRank + "D, instead have " + tensor);
        }
        // Record where this weight ended up (defaults to its original path).
        paramMappings.push({ originalPath: originalPath, paramPath: mappedPath || originalPath });
        return tensor;
    };
}
|
||
|
|
||
|
// Wraps a flat weights array in a stateful reader: extractWeights(n) consumes
// and returns the next n values; getRemainingWeights() returns what is left.
function extractWeightsFactory(weights) {
    var remainingWeights = weights;
    return {
        extractWeights: function (numWeights) {
            var extracted = remainingWeights.slice(0, numWeights);
            remainingWeights = remainingWeights.slice(numWeights);
            return extracted;
        },
        getRemainingWeights: function () {
            return remainingWeights;
        }
    };
}
|
||
|
|
||
|
/**
 * Resolves the weights-manifest URI and model base URI for a model.
 *
 * An empty/undefined uri yields the default manifest filename relative to the
 * current location. A uri ending in '.json' is treated as an explicit
 * manifest file; otherwise the default manifest name is appended. A leading
 * '/' and any http(s) protocol prefix are preserved.
 */
function getModelUris(uri, defaultModelName) {
    var defaultManifestFilename = defaultModelName + "-weights_manifest.json";
    if (!uri) {
        return {
            modelBaseUri: '',
            manifestUri: defaultManifestFilename
        };
    }
    if (uri === '/') {
        return {
            modelBaseUri: '/',
            manifestUri: "/" + defaultManifestFilename
        };
    }
    // Strip the protocol so path splitting doesn't mangle '//'.
    var protocol = '';
    if (uri.startsWith('http://')) {
        protocol = 'http://';
    }
    else if (uri.startsWith('https://')) {
        protocol = 'https://';
    }
    uri = uri.replace(protocol, '');
    var pathSegments = uri.split('/').filter(function (segment) { return segment; });
    var pointsToJson = uri.endsWith('.json');
    var manifestFile = pointsToJson
        ? pathSegments[pathSegments.length - 1]
        : defaultManifestFilename;
    var baseSegments = pointsToJson
        ? pathSegments.slice(0, pathSegments.length - 1)
        : pathSegments;
    var modelBaseUri = protocol + baseSegments.join('/');
    if (uri.startsWith('/')) {
        modelBaseUri = "/" + modelBaseUri;
    }
    var manifestUri = modelBaseUri === '/'
        ? "/" + manifestFile
        : modelBaseUri + "/" + manifestFile;
    return {
        modelBaseUri: modelBaseUri,
        manifestUri: manifestUri
    };
}
|
||
|
|
||
|
// True if the image has finished loading, or the video has buffered enough
// to play (readyState >= 3, i.e. HAVE_FUTURE_DATA).
function isMediaLoaded(media) {
    return (media instanceof HTMLImageElement && media.complete)
        || (media instanceof HTMLVideoElement && media.readyState >= 3);
}
|
||
|
|
||
|
// Resolves once the given media element has loaded; rejects on a load error.
// Canvases and already-loaded media resolve immediately.
function awaitMediaLoaded(media) {
    return new Promise(function (resolve, reject) {
        if (media instanceof HTMLCanvasElement || isMediaLoaded(media)) {
            return resolve();
        }
        // Each handler detaches both listeners so neither can fire twice or leak.
        function onLoad(e) {
            if (!e.currentTarget)
                return;
            e.currentTarget.removeEventListener('load', onLoad);
            e.currentTarget.removeEventListener('error', onError);
            resolve(e);
        }
        function onError(e) {
            if (!e.currentTarget)
                return;
            e.currentTarget.removeEventListener('load', onLoad);
            e.currentTarget.removeEventListener('error', onError);
            reject(e);
        }
        media.addEventListener('load', onLoad);
        media.addEventListener('error', onError);
    });
}
|
||
|
|
||
|
// Decodes a Blob into an HTMLImageElement by reading it as a data URL.
function bufferToImage(buf) {
    return new Promise(function (resolve, reject) {
        if (!(buf instanceof Blob)) {
            return reject('bufferToImage - expected buf to be of type: Blob');
        }
        var reader = new FileReader();
        reader.onload = function () {
            // readAsDataURL yields a string result; anything else is unexpected.
            if (typeof reader.result !== 'string') {
                return reject('bufferToImage - expected reader.result to be a string, in onload');
            }
            var img = new Image();
            img.onload = function () { return resolve(img); };
            img.onerror = reject;
            img.src = reader.result;
        };
        reader.onerror = reject;
        reader.readAsDataURL(buf);
    });
}
|
||
|
|
||
|
// Returns the canvas' 2D rendering context, throwing when it is unavailable.
function getContext2dOrThrow(canvas) {
    var context = canvas.getContext('2d');
    if (context) {
        return context;
    }
    throw new Error('canvas 2d context is null');
}
|
||
|
|
||
|
// Intrinsic dimensions of a media element: naturalWidth/Height for images,
// videoWidth/Height for videos, declared width/height for anything else
// (e.g. canvases or plain {width, height} objects).
function getMediaDimensions(input) {
    if (input instanceof HTMLImageElement) {
        return new Dimensions(input.naturalWidth, input.naturalHeight);
    }
    if (input instanceof HTMLVideoElement) {
        return new Dimensions(input.videoWidth, input.videoHeight);
    }
    return new Dimensions(input.width, input.height);
}
|
||
|
|
||
|
// Creates a detached canvas element with the given {width, height}.
function createCanvas(_a) {
    var width = _a.width, height = _a.height;
    var canvas = document.createElement('canvas');
    canvas.width = width;
    canvas.height = height;
    return canvas;
}
|
||
|
// Draws a fully-loaded media element onto a new canvas, optionally resized
// to `dims` (defaults to the media's intrinsic dimensions).
function createCanvasFromMedia(media, dims) {
    if (!isMediaLoaded(media)) {
        throw new Error('createCanvasFromMedia - media has not finished loading yet');
    }
    var _a = dims || getMediaDimensions(media), width = _a.width, height = _a.height;
    var canvas = createCanvas({ width: width, height: height });
    getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height);
    return canvas;
}
|
||
|
|
||
|
// Merges user-supplied draw options over the built-in defaults.
// Returns a fresh object; neither input is mutated.
function getDefaultDrawOptions(options) {
    if (options === void 0) { options = {}; }
    var defaults = {
        boxColor: 'blue',
        textColor: 'red',
        lineWidth: 2,
        fontSize: 20,
        fontStyle: 'Georgia',
        withScore: true,
        withClassName: true
    };
    return Object.assign({}, defaults, options);
}
|
||
|
|
||
|
// Strokes a rectangle outline on the context using the merged draw options
// (boxColor and lineWidth).
function drawBox(ctx, x, y, w, h, options) {
    var drawOptions = Object.assign(getDefaultDrawOptions(), (options || {}));
    ctx.strokeStyle = drawOptions.boxColor;
    ctx.lineWidth = drawOptions.lineWidth;
    ctx.strokeRect(x, y, w, h);
}
|
||
|
|
||
|
// Fills text near (x, y), offset by the box line width so it clears the
// stroked box edge, using the merged draw options for font and color.
function drawText(ctx, x, y, text, options) {
    if (options === void 0) { options = {}; }
    var drawOptions = Object.assign(getDefaultDrawOptions(), options);
    var padText = 2 + drawOptions.lineWidth;
    ctx.fillStyle = drawOptions.textColor;
    ctx.font = drawOptions.fontSize + "px " + drawOptions.fontStyle;
    // 0.6 * fontSize approximates the font ascent so the baseline lands
    // inside the padded area.
    ctx.fillText(text, x + padText, y + padText + (drawOptions.fontSize * 0.6));
}
|
||
|
|
||
|
// Resolves a string argument to the DOM element with that id;
// any non-string argument is returned unchanged.
function resolveInput(arg) {
    if (typeof arg !== 'string') {
        return arg;
    }
    return document.getElementById(arg);
}
|
||
|
|
||
|
// Draws one or more detections (box outlines plus an optional label) onto the
// given canvas, or onto the canvas looked up by element id.
function drawDetection(canvasArg, detection, options) {
    var canvas = resolveInput(canvasArg);
    if (!(canvas instanceof HTMLCanvasElement)) {
        throw new Error('drawDetection - expected canvas to be of type: HTMLCanvasElement');
    }
    // Accept a single detection or an array of them.
    var detectionArray = Array.isArray(detection)
        ? detection
        : [detection];
    detectionArray.forEach(function (det) {
        var _a = det instanceof ObjectDetection ? det.box : det, x = _a.x, y = _a.y, width = _a.width, height = _a.height;
        var drawOptions = getDefaultDrawOptions(options);
        var ctx = getContext2dOrThrow(canvas);
        drawBox(ctx, x, y, width, height, drawOptions);
        var withScore = drawOptions.withScore;
        // Label precedence: explicit text > bare score > "className (score)".
        var text = det instanceof BoxWithText
            ? det.text
            : ((withScore && det instanceof PredictedBox)
                ? "" + round$1(det.score)
                : (det instanceof ObjectDetection
                    ? "" + det.className + (withScore ? " (" + round$1(det.score) + ")" : '')
                    : ''));
        if (text) {
            drawText(ctx, x, y + height, text, drawOptions);
        }
    });
}
|
||
|
|
||
|
// fetch() wrapper that throws for HTTP error responses (status >= 400).
// __awaiter$1/__generator$1 are the bundler's compiled async/await helpers.
function fetchOrThrow(url, init) {
    return __awaiter$1(this, void 0, void 0, function () {
        var res;
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, fetch(url, init)];
                case 1:
                    res = _a.sent();
                    if (!(res.status < 400)) {
                        throw new Error("failed to fetch: (" + res.status + ") " + res.statusText + ", from url: " + res.url);
                    }
                    return [2 /*return*/, res];
            }
        });
    });
}
|
||
|
|
||
|
// Fetches a URL and decodes the response blob into an HTMLImageElement.
// Rejects when the response is not an image/* MIME type.
function fetchImage(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var res, blob;
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, fetchOrThrow(uri)];
                case 1:
                    res = _a.sent();
                    return [4 /*yield*/, (res).blob()];
                case 2:
                    blob = _a.sent();
                    if (!blob.type.startsWith('image/')) {
                        throw new Error("fetchImage - expected blob type to be of type image/*, instead have: " + blob.type + ", for url: " + res.url);
                    }
                    return [2 /*return*/, bufferToImage(blob)];
            }
        });
    });
}
|
||
|
|
||
|
// Fetches a URL and parses the response body as JSON.
function fetchJson(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, fetchOrThrow(uri)];
                case 1: return [2 /*return*/, (_a.sent()).json()];
            }
        });
    });
}
|
||
|
|
||
|
// Fetches a binary weights file and returns its contents as a Float32Array.
// (The bind/apply dance is the compiler's way of calling `new Float32Array`
// with an awaited ArrayBuffer argument.)
function fetchNetWeights(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var _a;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0:
                    _a = Float32Array.bind;
                    return [4 /*yield*/, fetchOrThrow(uri)];
                case 1: return [4 /*yield*/, (_b.sent()).arrayBuffer()];
                case 2: return [2 /*return*/, new (_a.apply(Float32Array, [void 0, _b.sent()]))()];
            }
        });
    });
}
|
||
|
|
||
|
// Renders an image tensor into a canvas via tf's toPixels, creating a
// detached canvas when none is supplied. For a 4D tensor the leading batch
// dimension is skipped when reading the shape.
function imageTensorToCanvas(imgTensor, canvas) {
    return __awaiter$1(this, void 0, void 0, function () {
        var targetCanvas, _a, height, width, numChannels, imgTensor3D;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0:
                    targetCanvas = canvas || document.createElement('canvas');
                    _a = imgTensor.shape.slice(isTensor4D(imgTensor) ? 1 : 0), height = _a[0], width = _a[1], numChannels = _a[2];
                    // Convert to an int-typed 3D tensor as required by toPixels.
                    imgTensor3D = tidy(function () { return imgTensor.as3D(height, width, numChannels).toInt(); });
                    return [4 /*yield*/, toPixels(imgTensor3D, targetCanvas)];
                case 1:
                    _b.sent();
                    // Free the temporary tensor created outside tidy's scope.
                    imgTensor3D.dispose();
                    return [2 /*return*/, targetCanvas];
            }
        });
    });
}
|
||
|
|
||
|
// Draws the input scaled onto an inputSize x inputSize canvas so the longer
// side fits exactly. When centerImage is true the shorter side is centered;
// otherwise the image is anchored at the top-left. Uncovered area stays blank.
function imageToSquare(input, inputSize, centerImage) {
    if (centerImage === void 0) { centerImage = false; }
    if (!(input instanceof HTMLImageElement || input instanceof HTMLCanvasElement)) {
        throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement');
    }
    var dims = getMediaDimensions(input);
    var scale = inputSize / Math.max(dims.height, dims.width);
    var width = scale * dims.width;
    var height = scale * dims.height;
    var targetCanvas = createCanvas({ width: inputSize, height: inputSize });
    var inputCanvas = input instanceof HTMLCanvasElement ? input : createCanvasFromMedia(input);
    // Offset centers the minor dimension when requested.
    var offset = Math.abs(width - height) / 2;
    var dx = centerImage && width < height ? offset : 0;
    var dy = centerImage && height < width ? offset : 0;
    getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height);
    return targetCanvas;
}
|
||
|
|
||
|
// True for the DOM media element types accepted as network input.
function isMediaElement(input) {
    return input instanceof HTMLImageElement
        || input instanceof HTMLVideoElement
        || input instanceof HTMLCanvasElement;
}
|
||
|
|
||
|
// Resolves the manifest/base URIs for a model, fetches the weights manifest,
// and loads the full weight map via tf.io.loadWeights.
function loadWeightMap(uri, defaultModelName) {
    return __awaiter$1(this, void 0, void 0, function () {
        var _a, manifestUri, modelBaseUri, manifest;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0:
                    _a = getModelUris(uri, defaultModelName), manifestUri = _a.manifestUri, modelBaseUri = _a.modelBaseUri;
                    return [4 /*yield*/, fetchJson(manifestUri)];
                case 1:
                    manifest = _b.sent();
                    return [2 /*return*/, io.loadWeights(manifest, modelBaseUri)];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
 * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
 * both sides of the minor dimension of the image.
 * @returns The padded tensor with width === height.
 */
function padToSquare(imgTensor, isCenterImage) {
    if (isCenterImage === void 0) { isCenterImage = false; }
    return tidy(function () {
        // Batched tensor: shape is [batch, height, width, channels].
        var _a = imgTensor.shape.slice(1), height = _a[0], width = _a[1];
        if (height === width) {
            return imgTensor;
        }
        var dimDiff = Math.abs(height - width);
        // Half the padding goes after the image when centering, all of it otherwise.
        var paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1));
        // Axis 2 pads the width, axis 1 pads the height.
        var paddingAxis = height > width ? 2 : 1;
        var createPaddingTensor = function (paddingAmount) {
            var paddingTensorShape = imgTensor.shape.slice();
            paddingTensorShape[paddingAxis] = paddingAmount;
            return fill(paddingTensorShape, 0);
        };
        var paddingTensorAppend = createPaddingTensor(paddingAmount);
        // With centering and an odd difference, the remainder is prepended.
        var remainingPaddingAmount = dimDiff - paddingTensorAppend.shape[paddingAxis];
        var paddingTensorPrepend = isCenterImage && remainingPaddingAmount
            ? createPaddingTensor(remainingPaddingAmount)
            : null;
        var tensorsToStack = [
            paddingTensorPrepend,
            imgTensor,
            paddingTensorAppend
        ]
            .filter(function (t) { return t !== null; });
        return concat(tensorsToStack, paddingAxis);
    });
}
|
||
|
|
||
|
var NetInput = /** @class */ (function () {
    /**
     * Container for the resolved inputs of a network: an array of tf.Tensor3D /
     * tf.Tensor4D (batchSize 1) / canvas entries, from which a single batch
     * tensor can be created via toBatchTensor.
     *
     * @param inputs Array of resolved inputs. Media elements other than canvases
     * are drawn onto freshly created canvases here.
     * @param treatAsBatchInput (optional, default: false) If true, results are
     * treated as a batch even if only a single input was passed.
     */
    function NetInput(inputs, treatAsBatchInput) {
        if (treatAsBatchInput === void 0) { treatAsBatchInput = false; }
        var _this = this;
        this._imageTensors = [];
        this._canvases = [];
        this._treatAsBatchInput = false;
        this._inputDimensions = [];
        if (!Array.isArray(inputs)) {
            throw new Error("NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have " + inputs);
        }
        this._treatAsBatchInput = treatAsBatchInput;
        this._batchSize = inputs.length;
        inputs.forEach(function (input, idx) {
            // 3d tensor: stored as-is, its shape is used as the input dimensions
            if (isTensor3D(input)) {
                _this._imageTensors[idx] = input;
                _this._inputDimensions[idx] = input.shape;
                return;
            }
            // 4d tensor: only batchSize 1 is allowed within an input array
            if (isTensor4D(input)) {
                var batchSize = input.shape[0];
                if (batchSize !== 1) {
                    throw new Error("NetInput - tf.Tensor4D with batchSize " + batchSize + " passed, but not supported in input array");
                }
                _this._imageTensors[idx] = input;
                _this._inputDimensions[idx] = input.shape.slice(1);
                return;
            }
            // media element: normalize to a canvas
            var canvas = input instanceof HTMLCanvasElement ? input : createCanvasFromMedia(input);
            _this._canvases[idx] = canvas;
            _this._inputDimensions[idx] = [canvas.height, canvas.width, 3];
        });
    }
    Object.defineProperty(NetInput.prototype, "imageTensors", {
        // tensor inputs, indexed by batch index (sparse - see canvases for the rest)
        get: function () {
            return this._imageTensors;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "canvases", {
        // canvas inputs, indexed by batch index (sparse - see imageTensors for the rest)
        get: function () {
            return this._canvases;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "isBatchInput", {
        get: function () {
            return this.batchSize > 1 || this._treatAsBatchInput;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "batchSize", {
        get: function () {
            return this._batchSize;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "inputDimensions", {
        // original [height, width, channels] of every input
        get: function () {
            return this._inputDimensions;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "inputSize", {
        // the quadratic size used for the batch tensor; undefined until
        // toBatchTensor has been called
        get: function () {
            return this._inputSize;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NetInput.prototype, "reshapedInputDimensions", {
        // per-input dimensions after rescaling to inputSize (aspect ratio preserved)
        get: function () {
            var _this = this;
            return range$1(this.batchSize, 0, 1).map(function (_, batchIdx) { return _this.getReshapedInputDimensions(batchIdx); });
        },
        enumerable: true,
        configurable: true
    });
    // returns the canvas or, if none, the tensor at the given batch index
    NetInput.prototype.getInput = function (batchIdx) {
        return this.canvases[batchIdx] || this.imageTensors[batchIdx];
    };
    NetInput.prototype.getInputDimensions = function (batchIdx) {
        return this._inputDimensions[batchIdx];
    };
    NetInput.prototype.getInputHeight = function (batchIdx) {
        return this._inputDimensions[batchIdx][0];
    };
    NetInput.prototype.getInputWidth = function (batchIdx) {
        return this._inputDimensions[batchIdx][1];
    };
    // computes the dimensions of the input at batchIdx after rescaling to
    // inputSize; throws if toBatchTensor was not called before
    NetInput.prototype.getReshapedInputDimensions = function (batchIdx) {
        if (typeof this.inputSize !== 'number') {
            throw new Error('getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet');
        }
        var width = this.getInputWidth(batchIdx);
        var height = this.getInputHeight(batchIdx);
        return computeReshapedDimensions({ width: width, height: height }, this.inputSize);
    };
    /**
     * Create a batch tensor from all input canvases and tensors
     * with size [batchSize, inputSize, inputSize, 3].
     *
     * @param inputSize Height and width of the tensor.
     * @param isCenterInputs (optional, default: true) If true, add an equal amount of padding on
     * both sides of the minor dimension of the image.
     * @returns The batch tensor.
     */
    NetInput.prototype.toBatchTensor = function (inputSize, isCenterInputs) {
        var _this = this;
        if (isCenterInputs === void 0) { isCenterInputs = true; }
        this._inputSize = inputSize;
        return tidy(function () {
            var inputTensors = range$1(_this.batchSize, 0, 1).map(function (batchIdx) {
                var input = _this.getInput(batchIdx);
                if (input instanceof Tensor) {
                    // tensor input: pad to square, then resize to inputSize x inputSize
                    var imgTensor = isTensor4D(input) ? input : input.expandDims();
                    imgTensor = padToSquare(imgTensor, isCenterInputs);
                    if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) {
                        imgTensor = image_ops.resizeBilinear(imgTensor, [inputSize, inputSize]);
                    }
                    return imgTensor.as3D(inputSize, inputSize, 3);
                }
                if (input instanceof HTMLCanvasElement) {
                    // canvas input: square it on the canvas level, then read the pixels
                    return fromPixels(imageToSquare(input, inputSize, isCenterInputs));
                }
                throw new Error("toBatchTensor - at batchIdx " + batchIdx + ", expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have " + input);
            });
            var batchTensor = stack(inputTensors.map(function (t) { return t.toFloat(); })).as4D(_this.batchSize, inputSize, inputSize, 3);
            return batchTensor;
        });
    };
    return NetInput;
}());
/**
 * Validates the input to make sure, they are valid net inputs and awaits all media elements
 * to be finished loading.
 *
 * @param inputs The input, which can be a media element or an array of different media elements.
 * @returns A NetInput instance, which can be passed into one of the neural networks.
 */
function toNetInput(inputs) {
    return __awaiter$1(this, void 0, void 0, function () {
        var inputArgArray, getIdxHint, inputArray;
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0:
                    // already a NetInput: pass it through unchanged
                    if (inputs instanceof NetInput) {
                        return [2 /*return*/, inputs];
                    }
                    inputArgArray = Array.isArray(inputs)
                        ? inputs
                        : [inputs];
                    if (!inputArgArray.length) {
                        throw new Error('toNetInput - empty array passed as input');
                    }
                    // points at the offending array index in error messages (only
                    // when an array was passed in)
                    getIdxHint = function (idx) { return Array.isArray(inputs) ? " at input index " + idx + ":" : ''; };
                    // resolve element id strings to their DOM elements
                    inputArray = inputArgArray.map(resolveInput);
                    inputArray.forEach(function (input, i) {
                        if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {
                            if (typeof inputArgArray[i] === 'string') {
                                throw new Error("toNetInput -" + getIdxHint(i) + " string passed, but could not resolve HTMLElement for element id " + inputArgArray[i]);
                            }
                            throw new Error("toNetInput -" + getIdxHint(i) + " expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id");
                        }
                        if (isTensor4D(input)) {
                            // if tf.Tensor4D is passed in the input array, the batch size has to be 1
                            var batchSize = input.shape[0];
                            if (batchSize !== 1) {
                                throw new Error("toNetInput -" + getIdxHint(i) + " tf.Tensor4D with batchSize " + batchSize + " passed, but not supported in input array");
                            }
                        }
                    });
                    // wait for all media elements being loaded
                    return [4 /*yield*/, Promise.all(inputArray.map(function (input) { return isMediaElement(input) && awaitMediaLoaded(input); }))];
                case 1:
                    // wait for all media elements being loaded
                    _a.sent();
                    return [2 /*return*/, new NetInput(inputArray, Array.isArray(inputs))];
            }
        });
    });
}
/**
 * Computes the overlap of two bounding boxes.
 *
 * @param box1 First box ({ left, top, right, bottom, area }).
 * @param box2 Second box.
 * @param isIOU (optional, default: true) If true, returns intersection over
 * union, otherwise intersection over the smaller of the two box areas.
 * @returns The overlap ratio.
 */
function iou(box1, box2, isIOU) {
    if (isIOU === void 0) { isIOU = true; }
    var overlapWidth = Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left);
    var overlapHeight = Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top);
    // negative extents mean the boxes do not intersect at all
    var interSection = Math.max(0.0, overlapWidth) * Math.max(0.0, overlapHeight);
    if (isIOU) {
        return interSection / (box1.area + box2.area - interSection);
    }
    return interSection / Math.min(box1.area, box2.area);
}

/**
 * Plain (non tensor based) non maximum suppression: greedily picks the highest
 * scoring box and drops all remaining boxes overlapping it by more than
 * iouThreshold, until no candidates are left.
 *
 * @param boxes The candidate boxes.
 * @param scores The score of each candidate box.
 * @param iouThreshold Overlap above which a box is suppressed.
 * @param isIOU (optional, default: true) Overlap measure, see iou().
 * @returns The indices of the picked boxes, in descending score order.
 */
function nonMaxSuppression$1(boxes, scores, iouThreshold, isIOU) {
    if (isIOU === void 0) { isIOU = true; }
    // candidate indices sorted by ascending score, so the best is at the end
    var candidates = scores
        .map(function (score, boxIndex) { return ({ score: score, boxIndex: boxIndex }); })
        .sort(function (c1, c2) { return c1.score - c2.score; })
        .map(function (c) { return c.boxIndex; });
    var pick = [];
    var _loop_pick = function () {
        var curr = candidates.pop();
        pick.push(curr);
        var currBox = boxes[curr];
        // keep only candidates that do not overlap the picked box too much
        candidates = candidates.filter(function (idx) { return iou(currBox, boxes[idx], isIOU) <= iouThreshold; });
    };
    while (candidates.length > 0) {
        _loop_pick();
    }
    return pick;
}
/**
 * Subtracts the given mean r, g, b values channel-wise from an image batch tensor.
 *
 * @param x The input tensor (last dimension holds the 3 color channels).
 * @param meanRgb The mean [r, g, b] values to subtract.
 * @returns The mean-normalized tensor.
 */
function normalize(x, meanRgb) {
    return tidy(function () {
        // shape of a single channel plane: [..., 1]
        var channelShape = x.shape.slice(0, 3).concat([1]);
        var channelMeans = [meanRgb[0], meanRgb[1], meanRgb[2]]
            .map(function (mean) { return fill(channelShape, mean); });
        var avg_rgb = concat(channelMeans, 3);
        return sub(x, avg_rgb);
    });
}
/**
 * Returns a randomly shuffled (Fisher-Yates) shallow copy of the given array.
 * The input array itself is not modified.
 *
 * @param inputArray The array to shuffle.
 * @returns A new array with the same elements in random order.
 */
function shuffleArray(inputArray) {
    var shuffled = inputArray.slice();
    for (var i = shuffled.length - 1; i > 0; i--) {
        // pick a random index in [0, i] and swap it into position i
        var j = Math.floor(Math.random() * (i + 1));
        var tmp = shuffled[i];
        shuffled[i] = shuffled[j];
        shuffled[j] = tmp;
    }
    return shuffled;
}
/**
 * Logistic sigmoid: maps any real number into the open interval (0, 1).
 *
 * @param x The input value.
 * @returns 1 / (1 + e^-x)
 */
function sigmoid$1(x) {
    var expNegX = Math.exp(-x);
    return 1 / (1 + expNegX);
}
/**
 * Inverse of the logistic sigmoid (logit): maps values from (0, 1) back onto
 * the real line.
 *
 * @param x A value in the open interval (0, 1).
 * @returns ln(x / (1 - x))
 */
function inverseSigmoid(x) {
    var odds = x / (1 - x);
    return Math.log(odds);
}
var NeuralNetwork = /** @class */ (function () {
    /**
     * Base class of the face-api networks. Holds the loaded parameter tensors
     * (this._params, a tree of tensors) and a list of mappings from parameter
     * paths (e.g. 'conv0/filters') into that tree, and implements generic
     * weight loading, (un)freezing, serialization and disposal.
     *
     * @param _name Name of the concrete network, used in error messages.
     */
    function NeuralNetwork(_name) {
        this._name = _name;
        this._params = undefined;
        this._paramMappings = [];
    }
    Object.defineProperty(NeuralNetwork.prototype, "params", {
        get: function () { return this._params; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NeuralNetwork.prototype, "paramMappings", {
        get: function () { return this._paramMappings; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(NeuralNetwork.prototype, "isLoaded", {
        // true once weights have been loaded or extracted
        get: function () { return !!this.params; },
        enumerable: true,
        configurable: true
    });
    // resolves a '/'-separated parameter path to the corresponding tensor
    NeuralNetwork.prototype.getParamFromPath = function (paramPath) {
        var _a = this.traversePropertyPath(paramPath), obj = _a.obj, objProp = _a.objProp;
        return obj[objProp];
    };
    // replaces the tensor at the given path, disposing the previous one
    NeuralNetwork.prototype.reassignParamFromPath = function (paramPath, tensor$$1) {
        var _a = this.traversePropertyPath(paramPath), obj = _a.obj, objProp = _a.objProp;
        obj[objProp].dispose();
        obj[objProp] = tensor$$1;
    };
    // returns all parameters as { path, tensor } pairs
    NeuralNetwork.prototype.getParamList = function () {
        var _this = this;
        return this._paramMappings.map(function (_a) {
            var paramPath = _a.paramPath;
            return ({
                path: paramPath,
                tensor: _this.getParamFromPath(paramPath)
            });
        });
    };
    // parameters that are tf.Variables, i.e. trainable
    NeuralNetwork.prototype.getTrainableParams = function () {
        return this.getParamList().filter(function (param) { return param.tensor instanceof Variable; });
    };
    // parameters that are plain (frozen) tensors
    NeuralNetwork.prototype.getFrozenParams = function () {
        return this.getParamList().filter(function (param) { return !(param.tensor instanceof Variable); });
    };
    // converts all frozen parameters into trainable tf.Variables
    NeuralNetwork.prototype.variable = function () {
        var _this = this;
        this.getFrozenParams().forEach(function (_a) {
            var path = _a.path, tensor$$1 = _a.tensor;
            _this.reassignParamFromPath(path, tensor$$1.variable());
        });
    };
    // converts all trainable variables back into constant tensors,
    // disposing the variables
    NeuralNetwork.prototype.freeze = function () {
        var _this = this;
        this.getTrainableParams().forEach(function (_a) {
            var path = _a.path, variable$$1 = _a.tensor;
            var tensor$$1 = tensor(variable$$1.dataSync());
            variable$$1.dispose();
            _this.reassignParamFromPath(path, tensor$$1);
        });
    };
    /**
     * Disposes all parameter tensors and clears the loaded params.
     *
     * @param throwOnRedispose (optional, default: true) If true, throws if any
     * parameter tensor has already been disposed.
     */
    NeuralNetwork.prototype.dispose = function (throwOnRedispose) {
        if (throwOnRedispose === void 0) { throwOnRedispose = true; }
        this.getParamList().forEach(function (param) {
            if (throwOnRedispose && param.tensor.isDisposed) {
                throw new Error("param tensor has already been disposed for path " + param.path);
            }
            param.tensor.dispose();
        });
        this._params = undefined;
    };
    // flattens the data of all parameter tensors into a single Float32Array
    // (counterpart of extractWeights)
    NeuralNetwork.prototype.serializeParams = function () {
        return new Float32Array(this.getParamList()
            .map(function (_a) {
            var tensor$$1 = _a.tensor;
            return Array.from(tensor$$1.dataSync());
        })
            .reduce(function (flat, arr) { return flat.concat(arr); }));
    };
    /**
     * Loads the network weights, either from a flat Float32Array of weights or
     * by fetching the (quantized) model files.
     *
     * @param weightsOrUrl The weights as Float32Array, or a model uri string
     * (may be undefined to load from the default location).
     */
    NeuralNetwork.prototype.load = function (weightsOrUrl) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a, paramMappings, params;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        if (weightsOrUrl instanceof Float32Array) {
                            this.extractWeights(weightsOrUrl);
                            return [2 /*return*/];
                        }
                        if (weightsOrUrl && typeof weightsOrUrl !== 'string') {
                            throw new Error(this._name + ".load - expected model uri, or weights as Float32Array");
                        }
                        return [4 /*yield*/, this.loadQuantizedParams(weightsOrUrl)];
                    case 1:
                        _a = _b.sent(), paramMappings = _a.paramMappings, params = _a.params;
                        this._paramMappings = paramMappings;
                        this._params = params;
                        return [2 /*return*/];
                }
            });
        });
    };
    // initializes params and paramMappings from a flat weight array
    NeuralNetwork.prototype.extractWeights = function (weights) {
        var _a = this.extractParams(weights), paramMappings = _a.paramMappings, params = _a.params;
        this._paramMappings = paramMappings;
        this._params = params;
    };
    // walks this.params along a '/'-separated path and returns the parent
    // object together with the final property name; throws if the path does
    // not exist or does not end at a tensor
    NeuralNetwork.prototype.traversePropertyPath = function (paramPath) {
        if (!this.params) {
            throw new Error("traversePropertyPath - model has no loaded params");
        }
        var result = paramPath.split('/').reduce(function (res, objProp) {
            if (!res.nextObj.hasOwnProperty(objProp)) {
                throw new Error("traversePropertyPath - object does not have property " + objProp + ", for path " + paramPath);
            }
            return { obj: res.nextObj, objProp: objProp, nextObj: res.nextObj[objProp] };
        }, { nextObj: this.params });
        var obj = result.obj, objProp = result.objProp;
        if (!obj || !objProp || !(obj[objProp] instanceof Tensor)) {
            throw new Error("traversePropertyPath - parameter is not a tensor, for path " + paramPath);
        }
        return { obj: obj, objProp: objProp };
    };
    // abstract - to be implemented by the concrete network
    NeuralNetwork.prototype.loadQuantizedParams = function (_) {
        throw new Error(this._name + ".loadQuantizedParams - not implemented");
    };
    // abstract - to be implemented by the concrete network
    NeuralNetwork.prototype.extractParams = function (_) {
        throw new Error(this._name + ".extractParams - not implemented");
    };
    return NeuralNetwork;
}());
var FaceDetection = /** @class */ (function (_super) {
    __extends$1(FaceDetection, _super);
    /**
     * A face detection result. Delegates to ObjectDetection, using the score
     * both as detection score and class score, with an empty class name.
     *
     * @param score The confidence score of the detection.
     * @param relativeBox The bounding box with coordinates relative to the image dimensions.
     * @param imageDims The dimensions of the image the face was detected in.
     */
    function FaceDetection(score, relativeBox, imageDims) {
        return _super.call(this, score, score, '', relativeBox, imageDims) || this;
    }
    return FaceDetection;
}(ObjectDetection));
var FaceDetectionWithLandmarks = /** @class */ (function () {
    /**
     * Combines a face detection with the face landmarks detected for it.
     *
     * @param detection The face detection result.
     * @param unshiftedLandmarks The landmarks positioned relative to the face
     * box, i.e. not yet shifted by the box's top left corner.
     */
    function FaceDetectionWithLandmarks(detection, unshiftedLandmarks) {
        this._detection = detection;
        this._unshiftedLandmarks = unshiftedLandmarks;
    }
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "detection", {
        get: function () { return this._detection; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "unshiftedLandmarks", {
        get: function () { return this._unshiftedLandmarks; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "alignedRect", {
        // a FaceDetection whose box has been aligned to the landmark positions
        // (see FaceLandmarks.align) and rescaled via the image dimensions
        get: function () {
            var rect = this.landmarks.align();
            var imageDims = this.detection.imageDims;
            return new FaceDetection(this._detection.score, rect.rescale(imageDims.reverse()), imageDims);
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "landmarks", {
        // the landmarks shifted into image coordinates by the top left corner
        // of the detection box
        get: function () {
            var _a = this.detection.box, x = _a.x, y = _a.y;
            return this._unshiftedLandmarks.shiftBy(x, y);
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "faceDetection", {
        // alias for backward compatibility
        get: function () { return this.detection; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceDetectionWithLandmarks.prototype, "faceLandmarks", {
        // alias for backward compatibility
        get: function () { return this.landmarks; },
        enumerable: true,
        configurable: true
    });
    // returns a copy with detection and landmarks rescaled to the given image size
    FaceDetectionWithLandmarks.prototype.forSize = function (width, height) {
        var resizedDetection = this._detection.forSize(width, height);
        var resizedLandmarks = this._unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height);
        return new FaceDetectionWithLandmarks(resizedDetection, resizedLandmarks);
    };
    return FaceDetectionWithLandmarks;
}());
// face alignment constants (used by FaceLandmarks.align):
// relX / relY: relative x / y position of the alignment reference point within
// the aligned (square) face box
var relX = 0.5;
var relY = 0.43;
// relScale: eye-to-mouth distance relative to the size of the aligned box
var relScale = 0.45;
var FaceLandmarks = /** @class */ (function () {
    /**
     * Base class for the face landmark positions of a single face.
     *
     * @param relativeFaceLandmarkPositions The landmark points with coordinates
     * relative to the image dimensions.
     * @param imgDims The dimensions of the image region the landmarks refer to.
     * @param shift (optional, default: (0, 0)) Offset added to the absolute
     * landmark positions, e.g. the top left corner of the face box.
     */
    function FaceLandmarks(relativeFaceLandmarkPositions, imgDims, shift) {
        if (shift === void 0) { shift = new Point(0, 0); }
        var width = imgDims.width, height = imgDims.height;
        this._imgDims = new Dimensions(width, height);
        this._shift = shift;
        // scale the relative positions to absolute coordinates and apply the shift
        this._positions = relativeFaceLandmarkPositions.map(function (pt) { return pt.mul(new Point(width, height)).add(shift); });
    }
    Object.defineProperty(FaceLandmarks.prototype, "shift", {
        // returns a defensive copy of the shift point
        get: function () { return new Point(this._shift.x, this._shift.y); },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceLandmarks.prototype, "imageWidth", {
        get: function () { return this._imgDims.width; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceLandmarks.prototype, "imageHeight", {
        get: function () { return this._imgDims.height; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceLandmarks.prototype, "positions", {
        get: function () { return this._positions; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceLandmarks.prototype, "relativePositions", {
        // positions mapped back to relative coordinates, with the shift removed
        get: function () {
            var _this = this;
            return this._positions.map(function (pt) { return pt.sub(_this._shift).div(new Point(_this.imageWidth, _this.imageHeight)); });
        },
        enumerable: true,
        configurable: true
    });
    // returns a copy rescaled to the given image size (shift is reset)
    FaceLandmarks.prototype.forSize = function (width, height) {
        return new this.constructor(this.relativePositions, { width: width, height: height });
    };
    // returns a copy with the given x / y offset as new shift
    FaceLandmarks.prototype.shiftBy = function (x, y) {
        return new this.constructor(this.relativePositions, this._imgDims, new Point(x, y));
    };
    FaceLandmarks.prototype.shiftByPoint = function (pt) {
        return this.shiftBy(pt.x, pt.y);
    };
    /**
     * Aligns the face landmarks after face detection from the relative positions of the faces
     * bounding box, or it's current shift. This function should be used to align the face images
     * after face detection has been performed, before they are passed to the face recognition net.
     * This will make the computed face descriptor more accurate.
     *
     * @param detection (optional) The bounding box of the face or the face detection result. If
     * no argument was passed the position of the face landmarks are assumed to be relative to
     * it's current shift.
     * @returns The bounding box of the aligned face.
     */
    FaceLandmarks.prototype.align = function (detection) {
        if (detection) {
            // re-anchor the landmarks at the box's top left corner, then align
            var box = detection instanceof FaceDetection
                ? detection.box.floor()
                : detection;
            return this.shiftBy(box.x, box.y).align();
        }
        var centers = this.getRefPointsForAlignment();
        var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
        var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
        var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
        // box size such that eye-to-mouth distance takes up relScale of the box
        var size = Math.floor(eyeToMouthDist / relScale);
        var refPoint = getCenterPoint(centers);
        // TODO: pad in case rectangle is out of image bounds
        var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
        var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
        // NOTE(review): clamping width/height to imageWidth + x / imageHeight + y
        // looks like it was meant to be imageWidth - x / imageHeight - y to clip
        // the box at the image borders - confirm against upstream face-api.js
        // before changing this vendored code
        return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y));
    };
    // abstract - implemented by the 5 point / 68 point landmark subclasses
    FaceLandmarks.prototype.getRefPointsForAlignment = function () {
        throw new Error('getRefPointsForAlignment not implemented by base class');
    };
    return FaceLandmarks;
}());
var FaceLandmarks5 = /** @class */ (function (_super) {
    __extends$1(FaceLandmarks5, _super);
    function FaceLandmarks5() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    // alignment reference points: landmark points 0 and 1, plus the center of
    // points 3 and 4 (presumably the two eyes and the mouth corners of the
    // 5 point landmark model - TODO confirm against the producing net)
    FaceLandmarks5.prototype.getRefPointsForAlignment = function () {
        var pts = this.positions;
        return [
            pts[0],
            pts[1],
            getCenterPoint([pts[3], pts[4]])
        ];
    };
    return FaceLandmarks5;
}(FaceLandmarks));
var FaceLandmarks68 = /** @class */ (function (_super) {
    __extends$1(FaceLandmarks68, _super);
    /**
     * 68 point face landmarks. Provides accessors for the individual facial
     * feature contours by their index ranges.
     */
    function FaceLandmarks68() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    // points 0 - 16
    FaceLandmarks68.prototype.getJawOutline = function () {
        return this.positions.slice(0, 17);
    };
    // points 17 - 21
    FaceLandmarks68.prototype.getLeftEyeBrow = function () {
        return this.positions.slice(17, 22);
    };
    // points 22 - 26
    FaceLandmarks68.prototype.getRightEyeBrow = function () {
        return this.positions.slice(22, 27);
    };
    // points 27 - 35
    FaceLandmarks68.prototype.getNose = function () {
        return this.positions.slice(27, 36);
    };
    // points 36 - 41
    FaceLandmarks68.prototype.getLeftEye = function () {
        return this.positions.slice(36, 42);
    };
    // points 42 - 47
    FaceLandmarks68.prototype.getRightEye = function () {
        return this.positions.slice(42, 48);
    };
    // points 48 - 67
    FaceLandmarks68.prototype.getMouth = function () {
        return this.positions.slice(48, 68);
    };
    // alignment reference points: the center points of both eyes and the mouth
    FaceLandmarks68.prototype.getRefPointsForAlignment = function () {
        return [
            this.getLeftEye(),
            this.getRightEye(),
            this.getMouth()
        ].map(getCenterPoint);
    };
    return FaceLandmarks68;
}(FaceLandmarks));
var FaceMatch = /** @class */ (function () {
    /**
     * The result of matching a face descriptor against a set of labeled
     * descriptors: a label together with the distance of the match.
     *
     * @param label The label of the match.
     * @param distance The distance to the matched descriptor.
     */
    function FaceMatch(label, distance) {
        this._label = label;
        this._distance = distance;
    }
    Object.defineProperty(FaceMatch.prototype, "label", {
        get: function () { return this._label; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceMatch.prototype, "distance", {
        get: function () { return this._distance; },
        enumerable: true,
        configurable: true
    });
    /**
     * Formats the match for display, e.g. 'john (0.32)'.
     *
     * @param withDistance (optional, default: true) If true, append the
     * rounded distance in parentheses.
     */
    FaceMatch.prototype.toString = function (withDistance) {
        if (withDistance === void 0) { withDistance = true; }
        return "" + this.label + (withDistance ? " (" + round$1(this.distance) + ")" : '');
    };
    return FaceMatch;
}());
var FullFaceDescription = /** @class */ (function (_super) {
    __extends$1(FullFaceDescription, _super);
    /**
     * A face detection with landmarks, extended by the face descriptor
     * computed for that face.
     *
     * @param detection The face detection result.
     * @param unshiftedLandmarks The landmarks relative to the face box.
     * @param descriptor The face descriptor computed for this face.
     */
    function FullFaceDescription(detection, unshiftedLandmarks, descriptor) {
        var _this = _super.call(this, detection, unshiftedLandmarks) || this;
        _this._descriptor = descriptor;
        return _this;
    }
    Object.defineProperty(FullFaceDescription.prototype, "descriptor", {
        get: function () {
            return this._descriptor;
        },
        enumerable: true,
        configurable: true
    });
    // returns a copy rescaled to the given image size; the descriptor is
    // carried over unchanged
    FullFaceDescription.prototype.forSize = function (width, height) {
        var _a = _super.prototype.forSize.call(this, width, height), detection = _a.detection, landmarks = _a.landmarks;
        return new FullFaceDescription(detection, landmarks, this.descriptor);
    };
    return FullFaceDescription;
}(FaceDetectionWithLandmarks));
var LabeledFaceDescriptors = /** @class */ (function () {
    /**
     * A set of face descriptors belonging to a single labeled subject.
     *
     * @param label The label (must be a string).
     * @param descriptors The face descriptors (must be an array of Float32Array).
     * @throws If label is not a string or descriptors is not an array of Float32Array.
     */
    function LabeledFaceDescriptors(label, descriptors) {
        if (!(typeof label === 'string')) {
            throw new Error('LabeledFaceDescriptors - constructor expected label to be a string');
        }
        if (!Array.isArray(descriptors) || descriptors.some(function (desc) { return !(desc instanceof Float32Array); })) {
            throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array');
        }
        this._label = label;
        this._descriptors = descriptors;
    }
    Object.defineProperty(LabeledFaceDescriptors.prototype, "label", {
        get: function () { return this._label; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(LabeledFaceDescriptors.prototype, "descriptors", {
        get: function () { return this._descriptors; },
        enumerable: true,
        configurable: true
    });
    return LabeledFaceDescriptors;
}());
/**
 * Strokes a polyline through the given points onto a 2d canvas context.
 *
 * @param ctx The 2d rendering context to draw on.
 * @param points The contour points.
 * @param isClosed (optional, default: false) If true, also draw the segment
 * connecting the last point back to the first.
 */
function drawContour(ctx, points, isClosed) {
    if (isClosed === void 0) { isClosed = false; }
    ctx.beginPath();
    // draw each segment between consecutive points
    for (var i = 1; i < points.length; i++) {
        var from = points[i - 1];
        var to = points[i];
        ctx.moveTo(from.x, from.y);
        ctx.lineTo(to.x, to.y);
    }
    if (isClosed) {
        var last = points[points.length - 1];
        var first = points[0];
        // nothing to close (and nothing to stroke) without both endpoints
        if (!last || !first) {
            return;
        }
        ctx.moveTo(last.x, last.y);
        ctx.lineTo(first.x, first.y);
    }
    ctx.stroke();
}
/**
 * Draws face landmarks onto a canvas, either as connected contour lines
 * (68 point landmarks with option drawLines: true) or as individual points.
 *
 * @param canvasArg The canvas (or the id of a canvas element) to draw on.
 * @param faceLandmarks A single landmarks result or an array of results.
 * @param options (optional) Draw options (e.g. lineWidth, color, drawLines).
 * @throws If canvasArg does not resolve to an HTMLCanvasElement.
 */
function drawLandmarks(canvasArg, faceLandmarks, options) {
    var canvas = resolveInput(canvasArg);
    if (!(canvas instanceof HTMLCanvasElement)) {
        throw new Error('drawLandmarks - expected canvas to be of type: HTMLCanvasElement');
    }
    // user options override the defaults
    var drawOptions = Object.assign(getDefaultDrawOptions(options), (options || {}));
    // drawLines defaults to false and is read from the raw options
    var drawLines = Object.assign({ drawLines: false }, (options || {})).drawLines;
    var ctx = getContext2dOrThrow(canvas);
    var lineWidth = drawOptions.lineWidth, _a = drawOptions.color, color = _a === void 0 ? 'blue' : _a;
    var faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks];
    faceLandmarksArray.forEach(function (landmarks) {
        // draw the feature contours for 68 point landmarks if requested
        if (drawLines && landmarks instanceof FaceLandmarks68) {
            ctx.strokeStyle = color;
            ctx.lineWidth = lineWidth;
            drawContour(ctx, landmarks.getJawOutline());
            drawContour(ctx, landmarks.getLeftEyeBrow());
            drawContour(ctx, landmarks.getRightEyeBrow());
            drawContour(ctx, landmarks.getNose());
            drawContour(ctx, landmarks.getLeftEye(), true);
            drawContour(ctx, landmarks.getRightEye(), true);
            drawContour(ctx, landmarks.getMouth(), true);
            return;
        }
        // else draw points (small squares of lineWidth size, centered on the point)
        var ptOffset = lineWidth / 2;
        ctx.fillStyle = color;
        landmarks.positions.forEach(function (pt) { return ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth); });
    });
}
/**
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases of the corresponding image region for each detected face.
 */
function extractFaces(input, detections) {
    return __awaiter$1(this, void 0, void 0, function () {
        var canvas, netInput, tensorOrCanvas, _a, ctx, boxes;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0:
                    canvas = input;
                    // if the input is not already a canvas, convert it to one first
                    // (cases 1-4 below); otherwise jump straight to the extraction
                    if (!!(input instanceof HTMLCanvasElement)) return [3 /*break*/, 5];
                    return [4 /*yield*/, toNetInput(input)];
                case 1:
                    netInput = _b.sent();
                    if (netInput.batchSize > 1) {
                        throw new Error('extractFaces - batchSize > 1 not supported');
                    }
                    tensorOrCanvas = netInput.getInput(0);
                    if (!(tensorOrCanvas instanceof HTMLCanvasElement)) return [3 /*break*/, 2];
                    _a = tensorOrCanvas;
                    return [3 /*break*/, 4];
                case 2: return [4 /*yield*/, imageTensorToCanvas(tensorOrCanvas)];
                case 3:
                    _a = _b.sent();
                    _b.label = 4;
                case 4:
                    canvas = _a;
                    _b.label = 5;
                case 5:
                    ctx = getContext2dOrThrow(canvas);
                    // rescale detection boxes to the canvas size and clip them at its borders
                    boxes = detections.map(function (det) { return det instanceof FaceDetection
                        ? det.forSize(canvas.width, canvas.height).box.floor()
                        : det; })
                        .map(function (box) { return box.clipAtImageBorders(canvas.width, canvas.height); });
                    // copy each face region into a canvas of its own
                    return [2 /*return*/, boxes.map(function (_a) {
                            var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
                            var faceImg = createCanvas({ width: width, height: height });
                            getContext2dOrThrow(faceImg)
                                .putImageData(ctx.getImageData(x, y, width, height), 0, 0);
                            return faceImg;
                        })];
            }
        });
    });
}
/**
 * Extracts the tensors of the image regions containing the detected faces.
 * Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
 * converting them to tensors individually.
 *
 * @param imageTensor The image tensor that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns Tensors of the corresponding image region for each detected face.
 */
function extractFaceTensors(imageTensor, detections) {
    return __awaiter$1(this, void 0, void 0, function () {
        return __generator$1(this, function (_a) {
            // Only single-image tensors are supported (4D tensors must have batchSize 1).
            if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {
                throw new Error('extractFaceTensors - batchSize > 1 not supported');
            }
            return [2 /*return*/, tidy(function () {
                    // Skip the leading batch dimension (if any) when reading the shape.
                    var _a = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0), imgHeight = _a[0], imgWidth = _a[1], numChannels = _a[2];
                    // Normalize detections to boxes in image coordinates, clipped to the borders.
                    var boxes = detections.map(function (det) { return det instanceof FaceDetection
                        ? det.forSize(imgWidth, imgHeight).box
                        : det; })
                        .map(function (box) { return box.clipAtImageBorders(imgWidth, imgHeight); });
                    // Slice each face region out of the (height, width, channels) tensor.
                    var faceTensors = boxes.map(function (_a) {
                        var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
                        return slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels]);
                    });
                    return faceTensors;
                })];
        });
    });
}
|
||
|
|
||
|
/**
 * Applies a depthwise separable convolution with 'same' padding followed by
 * a bias add. Intermediate tensors are cleaned up via tidy.
 */
function depthwiseSeparableConv(x, params, stride) {
    return tidy(function () {
        var convolved = separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, 'same');
        return add(convolved, params.bias);
    });
}
|
||
|
|
||
|
/**
 * Applies a 2D convolution with stride 1 plus bias, optionally followed by
 * a ReLU activation.
 *
 * @param x Input tensor.
 * @param params Conv parameters ({ filters, bias }).
 * @param padding Padding mode, defaults to 'same'.
 * @param withRelu Whether to apply ReLU afterwards, defaults to false.
 */
function convLayer(x, params, padding, withRelu) {
    if (padding === void 0) { padding = 'same'; }
    if (withRelu === void 0) { withRelu = false; }
    return tidy(function () {
        var biased = add(conv2d(x, params.filters, [1, 1], padding), params.bias);
        if (withRelu) {
            return relu(biased);
        }
        return biased;
    });
}
|
||
|
|
||
|
/**
 * Creates an extractor that consumes the filter and bias weights of one
 * convolutional layer from the flat weight stream, recording the tensor
 * paths in paramMappings.
 */
function extractConvParamsFactory(extractWeights, paramMappings) {
    return function (channelsIn, channelsOut, filterSize, mappedPrefix) {
        var weightCount = channelsIn * channelsOut * filterSize * filterSize;
        var filters = tensor4d(extractWeights(weightCount), [filterSize, filterSize, channelsIn, channelsOut]);
        var bias = tensor1d(extractWeights(channelsOut));
        paramMappings.push({ paramPath: mappedPrefix + "/filters" }, { paramPath: mappedPrefix + "/bias" });
        return { filters: filters, bias: bias };
    };
}
|
||
|
|
||
|
/**
 * Creates an extractor that consumes the weights and bias of one fully
 * connected layer from the flat weight stream, recording the tensor paths
 * in paramMappings.
 */
function extractFCParamsFactory(extractWeights, paramMappings) {
    return function (channelsIn, channelsOut, mappedPrefix) {
        var weights = tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]);
        var bias = tensor1d(extractWeights(channelsOut));
        paramMappings.push({ paramPath: mappedPrefix + "/weights" }, { paramPath: mappedPrefix + "/bias" });
        return { weights: weights, bias: bias };
    };
}
|
||
|
|
||
|
var SeparableConvParams = /** @class */ (function () {
    /**
     * Value holder for the weights of one depthwise separable convolution:
     * a depthwise filter, a pointwise filter and a bias tensor.
     */
    function SeparableConvParams(depthwise_filter, pointwise_filter, bias) {
        // Store each tensor on the instance under its canonical property name.
        this.depthwise_filter = depthwise_filter;
        this.pointwise_filter = pointwise_filter;
        this.bias = bias;
    }
    return SeparableConvParams;
}());
|
||
|
|
||
|
/**
 * Creates an extractor that consumes the weights of one 3x3 depthwise
 * separable convolution layer from the flat weight stream, recording the
 * tensor paths in paramMappings.
 */
function extractSeparableConvParamsFactory(extractWeights, paramMappings) {
    return function (channelsIn, channelsOut, mappedPrefix) {
        var dwFilter = tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]);
        var pwFilter = tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]);
        var biasVec = tensor1d(extractWeights(channelsOut));
        paramMappings.push(
            { paramPath: mappedPrefix + "/depthwise_filter" },
            { paramPath: mappedPrefix + "/pointwise_filter" },
            { paramPath: mappedPrefix + "/bias" }
        );
        return new SeparableConvParams(dwFilter, pwFilter, biasVec);
    };
}
|
||
|
/**
 * Creates a loader that reads the three weight entries of one depthwise
 * separable convolution layer from a weight map by path prefix.
 */
function loadSeparableConvParamsFactory(extractWeightEntry) {
    return function (prefix) {
        var dwFilter = extractWeightEntry(prefix + "/depthwise_filter", 4);
        var pwFilter = extractWeightEntry(prefix + "/pointwise_filter", 4);
        var biasVec = extractWeightEntry(prefix + "/bias", 1);
        return new SeparableConvParams(dwFilter, pwFilter, biasVec);
    };
}
|
||
|
|
||
|
// True for any value whose typeof is 'number' (including NaN and Infinity).
var isNumber = function (value) { return typeof value === 'number'; };
|
||
|
/**
 * Validates a TinyYolov2 model config, throwing a descriptive Error on the
 * first invalid field. Checks withSeparableConvs, iouThreshold, classes,
 * anchors and (when present) meanRgb.
 */
function validateConfig(config) {
    if (!config) {
        throw new Error("invalid config: " + config);
    }
    if (typeof config.withSeparableConvs !== 'boolean') {
        throw new Error("config.withSeparableConvs has to be a boolean, have: " + config.withSeparableConvs);
    }
    if (!isNumber(config.iouThreshold) || config.iouThreshold < 0 || config.iouThreshold > 1.0) {
        throw new Error("config.iouThreshold has to be a number between [0, 1], have: " + config.iouThreshold);
    }
    var classesValid = Array.isArray(config.classes)
        && config.classes.length > 0
        && config.classes.every(function (className) { return typeof className === 'string'; });
    if (!classesValid) {
        throw new Error("config.classes has to be an array class names: string[], have: " + JSON.stringify(config.classes));
    }
    var anchorsValid = Array.isArray(config.anchors)
        && config.anchors.length > 0
        && config.anchors.every(function (anchor) {
            // Guard against null/undefined entries before reading x/y.
            var a = anchor || {};
            return isNumber(a.x) && isNumber(a.y);
        });
    if (!anchorsValid) {
        throw new Error("config.anchors has to be an array of { x: number, y: number }, have: " + JSON.stringify(config.anchors));
    }
    // meanRgb is optional, but must be a numeric triple when present.
    if (config.meanRgb) {
        var meanRgbValid = Array.isArray(config.meanRgb)
            && config.meanRgb.length === 3
            && config.meanRgb.every(isNumber);
        if (!meanRgbValid) {
            throw new Error("config.meanRgb has to be an array of shape [number, number, number], have: " + JSON.stringify(config.meanRgb));
        }
    }
}
|
||
|
/**
 * Ensures the four loss-scale parameters required for training are numbers,
 * returning the config unchanged if valid.
 */
function validateTrainConfig(config) {
    var scales = [config.noObjectScale, config.objectScale, config.coordScale, config.classScale];
    if (!scales.every(isNumber)) {
        throw new Error("for training you have to specify noObjectScale, objectScale, coordScale, classScale parameters in your config.json file");
    }
    return config;
}
|
||
|
|
||
|
// Width/height in pixels of one grid cell of the YOLO output map; the
// options class requires inputSize to be divisible by this value.
var CELL_SIZE = 32;
// Default per-layer channel counts used when the model config does not
// provide its own filterSizes (the leading 3 presumably corresponds to the
// RGB input channels — see extractParams usage).
var DEFAULT_FILTER_SIZES = [
    3, 16, 32, 64, 128, 256, 512, 1024, 1024
];
|
||
|
|
||
|
/**
 * Leaky ReLU activation with slope 0.1 (the exact float32 constant
 * 0.10000000149011612), expressed as relu(x - 0.1x) + 0.1x, which is
 * equivalent to max(x, 0.1x).
 */
function leaky(x) {
    return tidy(function () {
        var scaled = mul(x, scalar(0.10000000149011612));
        return add(relu(sub(x, scaled)), scaled);
    });
}
|
||
|
|
||
|
/**
 * 3x3 convolution with folded batch-norm (sub, truediv) and bias, followed
 * by the leaky activation. Padding is done explicitly (1px each spatial
 * side) so the convolution itself runs with 'valid' padding.
 */
function convWithBatchNorm(x, params) {
    return tidy(function () {
        var padded = pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]);
        var out = conv2d(padded, params.conv.filters, [1, 1], 'valid');
        // Apply the folded batch-norm parameters, then the conv bias.
        out = mul(sub(out, params.bn.sub), params.bn.truediv);
        out = add(out, params.conv.bias);
        return leaky(out);
    });
}
|
||
|
|
||
|
/**
 * Depthwise separable convolution with bias and leaky activation. Padding
 * is done explicitly (1px each spatial side) so the convolution itself runs
 * with 'valid' padding.
 */
function depthwiseSeparableConv$1(x, params) {
    return tidy(function () {
        var padded = pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]);
        var conved = separableConv2d(padded, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid');
        return leaky(add(conved, params.bias));
    });
}
|
||
|
|
||
|
/**
 * Bundles the weight extractors needed by extractParams: plain conv,
 * conv-with-batch-norm, and depthwise separable conv. All extractors consume
 * weights from the shared flat stream and record paths in paramMappings.
 */
function extractorsFactory(extractWeights, paramMappings) {
    var extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);
    var extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);
    // Reads the folded batch-norm sub/truediv vectors of the given size.
    function extractBatchNormParams(size, mappedPrefix) {
        var bnSub = tensor1d(extractWeights(size));
        var bnTruediv = tensor1d(extractWeights(size));
        paramMappings.push({ paramPath: mappedPrefix + "/sub" }, { paramPath: mappedPrefix + "/truediv" });
        return { sub: bnSub, truediv: bnTruediv };
    }
    // Reads one 3x3 conv layer followed by its batch-norm parameters.
    function extractConvWithBatchNormParams(channelsIn, channelsOut, mappedPrefix) {
        var conv = extractConvParams(channelsIn, channelsOut, 3, mappedPrefix + "/conv");
        var bn = extractBatchNormParams(channelsOut, mappedPrefix + "/bn");
        return { conv: conv, bn: bn };
    }
    return {
        extractConvParams: extractConvParams,
        extractConvWithBatchNormParams: extractConvWithBatchNormParams,
        extractSeparableConvParams: extractSeparableConvParams
    };
}
|
||
|
/**
 * Extracts the TinyYolov2 network parameters from a flat weight array.
 *
 * @param weights The uncompressed weights, laid out in extraction order.
 * @param config The model config; withSeparableConvs selects the mobilenet
 *   (separable conv) layout, otherwise conv + folded batch-norm layers.
 * @param boxEncodingSize Number of values encoding each box (5 plus optional class scores).
 * @param filterSizes Per-layer channel counts (7, 8 or 9 entries; missing
 *   entries skip conv6/conv7 in the separable variant).
 * @returns The extracted params together with the path mappings of each tensor.
 * @throws Error if any weights remain unconsumed after extraction.
 */
function extractParams(weights, config, boxEncodingSize, filterSizes) {
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var paramMappings = [];
    var _b = extractorsFactory(extractWeights, paramMappings), extractConvParams = _b.extractConvParams, extractConvWithBatchNormParams = _b.extractConvWithBatchNormParams, extractSeparableConvParams = _b.extractSeparableConvParams;
    // Channel counts of consecutive layers; s7/s8 may be undefined for 7/8-layer configs.
    var s0 = filterSizes[0], s1 = filterSizes[1], s2 = filterSizes[2], s3 = filterSizes[3], s4 = filterSizes[4], s5 = filterSizes[5], s6 = filterSizes[6], s7 = filterSizes[7], s8 = filterSizes[8];
    var params;
    if (config.withSeparableConvs) {
        // conv0 is optionally a regular convolution; all following layers are separable.
        var conv0 = config.isFirstLayerConv2d
            ? extractConvParams(s0, s1, 3, 'conv0')
            : extractSeparableConvParams(s0, s1, 'conv0');
        var conv1 = extractSeparableConvParams(s1, s2, 'conv1');
        var conv2 = extractSeparableConvParams(s2, s3, 'conv2');
        var conv3 = extractSeparableConvParams(s3, s4, 'conv3');
        var conv4 = extractSeparableConvParams(s4, s5, 'conv4');
        var conv5 = extractSeparableConvParams(s5, s6, 'conv5');
        // conv6/conv7 only exist when the config specifies 8 resp. 9 filter sizes.
        var conv6 = s7 ? extractSeparableConvParams(s6, s7, 'conv6') : undefined;
        var conv7 = s8 ? extractSeparableConvParams(s7, s8, 'conv7') : undefined;
        // The final 1x1 conv maps onto 5 anchor boxes per grid cell.
        var conv8 = extractConvParams(s8 || s7 || s6, 5 * boxEncodingSize, 1, 'conv8');
        params = { conv0: conv0, conv1: conv1, conv2: conv2, conv3: conv3, conv4: conv4, conv5: conv5, conv6: conv6, conv7: conv7, conv8: conv8 };
    }
    else {
        // Full TinyYolov2: every layer is a conv with folded batch-norm parameters.
        var conv0 = extractConvWithBatchNormParams(s0, s1, 'conv0');
        var conv1 = extractConvWithBatchNormParams(s1, s2, 'conv1');
        var conv2 = extractConvWithBatchNormParams(s2, s3, 'conv2');
        var conv3 = extractConvWithBatchNormParams(s3, s4, 'conv3');
        var conv4 = extractConvWithBatchNormParams(s4, s5, 'conv4');
        var conv5 = extractConvWithBatchNormParams(s5, s6, 'conv5');
        var conv6 = extractConvWithBatchNormParams(s6, s7, 'conv6');
        var conv7 = extractConvWithBatchNormParams(s7, s8, 'conv7');
        var conv8 = extractConvParams(s8, 5 * boxEncodingSize, 1, 'conv8');
        params = { conv0: conv0, conv1: conv1, conv2: conv2, conv3: conv3, conv4: conv4, conv5: conv5, conv6: conv6, conv7: conv7, conv8: conv8 };
    }
    if (getRemainingWeights().length !== 0) {
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    return { params: params, paramMappings: paramMappings };
}
|
||
|
|
||
|
/**
 * Bundles the weight-map loaders needed by loadQuantizedParams: plain conv,
 * conv-with-batch-norm, and depthwise separable conv. Each loader resolves
 * tensors from the weight map by path prefix and records the paths in
 * paramMappings.
 */
function extractorsFactory$1(weightMap, paramMappings) {
    var extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);
    var extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);
    // Resolves the folded batch-norm (sub, truediv) vectors for one layer.
    function extractBatchNormParams(prefix) {
        return {
            sub: extractWeightEntry(prefix + "/sub", 1),
            truediv: extractWeightEntry(prefix + "/truediv", 1)
        };
    }
    // Resolves the filters and bias of one plain conv layer.
    function extractConvParams(prefix) {
        return {
            filters: extractWeightEntry(prefix + "/filters", 4),
            bias: extractWeightEntry(prefix + "/bias", 1)
        };
    }
    // Resolves a conv layer together with its batch-norm parameters.
    function extractConvWithBatchNormParams(prefix) {
        return {
            conv: extractConvParams(prefix + "/conv"),
            bn: extractBatchNormParams(prefix + "/bn")
        };
    }
    return {
        extractConvParams: extractConvParams,
        extractConvWithBatchNormParams: extractConvWithBatchNormParams,
        extractSeparableConvParams: extractSeparableConvParams
    };
}
|
||
|
/**
 * Loads the quantized TinyYolov2 weights from the given uri and resolves
 * them into the layer parameter structure expected by the forward pass.
 *
 * @param uri Location of the model weights.
 * @param config The model config (selects separable vs. batch-norm layout).
 * @param defaultModelName Fallback model name passed to loadWeightMap, defaults to ''.
 * @returns The resolved params together with the path mappings of each tensor.
 */
function loadQuantizedParams(uri, config, defaultModelName) {
    if (defaultModelName === void 0) { defaultModelName = ''; }
    // Transpiled async function: case 0 awaits the weight map fetch,
    // case 1 resolves all layer parameters from it.
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractConvParams, extractConvWithBatchNormParams, extractSeparableConvParams, params, numFilters;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, defaultModelName)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = extractorsFactory$1(weightMap, paramMappings), extractConvParams = _a.extractConvParams, extractConvWithBatchNormParams = _a.extractConvWithBatchNormParams, extractSeparableConvParams = _a.extractSeparableConvParams;
                    if (config.withSeparableConvs) {
                        // 9 layers unless the config specifies fewer filterSizes;
                        // conv6/conv7 are omitted for 7/8-layer configs.
                        numFilters = (config.filterSizes && config.filterSizes.length || 9);
                        params = {
                            conv0: config.isFirstLayerConv2d ? extractConvParams('conv0') : extractSeparableConvParams('conv0'),
                            conv1: extractSeparableConvParams('conv1'),
                            conv2: extractSeparableConvParams('conv2'),
                            conv3: extractSeparableConvParams('conv3'),
                            conv4: extractSeparableConvParams('conv4'),
                            conv5: extractSeparableConvParams('conv5'),
                            conv6: numFilters > 7 ? extractSeparableConvParams('conv6') : undefined,
                            conv7: numFilters > 8 ? extractSeparableConvParams('conv7') : undefined,
                            conv8: extractConvParams('conv8')
                        };
                    }
                    else {
                        // Full TinyYolov2: every layer is a conv with folded batch-norm parameters.
                        params = {
                            conv0: extractConvWithBatchNormParams('conv0'),
                            conv1: extractConvWithBatchNormParams('conv1'),
                            conv2: extractConvWithBatchNormParams('conv2'),
                            conv3: extractConvWithBatchNormParams('conv3'),
                            conv4: extractConvWithBatchNormParams('conv4'),
                            conv5: extractConvWithBatchNormParams('conv5'),
                            conv6: extractConvWithBatchNormParams('conv6'),
                            conv7: extractConvWithBatchNormParams('conv7'),
                            conv8: extractConvParams('conv8')
                        };
                    }
                    // Free any tensors in the weight map that were not mapped to a param.
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: params, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
var TinyYolov2SizeType;
(function (TinyYolov2SizeType) {
    // Reverse-mapped enum of supported input sizes:
    // TinyYolov2SizeType.XS === 224 and TinyYolov2SizeType[224] === 'XS'.
    var sizes = { XS: 224, SM: 320, MD: 416, LG: 608 };
    Object.keys(sizes).forEach(function (name) {
        TinyYolov2SizeType[TinyYolov2SizeType[name] = sizes[name]] = name;
    });
})(TinyYolov2SizeType || (TinyYolov2SizeType = {}));
|
||
|
var TinyYolov2Options = /** @class */ (function () {
    /**
     * Validated options for TinyYolov2 detection.
     *
     * @param options Optional { inputSize, scoreThreshold }. inputSize defaults
     *   to 416 and must be a number divisible by 32; scoreThreshold defaults to
     *   0.5 and must lie strictly between 0 and 1.
     * @throws Error when either option fails validation.
     */
    function TinyYolov2Options(options) {
        var opts = options === void 0 ? {} : options;
        this._name = 'TinyYolov2Options';
        this._inputSize = opts.inputSize || 416;
        this._scoreThreshold = opts.scoreThreshold || 0.5;
        if (typeof this._inputSize !== 'number' || this._inputSize % 32 !== 0) {
            throw new Error(this._name + " - expected inputSize to be a number divisible by 32");
        }
        if (typeof this._scoreThreshold !== 'number' || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) {
            throw new Error(this._name + " - expected scoreThreshold to be a number between 0 and 1");
        }
    }
    Object.defineProperty(TinyYolov2Options.prototype, "inputSize", {
        get: function () { return this._inputSize; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(TinyYolov2Options.prototype, "scoreThreshold", {
        get: function () { return this._scoreThreshold; },
        enumerable: true,
        configurable: true
    });
    return TinyYolov2Options;
}());
|
||
|
|
||
|
/**
 * TinyYolov2 object detection network (transpiled TS class). Supports two
 * architectures selected by config.withSeparableConvs: the mobilenet-style
 * separable-conv variant and the classic conv + batch-norm variant.
 */
var TinyYolov2 = /** @class */ (function (_super) {
    __extends$1(TinyYolov2, _super);
    function TinyYolov2(config) {
        var _this = _super.call(this, 'TinyYolov2') || this;
        // Fail fast on malformed configs before any weights are loaded.
        validateConfig(config);
        _this._config = config;
        return _this;
    }
    Object.defineProperty(TinyYolov2.prototype, "config", {
        // The validated model config passed to the constructor.
        get: function () {
            return this._config;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(TinyYolov2.prototype, "withClassScores", {
        // Class scores are predicted when explicitly enabled or when there is
        // more than one class to distinguish.
        get: function () {
            return this.config.withClassScores || this.config.classes.length > 1;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(TinyYolov2.prototype, "boxEncodingSize", {
        // 4 box coordinates + 1 objectness score, plus one score per class
        // when class scores are predicted.
        get: function () {
            return 5 + (this.withClassScores ? this.config.classes.length : 0);
        },
        enumerable: true,
        configurable: true
    });
    // Forward pass of the classic TinyYolov2 architecture: conv + batch-norm
    // layers interleaved with 2x2 max pooling (the last pool uses stride 1).
    TinyYolov2.prototype.runTinyYolov2 = function (x, params) {
        var out = convWithBatchNorm(x, params.conv0);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = convWithBatchNorm(out, params.conv1);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = convWithBatchNorm(out, params.conv2);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = convWithBatchNorm(out, params.conv3);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = convWithBatchNorm(out, params.conv4);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = convWithBatchNorm(out, params.conv5);
        out = maxPool(out, [2, 2], [1, 1], 'same');
        out = convWithBatchNorm(out, params.conv6);
        out = convWithBatchNorm(out, params.conv7);
        return convLayer(out, params.conv8, 'valid', false);
    };
    // Forward pass of the mobilenet-style variant: depthwise separable convs
    // (conv6/conv7 are optional for the smaller configs).
    TinyYolov2.prototype.runMobilenet = function (x, params) {
        var out = this.config.isFirstLayerConv2d
            ? leaky(convLayer(x, params.conv0, 'valid', false))
            : depthwiseSeparableConv$1(x, params.conv0);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = depthwiseSeparableConv$1(out, params.conv1);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = depthwiseSeparableConv$1(out, params.conv2);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = depthwiseSeparableConv$1(out, params.conv3);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = depthwiseSeparableConv$1(out, params.conv4);
        out = maxPool(out, [2, 2], [2, 2], 'same');
        out = depthwiseSeparableConv$1(out, params.conv5);
        out = maxPool(out, [2, 2], [1, 1], 'same');
        out = params.conv6 ? depthwiseSeparableConv$1(out, params.conv6) : out;
        out = params.conv7 ? depthwiseSeparableConv$1(out, params.conv7) : out;
        return convLayer(out, params.conv8, 'valid', false);
    };
    // Runs the network on an already-converted NetInput, returning the raw
    // output tensor. Throws if weights have not been loaded yet.
    TinyYolov2.prototype.forwardInput = function (input, inputSize) {
        var _this = this;
        var params = this.params;
        if (!params) {
            throw new Error('TinyYolov2 - load model before inference');
        }
        return tidy(function () {
            var batchTensor = input.toBatchTensor(inputSize, false).toFloat();
            // Optional mean subtraction, then scale pixel values down by 256.
            batchTensor = _this.config.meanRgb
                ? normalize(batchTensor, _this.config.meanRgb)
                : batchTensor;
            batchTensor = batchTensor.div(scalar(256));
            return _this.config.withSeparableConvs
                ? _this.runMobilenet(batchTensor, params)
                : _this.runTinyYolov2(batchTensor, params);
        });
    };
    // Async wrapper around forwardInput that first converts arbitrary input
    // media to a NetInput.
    TinyYolov2.prototype.forward = function (input, inputSize) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [4 /*yield*/, _a.apply(this, [_b.sent(), inputSize])];
                    case 2: return [2 /*return*/, _b.sent()];
                }
            });
        });
    };
    /**
     * Runs detection on the input: forward pass, box extraction at the given
     * score threshold, then non-max suppression. Returns ObjectDetection
     * instances relative to the original input dimensions.
     */
    TinyYolov2.prototype.detect = function (input, forwardParams) {
        if (forwardParams === void 0) { forwardParams = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var _a, inputSize, scoreThreshold, netInput, out, out0, inputDimensions, results, boxes, scores, classScores, classNames, indices, detections;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = new TinyYolov2Options(forwardParams), inputSize = _a.inputSize, scoreThreshold = _a.scoreThreshold;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1:
                        netInput = _b.sent();
                        return [4 /*yield*/, this.forwardInput(netInput, inputSize)];
                    case 2:
                        out = _b.sent();
                        // Take the first (and only) image of the batch.
                        out0 = tidy(function () { return unstack(out)[0].expandDims(); });
                        inputDimensions = {
                            width: netInput.getInputWidth(0),
                            height: netInput.getInputHeight(0)
                        };
                        results = this.extractBoxes(out0, netInput.getReshapedInputDimensions(0), scoreThreshold);
                        // Raw network outputs are no longer needed once boxes are extracted.
                        out.dispose();
                        out0.dispose();
                        boxes = results.map(function (res) { return res.box; });
                        scores = results.map(function (res) { return res.score; });
                        classScores = results.map(function (res) { return res.classScore; });
                        classNames = results.map(function (res) { return _this.config.classes[res.label]; });
                        // Suppress overlapping boxes; boxes are rescaled to inputSize for IOU computation.
                        indices = nonMaxSuppression$1(boxes.map(function (box) { return box.rescale(inputSize); }), scores, this.config.iouThreshold, true);
                        detections = indices.map(function (idx) {
                            return new ObjectDetection(scores[idx], classScores[idx], classNames[idx], boxes[idx], inputDimensions);
                        });
                        return [2 /*return*/, detections];
                }
            });
        });
    };
    // Loads the quantized weights for this model from the given uri.
    TinyYolov2.prototype.loadQuantizedParams = function (modelUri, defaultModelName) {
        if (defaultModelName === void 0) { defaultModelName = ''; }
        if (!modelUri) {
            throw new Error('loadQuantizedParams - please specify the modelUri');
        }
        return loadQuantizedParams(modelUri, this.config, defaultModelName);
    };
    // Extracts the network parameters from a flat weight array, validating
    // that the config declares 7, 8 or 9 filter sizes.
    TinyYolov2.prototype.extractParams = function (weights) {
        var filterSizes = this.config.filterSizes || DEFAULT_FILTER_SIZES;
        var numFilters = filterSizes ? filterSizes.length : undefined;
        if (numFilters !== 7 && numFilters !== 8 && numFilters !== 9) {
            throw new Error("TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found " + numFilters + " filterSizes in config");
        }
        return extractParams(weights, this.config, this.boxEncodingSize, filterSizes);
    };
    /**
     * Decodes the raw output tensor into candidate boxes. For every grid cell
     * and anchor, the objectness score is computed; cells above scoreThreshold
     * are decoded from the YOLO box encoding (sigmoid offsets, exp sizes scaled
     * by the anchors) into BoundingBox coordinates relative to the input.
     */
    TinyYolov2.prototype.extractBoxes = function (outputTensor, inputBlobDimensions, scoreThreshold) {
        var _this = this;
        var width = inputBlobDimensions.width, height = inputBlobDimensions.height;
        var inputSize = Math.max(width, height);
        // Correction factors undo the letterboxing of non-square inputs.
        var correctionFactorX = inputSize / width;
        var correctionFactorY = inputSize / height;
        var numCells = outputTensor.shape[1];
        var numBoxes = this.config.anchors.length;
        var _a = tidy(function () {
            var reshaped = outputTensor.reshape([numCells, numCells, numBoxes, _this.boxEncodingSize]);
            var boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, numBoxes, 4]);
            var scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, numBoxes, 1]);
            var classScores = _this.withClassScores
                ? softmax(reshaped.slice([0, 0, 0, 5], [numCells, numCells, numBoxes, _this.config.classes.length]), 3)
                : scalar(0);
            return [boxes, scores, classScores];
        }), boxesTensor = _a[0], scoresTensor = _a[1], classScoresTensor = _a[2];
        var results = [];
        for (var row = 0; row < numCells; row++) {
            for (var col = 0; col < numCells; col++) {
                for (var anchor = 0; anchor < numBoxes; anchor++) {
                    var score = sigmoid$1(scoresTensor.get(row, col, anchor, 0));
                    if (!scoreThreshold || score > scoreThreshold) {
                        // Box center: cell index plus sigmoid offset, normalized by grid size.
                        var ctX = ((col + sigmoid$1(boxesTensor.get(row, col, anchor, 0))) / numCells) * correctionFactorX;
                        var ctY = ((row + sigmoid$1(boxesTensor.get(row, col, anchor, 1))) / numCells) * correctionFactorY;
                        // Box size: exp of the prediction scaled by the anchor dimensions.
                        var width_1 = ((Math.exp(boxesTensor.get(row, col, anchor, 2)) * this.config.anchors[anchor].x) / numCells) * correctionFactorX;
                        var height_1 = ((Math.exp(boxesTensor.get(row, col, anchor, 3)) * this.config.anchors[anchor].y) / numCells) * correctionFactorY;
                        var x = (ctX - (width_1 / 2));
                        var y = (ctY - (height_1 / 2));
                        var pos = { row: row, col: col, anchor: anchor };
                        var _b = this.withClassScores
                            ? this.extractPredictedClass(classScoresTensor, pos)
                            : { classScore: 1, label: 0 }, classScore = _b.classScore, label = _b.label;
                        results.push(__assign$1({ box: new BoundingBox(x, y, x + width_1, y + height_1), score: score, classScore: score * classScore, label: label }, pos));
                    }
                }
            }
        }
        boxesTensor.dispose();
        scoresTensor.dispose();
        classScoresTensor.dispose();
        return results;
    };
    // Returns the highest-scoring class (score and label index) at the given
    // grid position/anchor of the class-scores tensor.
    TinyYolov2.prototype.extractPredictedClass = function (classesTensor, pos) {
        var row = pos.row, col = pos.col, anchor = pos.anchor;
        return Array(this.config.classes.length).fill(0)
            .map(function (_, i) { return classesTensor.get(row, col, anchor, i); })
            .map(function (classScore, label) { return ({
            classScore: classScore,
            label: label
        }); })
            .reduce(function (max$$1, curr) { return max$$1.classScore > curr.classScore ? max$$1 : curr; });
    };
    return TinyYolov2;
}(NeuralNetwork));
|
||
|
|
||
|
var TinyYolov2LossFunction = /** @class */ (function () {
|
||
|
function TinyYolov2LossFunction(outputTensor, groundTruth, predictedBoxes, reshapedImgDims, config) {
|
||
|
this._config = config;
|
||
|
this._reshapedImgDims = reshapedImgDims;
|
||
|
this._outputTensor = outputTensor;
|
||
|
this._predictedBoxes = predictedBoxes;
|
||
|
this.validateGroundTruthBoxes(groundTruth);
|
||
|
this._groundTruth = this.assignGroundTruthToAnchors(groundTruth);
|
||
|
var groundTruthMask = this.createGroundTruthMask();
|
||
|
var _a = this.createCoordAndScoreMasks(), coordBoxOffsetMask = _a.coordBoxOffsetMask, coordBoxSizeMask = _a.coordBoxSizeMask, scoreMask = _a.scoreMask;
|
||
|
this.noObjectLossMask = tidy(function () { return mul(scoreMask, sub(scalar(1), groundTruthMask)); });
|
||
|
this.objectLossMask = tidy(function () { return mul(scoreMask, groundTruthMask); });
|
||
|
this.coordBoxOffsetMask = tidy(function () { return mul(coordBoxOffsetMask, groundTruthMask); });
|
||
|
this.coordBoxSizeMask = tidy(function () { return mul(coordBoxSizeMask, groundTruthMask); });
|
||
|
var classScoresMask = tidy(function () { return sub(scalar(1), coordBoxOffsetMask.add(coordBoxSizeMask).add(scoreMask)); });
|
||
|
this.groundTruthClassScoresMask = tidy(function () { return mul(classScoresMask, groundTruthMask); });
|
||
|
}
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "config", {
|
||
|
get: function () {
|
||
|
return this._config;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "reshapedImgDims", {
|
||
|
get: function () {
|
||
|
return this._reshapedImgDims;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "outputTensor", {
|
||
|
get: function () {
|
||
|
return this._outputTensor;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "groundTruth", {
|
||
|
get: function () {
|
||
|
return this._groundTruth;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "predictedBoxes", {
|
||
|
get: function () {
|
||
|
return this._predictedBoxes;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "inputSize", {
|
||
|
get: function () {
|
||
|
return Math.max(this.reshapedImgDims.width, this.reshapedImgDims.height);
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "withClassScores", {
|
||
|
get: function () {
|
||
|
return this._config.withClassScores || this._config.classes.length > 1;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "boxEncodingSize", {
|
||
|
get: function () {
|
||
|
return 5 + (this.withClassScores ? this._config.classes.length : 0);
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "anchors", {
|
||
|
get: function () {
|
||
|
return this._config.anchors;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "numBoxes", {
|
||
|
get: function () {
|
||
|
return this.anchors.length;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "numCells", {
|
||
|
get: function () {
|
||
|
return this.inputSize / CELL_SIZE;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
Object.defineProperty(TinyYolov2LossFunction.prototype, "gridCellEncodingSize", {
|
||
|
get: function () {
|
||
|
return this.boxEncodingSize * this.numBoxes;
|
||
|
},
|
||
|
enumerable: true,
|
||
|
configurable: true
|
||
|
});
|
||
|
TinyYolov2LossFunction.prototype.toOutputTensorShape = function (tensor$$1) {
|
||
|
var _this = this;
|
||
|
return tidy(function () { return tensor$$1.reshape([1, _this.numCells, _this.numCells, _this.gridCellEncodingSize]); });
|
||
|
};
|
||
|
TinyYolov2LossFunction.prototype.computeLoss = function () {
|
||
|
var _this = this;
|
||
|
return tidy(function () {
|
||
|
var noObjectLoss = _this.computeNoObjectLoss();
|
||
|
var objectLoss = _this.computeObjectLoss();
|
||
|
var coordLoss = _this.computeCoordLoss();
|
||
|
var classLoss = _this.withClassScores
|
||
|
? _this.computeClassLoss()
|
||
|
: scalar(0);
|
||
|
var totalLoss = tidy(function () { return noObjectLoss.add(objectLoss).add(coordLoss).add(classLoss); });
|
||
|
return {
|
||
|
noObjectLoss: noObjectLoss,
|
||
|
objectLoss: objectLoss,
|
||
|
coordLoss: coordLoss,
|
||
|
classLoss: classLoss,
|
||
|
totalLoss: totalLoss
|
||
|
};
|
||
|
});
|
||
|
};
|
||
|
/** No-object loss: penalizes predicted objectness where no ground truth box lives. */
TinyYolov2LossFunction.prototype.computeNoObjectLoss = function () {
    var self = this;
    return tidy(function () {
        var mask = self.toOutputTensorShape(self.noObjectLossMask);
        return self.computeLossTerm(self.config.noObjectScale, mask, sigmoid(self.outputTensor));
    });
};
/** Object loss: difference between ground truth IOU and predicted objectness. */
TinyYolov2LossFunction.prototype.computeObjectLoss = function () {
    var self = this;
    return tidy(function () {
        var mask = self.toOutputTensorShape(self.objectLossMask);
        var iouMinusScore = sub(self.toOutputTensorShape(self.computeIous()), sigmoid(self.outputTensor));
        return self.computeLossTerm(self.config.objectScale, mask, iouMinusScore);
    });
};
/** Class loss: one-hot ground truth class scores vs. softmaxed predictions. */
TinyYolov2LossFunction.prototype.computeClassLoss = function () {
    var self = this;
    return tidy(function () {
        var classLossTensor = tidy(function () {
            var reshaped = self.outputTensor.reshape([self.numCells, self.numCells, self.numBoxes, self.boxEncodingSize]);
            var predClassScores = mul(softmax(reshaped, 3), self.groundTruthClassScoresMask);
            return sub(self.createOneHotClassScoreMask(), predClassScores);
        });
        return self.computeLossTerm(self.config.classScale, scalar(1), classLossTensor);
    });
};
/** Coordinate loss: sum of box offset and box size errors. */
TinyYolov2LossFunction.prototype.computeCoordLoss = function () {
    var self = this;
    return tidy(function () {
        var coordError = add(self.computeCoordBoxOffsetError(), self.computeCoordBoxSizeError());
        return self.computeLossTerm(self.config.coordScale, scalar(1), coordError);
    });
};
|
||
|
/** Error between ground truth and predicted box center offsets (sigmoid space). */
TinyYolov2LossFunction.prototype.computeCoordBoxOffsetError = function () {
    var self = this;
    return tidy(function () {
        var mask = self.toOutputTensorShape(self.coordBoxOffsetMask);
        var gtBoxOffsets = mul(mask, self.toOutputTensorShape(self.computeCoordBoxOffsets()));
        var predBoxOffsets = mul(mask, sigmoid(self.outputTensor));
        return sub(gtBoxOffsets, predBoxOffsets);
    });
};
/** Error between ground truth and predicted box sizes (log space, raw output). */
TinyYolov2LossFunction.prototype.computeCoordBoxSizeError = function () {
    var self = this;
    return tidy(function () {
        var mask = self.toOutputTensorShape(self.coordBoxSizeMask);
        var gtBoxSizes = mul(mask, self.toOutputTensorShape(self.computeCoordBoxSizes()));
        var predBoxSizes = mul(mask, self.outputTensor);
        return sub(gtBoxSizes, predBoxSizes);
    });
};
/** Scales the masked squared-sum of a loss tensor by the given factor. */
TinyYolov2LossFunction.prototype.computeLossTerm = function (scale, mask, lossTensor) {
    var self = this;
    return tidy(function () {
        return mul(scalar(scale), self.squaredSumOverMask(mask, lossTensor));
    });
};
/** Sum of squares of the loss tensor entries selected by the mask. */
TinyYolov2LossFunction.prototype.squaredSumOverMask = function (mask, lossTensor) {
    return tidy(function () {
        var masked = mul(mask, lossTensor);
        return sum(square(masked));
    });
};
|
||
|
/**
 * Validates ground truth boxes: labels must be valid class indices and
 * boxes must lie within relative image coordinates [0, 1].
 * @throws on the first invalid entry.
 */
TinyYolov2LossFunction.prototype.validateGroundTruthBoxes = function (groundTruth) {
    var maxLabel = this.config.classes.length - 1;
    groundTruth.forEach(function (gt) {
        var x = gt.x, y = gt.y, width = gt.width, height = gt.height, label = gt.label;
        if (typeof label !== 'number' || label < 0 || label > maxLabel) {
            throw new Error("invalid ground truth data, expected label to be a number in [0, " + maxLabel + "]");
        }
        var outOfBounds = x < 0 || x > 1 || y < 0 || y > 1
            || width < 0 || (x + width) > 1
            || height < 0 || (y + height) > 1;
        if (outOfBounds) {
            throw new Error("invalid ground truth data, box is out of image boundaries " + JSON.stringify({ x: x, y: y, width: width, height: height }));
        }
    });
};
|
||
|
/**
 * Assigns every ground truth box to the grid cell containing its center
 * and to the anchor whose shape yields the highest IOU with the box.
 * @returns array of { row, col, anchor, box, label } assignments.
 */
TinyYolov2LossFunction.prototype.assignGroundTruthToAnchors = function (groundTruth) {
    var self = this;
    return groundTruth.map(function (gt) {
        var box = new Rect(gt.x, gt.y, gt.width, gt.height);
        var rescaled = box.rescale(self.reshapedImgDims);
        // locate the grid cell containing the box center
        var ctX = rescaled.left + (rescaled.width / 2);
        var ctY = rescaled.top + (rescaled.height / 2);
        var col = Math.floor((ctX / self.inputSize) * self.numCells);
        var row = Math.floor((ctY / self.inputSize) * self.numCells);
        // pick the anchor whose shape best matches the box dimensions
        // (highest IOU, ties resolved in favor of the lower anchor index)
        var bestAnchor = 0;
        var bestIou = -Infinity;
        self.anchors.forEach(function (a, idx) {
            var anchorIou = iou(new BoundingBox(0, 0, a.x * CELL_SIZE, a.y * CELL_SIZE), new BoundingBox(0, 0, rescaled.width, rescaled.height));
            if (anchorIou > bestIou) {
                bestIou = anchorIou;
                bestAnchor = idx;
            }
        });
        return { row: row, col: col, anchor: bestAnchor, box: box, label: gt.label };
    });
};
|
||
|
/**
 * Builds a mask tensor with ones at every encoding position of each
 * assigned ground truth (row, col, anchor) triple.
 */
TinyYolov2LossFunction.prototype.createGroundTruthMask = function () {
    var boxEncodingSize = this.boxEncodingSize;
    var mask = zeros([this.numCells, this.numCells, this.numBoxes, boxEncodingSize]);
    var buf = mask.buffer();
    this.groundTruth.forEach(function (gt) {
        for (var i = 0; i < boxEncodingSize; i++) {
            buf.set(1, gt.row, gt.col, gt.anchor, i);
        }
    });
    return mask;
};
|
||
|
/**
 * Creates three static masks selecting, for every anchor of every cell,
 * the box offset positions (indices 0, 1), box size positions (2, 3)
 * and the objectness score position (4) of the box encoding.
 */
TinyYolov2LossFunction.prototype.createCoordAndScoreMasks = function () {
    var self = this;
    return tidy(function () {
        var shape = [self.numCells, self.numCells, self.numBoxes, self.boxEncodingSize];
        var coordBoxOffsetMask = zeros(shape);
        var coordBoxSizeMask = zeros(shape);
        var scoreMask = zeros(shape);
        var coordBoxOffsetBuf = coordBoxOffsetMask.buffer();
        var coordBoxSizeBuf = coordBoxSizeMask.buffer();
        var scoreBuf = scoreMask.buffer();
        for (var row = 0; row < self.numCells; row++) {
            for (var col = 0; col < self.numCells; col++) {
                for (var anchor = 0; anchor < self.numBoxes; anchor++) {
                    // 0/1: center offsets, 2/3: width/height, 4: objectness score
                    coordBoxOffsetBuf.set(1, row, col, anchor, 0);
                    coordBoxOffsetBuf.set(1, row, col, anchor, 1);
                    coordBoxSizeBuf.set(1, row, col, anchor, 2);
                    coordBoxSizeBuf.set(1, row, col, anchor, 3);
                    scoreBuf.set(1, row, col, anchor, 4);
                }
            }
        }
        return { coordBoxOffsetMask: coordBoxOffsetMask, coordBoxSizeMask: coordBoxSizeMask, scoreMask: scoreMask };
    });
};
|
||
|
/**
 * One-hot class mask: a 1 at the ground truth label position of each
 * assigned anchor, located after the 5 box values of the encoding.
 */
TinyYolov2LossFunction.prototype.createOneHotClassScoreMask = function () {
    // class scores start after x, y, w, h and the objectness score
    var classValuesOffset = 5;
    var mask = zeros([this.numCells, this.numCells, this.numBoxes, this.boxEncodingSize]);
    var buf = mask.buffer();
    this.groundTruth.forEach(function (gt) {
        buf.set(1, gt.row, gt.col, gt.anchor, classValuesOffset + gt.label);
    });
    return mask;
};
|
||
|
/**
 * Computes the IOU between each assigned ground truth box and the
 * predicted box of the same cell/anchor, written at the score position
 * of the grid cell encoding.
 * @throws when no predicted box exists for an assigned cell/anchor.
 */
TinyYolov2LossFunction.prototype.computeIous = function () {
    var self = this;
    var scoreValueOffset = 4;
    var ious = zeros([this.numCells, this.numCells, this.gridCellEncodingSize]);
    var buf = ious.buffer();
    this.groundTruth.forEach(function (gt) {
        var row = gt.row, col = gt.col, anchor = gt.anchor;
        var predBox = self.predictedBoxes.find(function (p) {
            return p.row === row && p.col === col && p.anchor === anchor;
        });
        if (!predBox) {
            throw new Error("no output box found for: row " + row + ", col " + col + ", anchor " + anchor);
        }
        var boxIou = iou(gt.box.rescale(self.reshapedImgDims), predBox.box.rescale(self.reshapedImgDims));
        buf.set(boxIou, row, col, (self.boxEncodingSize * anchor) + scoreValueOffset);
    });
    return ious;
};
|
||
|
/**
 * Ground truth center offsets of each assigned box relative to its grid
 * cell origin, normalized by CELL_SIZE (indices 0 and 1 of the encoding).
 */
TinyYolov2LossFunction.prototype.computeCoordBoxOffsets = function () {
    var self = this;
    var offsets = zeros([this.numCells, this.numCells, this.numBoxes, this.boxEncodingSize]);
    var buf = offsets.buffer();
    this.groundTruth.forEach(function (gt) {
        var scaled = gt.box.rescale(self.reshapedImgDims);
        var centerX = (scaled.left + scaled.right) / 2;
        var centerY = (scaled.top + scaled.bottom) / 2;
        // inverseSigmoid(0) === -Infinity, inverseSigmoid(1) === Infinity
        //const dx = inverseSigmoid(Math.min(0.999, Math.max(0.001, dCenterX / CELL_SIZE)))
        //const dy = inverseSigmoid(Math.min(0.999, Math.max(0.001, dCenterY / CELL_SIZE)))
        var dx = (centerX - (gt.col * CELL_SIZE)) / CELL_SIZE;
        var dy = (centerY - (gt.row * CELL_SIZE)) / CELL_SIZE;
        buf.set(dx, gt.row, gt.col, gt.anchor, 0);
        buf.set(dy, gt.row, gt.col, gt.anchor, 1);
    });
    return offsets;
};
|
||
|
/**
 * Ground truth log-space box sizes relative to the assigned anchor
 * dimensions (indices 2 and 3 of the box encoding).
 */
TinyYolov2LossFunction.prototype.computeCoordBoxSizes = function () {
    var self = this;
    var sizes = zeros([this.numCells, this.numCells, this.numBoxes, this.boxEncodingSize]);
    var buf = sizes.buffer();
    this.groundTruth.forEach(function (gt) {
        var scaled = gt.box.rescale(self.reshapedImgDims);
        var anchorDims = self.anchors[gt.anchor];
        var dw = Math.log(scaled.width / (anchorDims.x * CELL_SIZE));
        var dh = Math.log(scaled.height / (anchorDims.y * CELL_SIZE));
        buf.set(dw, gt.row, gt.col, gt.anchor, 2);
        buf.set(dh, gt.row, gt.col, gt.anchor, 3);
    });
    return sizes;
};
|
||
|
return TinyYolov2LossFunction;
|
||
|
}());
|
||
|
|
||
|
/**
 * Merges user supplied backward pass options with the defaults.
 * Fix: with the previous Object.assign merge, an explicitly `undefined`
 * (or null) `minBoxSize` key in `options` shadowed the default; now the
 * default CELL_SIZE is applied whenever minBoxSize is not a usable value.
 * @param {Object} options - may contain minBoxSize and reportLosses.
 * @returns {Object} a new object; `options` is not mutated.
 */
function getDefaultBackwardOptions(options) {
    var merged = Object.assign({}, options);
    if (merged.minBoxSize === undefined || merged.minBoxSize === null) {
        merged.minBoxSize = CELL_SIZE;
    }
    return merged;
}
|
||
|
|
||
|
// Trainable variant of TinyYolov2: adds a backward pass which computes the
// YOLOv2 loss for a batch of ground truth boxes and minimizes it with the
// provided optimizer. (Compiled async methods use the __awaiter$1/__generator$1
// tslib state machines; case labels mark resume points after each await.)
var TinyYolov2Trainable = /** @class */ (function (_super) {
    __extends$1(TinyYolov2Trainable, _super);
    /**
     * @param trainableConfig - net config; validated via validateTrainConfig.
     * @param optimizer - optimizer instance used by backward(); presumably a
     *   tfjs Optimizer (has .minimize) — confirm against callers.
     */
    function TinyYolov2Trainable(trainableConfig, optimizer) {
        var _this = _super.call(this, trainableConfig) || this;
        _this._trainableConfig = validateTrainConfig(trainableConfig);
        _this._optimizer = optimizer;
        return _this;
    }
    Object.defineProperty(TinyYolov2Trainable.prototype, "trainableConfig", {
        // validated training configuration passed to the constructor
        get: function () {
            return this._trainableConfig;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(TinyYolov2Trainable.prototype, "optimizer", {
        // optimizer instance passed to the constructor
        get: function () {
            return this._optimizer;
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Runs one training step on a single image.
     * Returns null when no ground truth box survives the min-size filter,
     * otherwise the loss tensor returned by optimizer.minimize.
     */
    TinyYolov2Trainable.prototype.backward = function (img, groundTruth, inputSize, options) {
        if (options === void 0) { options = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var _a, minBoxSize, reportLosses, reshapedImgDims, filteredGroundTruthBoxes, netInput, loss;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = getDefaultBackwardOptions(options), minBoxSize = _a.minBoxSize, reportLosses = _a.reportLosses;
                        reshapedImgDims = computeReshapedDimensions(getMediaDimensions(img), inputSize);
                        // drop boxes that become too small at the reshaped image scale
                        filteredGroundTruthBoxes = this.filterGroundTruthBoxes(groundTruth, reshapedImgDims, minBoxSize);
                        if (!filteredGroundTruthBoxes.length) {
                            return [2 /*return*/, null];
                        }
                        return [4 /*yield*/, toNetInput(imageToSquare(img, inputSize))];
                    case 1:
                        netInput = _b.sent();
                        loss = this.optimizer.minimize(function () {
                            var _a = _this.computeLoss(_this.forwardInput(netInput, inputSize), filteredGroundTruthBoxes, reshapedImgDims), noObjectLoss = _a.noObjectLoss, objectLoss = _a.objectLoss, coordLoss = _a.coordLoss, classLoss = _a.classLoss, totalLoss = _a.totalLoss;
                            if (reportLosses) {
                                // NOTE: dataSync() blocks until the tensors are read back
                                var losses = {
                                    totalLoss: totalLoss.dataSync()[0],
                                    noObjectLoss: noObjectLoss.dataSync()[0],
                                    objectLoss: objectLoss.dataSync()[0],
                                    coordLoss: coordLoss.dataSync()[0],
                                    classLoss: classLoss.dataSync()[0]
                                };
                                var report = {
                                    losses: losses,
                                    numBoxes: filteredGroundTruthBoxes.length,
                                    inputSize: inputSize
                                };
                                reportLosses(report);
                            }
                            return totalLoss;
                        }, true);
                        return [2 /*return*/, loss];
                }
            });
        });
    };
    /**
     * Computes the YOLOv2 loss for a raw network output tensor against the
     * given ground truth boxes.
     * @throws when reshapedImgDims yields a falsy input size.
     */
    TinyYolov2Trainable.prototype.computeLoss = function (outputTensor, groundTruth, reshapedImgDims) {
        var config = validateTrainConfig(this.config);
        var inputSize = Math.max(reshapedImgDims.width, reshapedImgDims.height);
        if (!inputSize) {
            throw new Error("computeLoss - invalid inputSize: " + inputSize);
        }
        var predictedBoxes = this.extractBoxes(outputTensor, reshapedImgDims);
        return tidy(function () {
            var lossFunction = new TinyYolov2LossFunction(outputTensor, groundTruth, predictedBoxes, reshapedImgDims, config);
            return lossFunction.computeLoss();
        });
    };
    /**
     * Filters out ground truth boxes whose rescaled width or height falls
     * below minBoxSize (in pixels of the reshaped image).
     */
    TinyYolov2Trainable.prototype.filterGroundTruthBoxes = function (groundTruth, imgDims, minBoxSize) {
        var imgHeight = imgDims.height, imgWidth = imgDims.width;
        return groundTruth.filter(function (_a) {
            var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
            var box = (new Rect(x, y, width, height))
                .rescale({ height: imgHeight, width: imgWidth });
            var isTooTiny = box.width < minBoxSize || box.height < minBoxSize;
            return !isTooTiny;
        });
    };
    /**
     * Loads weights via the base class, then converts the loaded params
     * into trainable variables (this.variable()).
     */
    TinyYolov2Trainable.prototype.load = function (weightsOrUrl) {
        return __awaiter$1(this, void 0, void 0, function () {
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, _super.prototype.load.call(this, weightsOrUrl)];
                    case 1:
                        _a.sent();
                        this.variable();
                        return [2 /*return*/];
                }
            });
        });
    };
    return TinyYolov2Trainable;
}(TinyYolov2));
|
||
|
|
||
|
/**
 * Creates weight extractor helpers for the 68 point landmark nets. Raw
 * float values are consumed in order from `extractWeights` and each created
 * tensor's path is recorded in `paramMappings`.
 */
function extractorsFactory$2(extractWeights, paramMappings) {
    var extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);
    // records one paramPath entry per tensor name under the given prefix
    function mapPaths(mappedPrefix, names) {
        names.forEach(function (name) {
            paramMappings.push({ paramPath: mappedPrefix + "/" + name });
        });
    }
    function extractSeparableConvParams(channelsIn, channelsOut, mappedPrefix) {
        var depthwise_filter = tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]);
        var pointwise_filter = tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]);
        var bias = tensor1d(extractWeights(channelsOut));
        mapPaths(mappedPrefix, ['depthwise_filter', 'pointwise_filter', 'bias']);
        return new SeparableConvParams(depthwise_filter, pointwise_filter, bias);
    }
    function extractFCParams(channelsIn, channelsOut, mappedPrefix) {
        var weights = tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]);
        var bias = tensor1d(extractWeights(channelsOut));
        mapPaths(mappedPrefix, ['weights', 'bias']);
        return {
            weights: weights,
            bias: bias
        };
    }
    // dense block with 3 convolutions; the first layer of the net uses a
    // regular convolution for conv0, all others a separable convolution
    function extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer) {
        if (isFirstLayer === void 0) { isFirstLayer = false; }
        var conv0 = isFirstLayer
            ? extractConvParams(channelsIn, channelsOut, 3, mappedPrefix + "/conv0")
            : extractSeparableConvParams(channelsIn, channelsOut, mappedPrefix + "/conv0");
        var conv1 = extractSeparableConvParams(channelsOut, channelsOut, mappedPrefix + "/conv1");
        var conv2 = extractSeparableConvParams(channelsOut, channelsOut, mappedPrefix + "/conv2");
        return { conv0: conv0, conv1: conv1, conv2: conv2 };
    }
    // dense block with 4 convolutions, built on top of the 3-conv variant
    function extractDenseBlock4Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer) {
        if (isFirstLayer === void 0) { isFirstLayer = false; }
        var first3 = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer);
        var conv3 = extractSeparableConvParams(channelsOut, channelsOut, mappedPrefix + "/conv3");
        return { conv0: first3.conv0, conv1: first3.conv1, conv2: first3.conv2, conv3: conv3 };
    }
    return {
        extractDenseBlock3Params: extractDenseBlock3Params,
        extractDenseBlock4Params: extractDenseBlock4Params,
        extractFCParams: extractFCParams
    };
}
|
||
|
|
||
|
/**
 * Extracts the 68 point landmark net parameters (4 dense blocks + fc)
 * from a flat array of weights.
 * Fix: corrected the "remaing" typo in the error message.
 * @throws if any weights remain after all parameters have been consumed
 *   (indicates a weight file / architecture mismatch).
 */
function extractParams$1(weights) {
    var paramMappings = [];
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var _b = extractorsFactory$2(extractWeights, paramMappings), extractDenseBlock4Params = _b.extractDenseBlock4Params, extractFCParams = _b.extractFCParams;
    var dense0 = extractDenseBlock4Params(3, 32, 'dense0', true);
    var dense1 = extractDenseBlock4Params(32, 64, 'dense1');
    var dense2 = extractDenseBlock4Params(64, 128, 'dense2');
    var dense3 = extractDenseBlock4Params(128, 256, 'dense3');
    // 136 outputs = 68 landmark points * (x, y)
    var fc = extractFCParams(256, 136, 'fc');
    if (getRemainingWeights().length !== 0) {
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    return {
        paramMappings: paramMappings,
        params: { dense0: dense0, dense1: dense1, dense2: dense2, dense3: dense3, fc: fc }
    };
}
|
||
|
|
||
|
// Shared base class of the 68 point face landmark networks. Subclasses
// implement runNet; this class handles post-processing of the raw network
// output into relative landmark coordinates and the async detect API.
// (Compiled async methods use the __awaiter$1/__generator$1 state machines.)
var FaceLandmark68NetBase = /** @class */ (function (_super) {
    __extends$1(FaceLandmark68NetBase, _super);
    function FaceLandmark68NetBase(_name) {
        var _this = _super.call(this, _name) || this;
        // kept separately so error messages can reference the subclass name
        _this.__name = _name;
        return _this;
    }
    // Abstract: forward pass producing the raw landmark tensor; must be
    // overridden by subclasses.
    FaceLandmark68NetBase.prototype.runNet = function (_) {
        throw new Error(this.__name + " - runNet not implemented");
    };
    /**
     * Maps raw network output (landmarks in padded square input space) back
     * to coordinates relative to each image's original dimensions.
     * @param output - tensor of shape [batchSize, 136] (x/y interleaved).
     * @param inputSize - side length of the square network input.
     * @param originalDimensions - per-batch { width, height } of the inputs.
     */
    FaceLandmark68NetBase.prototype.postProcess = function (output, inputSize, originalDimensions) {
        // dimensions of each image after scaling its longer side to inputSize
        var inputDimensions = originalDimensions.map(function (_a) {
            var width = _a.width, height = _a.height;
            var scale = inputSize / Math.max(height, width);
            return {
                width: width * scale,
                height: height * scale
            };
        });
        var batchSize = inputDimensions.length;
        return tidy(function () {
            // builds a 136-element tensor of interleaved x/y fill values
            var createInterleavedTensor = function (fillX, fillY) {
                return stack([
                    fill([68], fillX),
                    fill([68], fillY)
                ], 1).as2D(1, 136).as1D();
            };
            // padding added on one axis when the image was squared
            var getPadding = function (batchIdx, cond) {
                var _a = inputDimensions[batchIdx], width = _a.width, height = _a.height;
                return cond(width, height) ? Math.abs(width - height) / 2 : 0;
            };
            var getPaddingX = function (batchIdx) { return getPadding(batchIdx, function (w, h) { return w < h; }); };
            var getPaddingY = function (batchIdx) { return getPadding(batchIdx, function (w, h) { return h < w; }); };
            // scale to input pixels, remove padding, normalize by image size
            var landmarkTensors = output
                .mul(fill([batchSize, 136], inputSize))
                .sub(stack(Array.from(Array(batchSize), function (_, batchIdx) {
                return createInterleavedTensor(getPaddingX(batchIdx), getPaddingY(batchIdx));
            })))
                .div(stack(Array.from(Array(batchSize), function (_, batchIdx) {
                return createInterleavedTensor(inputDimensions[batchIdx].width, inputDimensions[batchIdx].height);
            })));
            return landmarkTensors;
        });
    };
    // Runs the net on a prepared NetInput and post-processes the result.
    FaceLandmark68NetBase.prototype.forwardInput = function (input) {
        var _this = this;
        return tidy(function () {
            var out = _this.runNet(input);
            return _this.postProcess(out, input.inputSize, input.inputDimensions.map(function (_a) {
                var height = _a[0], width = _a[1];
                return ({ height: height, width: width });
            }));
        });
    };
    // Async wrapper: converts any supported input media to a NetInput first.
    FaceLandmark68NetBase.prototype.forward = function (input) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [2 /*return*/, _a.apply(this, [_b.sent()])];
                }
            });
        });
    };
    /**
     * Detects 68 face landmarks for the given input media.
     * @returns a FaceLandmarks68 instance, or an array of them for batch
     *   inputs (one per batch item).
     */
    FaceLandmark68NetBase.prototype.detectLandmarks = function (input) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var netInput, landmarkTensors, landmarksForBatch;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, toNetInput(input)];
                    case 1:
                        netInput = _a.sent();
                        // one tensor per batch item
                        landmarkTensors = tidy(function () { return unstack(_this.forwardInput(netInput)); });
                        return [4 /*yield*/, Promise.all(landmarkTensors.map(function (landmarkTensor, batchIdx) { return __awaiter$1(_this, void 0, void 0, function () {
                            var landmarksArray, _a, _b, xCoords, yCoords;
                            return __generator$1(this, function (_c) {
                                switch (_c.label) {
                                    case 0:
                                        _b = (_a = Array).from;
                                        return [4 /*yield*/, landmarkTensor.data()];
                                    case 1:
                                        landmarksArray = _b.apply(_a, [_c.sent()]);
                                        // values are interleaved: even indices x, odd indices y
                                        xCoords = landmarksArray.filter(function (_, i) { return isEven(i); });
                                        yCoords = landmarksArray.filter(function (_, i) { return !isEven(i); });
                                        return [2 /*return*/, new FaceLandmarks68(Array(68).fill(0).map(function (_, i) { return new Point(xCoords[i], yCoords[i]); }), {
                                                height: netInput.getInputHeight(batchIdx),
                                                width: netInput.getInputWidth(batchIdx),
                                            })];
                                }
                            });
                        }); }))];
                    case 2:
                        landmarksForBatch = _a.sent();
                        // free the per-batch tensors created above
                        landmarkTensors.forEach(function (t) { return t.dispose(); });
                        return [2 /*return*/, netInput.isBatchInput
                                ? landmarksForBatch
                                : landmarksForBatch[0]];
                }
            });
        });
    };
    return FaceLandmark68NetBase;
}(NeuralNetwork));
|
||
|
|
||
|
/** Fully connected layer: x * weights + bias. */
function fullyConnectedLayer(x, params) {
    return tidy(function () { return add(matMul(x, params.weights), params.bias); });
}
|
||
|
|
||
|
/**
 * Creates extractor helpers that pull named weight entries for the 68 point
 * landmark nets out of a loaded weight map, recording each accessed entry
 * in `paramMappings`.
 */
function loadParamsFactory(weightMap, paramMappings) {
    var extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);
    function extractConvParams(prefix) {
        return {
            filters: extractWeightEntry(prefix + "/filters", 4),
            bias: extractWeightEntry(prefix + "/bias", 1)
        };
    }
    function extractSeparableConvParams(prefix) {
        return new SeparableConvParams(extractWeightEntry(prefix + "/depthwise_filter", 4), extractWeightEntry(prefix + "/pointwise_filter", 4), extractWeightEntry(prefix + "/bias", 1));
    }
    // shared implementation for the 3- and 4-conv dense block variants;
    // conv0 is a regular convolution only in the net's first layer
    function extractDenseBlockParams(prefix, isFirstLayer, numConvs) {
        var params = {};
        params.conv0 = isFirstLayer
            ? extractConvParams(prefix + "/conv0")
            : extractSeparableConvParams(prefix + "/conv0");
        for (var i = 1; i < numConvs; i++) {
            params['conv' + i] = extractSeparableConvParams(prefix + "/conv" + i);
        }
        return params;
    }
    function extractDenseBlock3Params(prefix, isFirstLayer) {
        if (isFirstLayer === void 0) { isFirstLayer = false; }
        return extractDenseBlockParams(prefix, isFirstLayer, 3);
    }
    function extractDenseBlock4Params(prefix, isFirstLayer) {
        if (isFirstLayer === void 0) { isFirstLayer = false; }
        return extractDenseBlockParams(prefix, isFirstLayer, 4);
    }
    function extractFcParams(prefix) {
        return {
            weights: extractWeightEntry(prefix + "/weights", 2),
            bias: extractWeightEntry(prefix + "/bias", 1)
        };
    }
    return {
        extractDenseBlock3Params: extractDenseBlock3Params,
        extractDenseBlock4Params: extractDenseBlock4Params,
        extractFcParams: extractFcParams
    };
}
|
||
|
|
||
|
var DEFAULT_MODEL_NAME = 'face_landmark_68_model';
// Loads the quantized 68 point landmark model from uri and maps its weight
// entries into the dense0..dense3 + fc parameter structure.
function loadQuantizedParams$1(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractDenseBlock4Params, extractFcParams, params;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, DEFAULT_MODEL_NAME)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = loadParamsFactory(weightMap, paramMappings), extractDenseBlock4Params = _a.extractDenseBlock4Params, extractFcParams = _a.extractFcParams;
                    params = {
                        // only dense0 uses a regular convolution for conv0
                        dense0: extractDenseBlock4Params('dense0', true),
                        dense1: extractDenseBlock4Params('dense1'),
                        dense2: extractDenseBlock4Params('dense2'),
                        dense3: extractDenseBlock4Params('dense3'),
                        fc: extractFcParams('fc')
                    };
                    // release weight tensors that were not mapped to any parameter
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: params, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Dense block with four convolutions and cumulative residual connections.
 * conv0 downsamples by stride 2; the first layer of the net uses a regular
 * convolution for conv0, all others a depthwise separable convolution.
 */
function denseBlock(x, denseBlockParams, isFirstLayer) {
    if (isFirstLayer === void 0) { isFirstLayer = false; }
    return tidy(function () {
        var initial = isFirstLayer
            ? add(conv2d(x, denseBlockParams.conv0.filters, [2, 2], 'same'), denseBlockParams.conv0.bias)
            : depthwiseSeparableConv(x, denseBlockParams.conv0, [2, 2]);
        var out1 = relu(initial);
        var out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);
        var out3 = depthwiseSeparableConv(relu(add(out1, out2)), denseBlockParams.conv2, [1, 1]);
        var out4 = depthwiseSeparableConv(relu(add(out1, add(out2, out3))), denseBlockParams.conv3, [1, 1]);
        return relu(add(out1, add(out2, add(out3, out4))));
    });
}
|
||
|
// Full size 68 point landmark net: four dense blocks followed by average
// pooling and a fully connected layer producing 136 values (68 x/y pairs).
var FaceLandmark68Net = /** @class */ (function (_super) {
    __extends$1(FaceLandmark68Net, _super);
    function FaceLandmark68Net() {
        return _super.call(this, 'FaceLandmark68Net') || this;
    }
    /**
     * Forward pass over a NetInput.
     * @throws if the model parameters have not been loaded yet.
     */
    FaceLandmark68Net.prototype.runNet = function (input) {
        var params = this.params;
        if (!params) {
            throw new Error('FaceLandmark68Net - load model before inference');
        }
        return tidy(function () {
            var meanRgb = [122.782, 117.001, 104.298];
            // mean-subtract and scale pixel values into [0, 1] range
            var normalized = normalize(input.toBatchTensor(112, true), meanRgb).div(scalar(255));
            var out = denseBlock(normalized, params.dense0, true);
            [params.dense1, params.dense2, params.dense3].forEach(function (denseParams) {
                out = denseBlock(out, denseParams);
            });
            out = avgPool(out, [7, 7], [2, 2], 'valid');
            return fullyConnectedLayer(out.as2D(out.shape[0], -1), params.fc);
        });
    };
    FaceLandmark68Net.prototype.loadQuantizedParams = function (uri) {
        return loadQuantizedParams$1(uri);
    };
    FaceLandmark68Net.prototype.extractParams = function (weights) {
        return extractParams$1(weights);
    };
    return FaceLandmark68Net;
}(FaceLandmark68NetBase));
|
||
|
|
||
|
/**
 * Extracts the tiny 68 point landmark net parameters (3 dense blocks + fc)
 * from a flat array of weights.
 * Fix: corrected the "remaing" typo in the error message.
 * @throws if any weights remain after all parameters have been consumed
 *   (indicates a weight file / architecture mismatch).
 */
function extractParamsTiny(weights) {
    var paramMappings = [];
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var _b = extractorsFactory$2(extractWeights, paramMappings), extractDenseBlock3Params = _b.extractDenseBlock3Params, extractFCParams = _b.extractFCParams;
    var dense0 = extractDenseBlock3Params(3, 32, 'dense0', true);
    var dense1 = extractDenseBlock3Params(32, 64, 'dense1');
    var dense2 = extractDenseBlock3Params(64, 128, 'dense2');
    // 136 outputs = 68 landmark points * (x, y)
    var fc = extractFCParams(128, 136, 'fc');
    if (getRemainingWeights().length !== 0) {
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    return {
        paramMappings: paramMappings,
        params: { dense0: dense0, dense1: dense1, dense2: dense2, fc: fc }
    };
}
|
||
|
|
||
|
var DEFAULT_MODEL_NAME$1 = 'face_landmark_68_tiny_model';
// Loads the quantized tiny 68 point landmark model from uri and maps its
// weight entries into the dense0..dense2 + fc parameter structure.
function loadQuantizedParamsTiny(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractDenseBlock3Params, extractFcParams, params;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, DEFAULT_MODEL_NAME$1)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = loadParamsFactory(weightMap, paramMappings), extractDenseBlock3Params = _a.extractDenseBlock3Params, extractFcParams = _a.extractFcParams;
                    params = {
                        // only dense0 uses a regular convolution for conv0
                        dense0: extractDenseBlock3Params('dense0', true),
                        dense1: extractDenseBlock3Params('dense1'),
                        dense2: extractDenseBlock3Params('dense2'),
                        fc: extractFcParams('fc')
                    };
                    // release weight tensors that were not mapped to any parameter
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: params, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Dense block with three convolutions and cumulative residual connections
 * (tiny net variant). conv0 downsamples by stride 2; the first layer of the
 * net uses a regular convolution for conv0, all others a separable one.
 */
function denseBlock$1(x, denseBlockParams, isFirstLayer) {
    if (isFirstLayer === void 0) { isFirstLayer = false; }
    return tidy(function () {
        var initial = isFirstLayer
            ? add(conv2d(x, denseBlockParams.conv0.filters, [2, 2], 'same'), denseBlockParams.conv0.bias)
            : depthwiseSeparableConv(x, denseBlockParams.conv0, [2, 2]);
        var out1 = relu(initial);
        var out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);
        var out3 = depthwiseSeparableConv(relu(add(out1, out2)), denseBlockParams.conv2, [1, 1]);
        return relu(add(out1, add(out2, out3)));
    });
}
|
||
|
// Tiny 68 point landmark net: three dense blocks followed by average
// pooling and a fully connected layer producing 136 values (68 x/y pairs).
var FaceLandmark68TinyNet = /** @class */ (function (_super) {
    __extends$1(FaceLandmark68TinyNet, _super);
    function FaceLandmark68TinyNet() {
        return _super.call(this, 'FaceLandmark68TinyNet') || this;
    }
    /**
     * Forward pass over a NetInput.
     * @throws if the model parameters have not been loaded yet.
     */
    FaceLandmark68TinyNet.prototype.runNet = function (input) {
        var params = this.params;
        if (!params) {
            throw new Error('FaceLandmark68TinyNet - load model before inference');
        }
        return tidy(function () {
            var meanRgb = [122.782, 117.001, 104.298];
            // mean-subtract and scale pixel values into [0, 1] range
            var normalized = normalize(input.toBatchTensor(112, true), meanRgb).div(scalar(255));
            var out = denseBlock$1(normalized, params.dense0, true);
            [params.dense1, params.dense2].forEach(function (denseParams) {
                out = denseBlock$1(out, denseParams);
            });
            out = avgPool(out, [14, 14], [2, 2], 'valid');
            return fullyConnectedLayer(out.as2D(out.shape[0], -1), params.fc);
        });
    };
    FaceLandmark68TinyNet.prototype.loadQuantizedParams = function (uri) {
        return loadQuantizedParamsTiny(uri);
    };
    FaceLandmark68TinyNet.prototype.extractParams = function (weights) {
        return extractParamsTiny(weights);
    };
    return FaceLandmark68TinyNet;
}(FaceLandmark68NetBase));
|
||
|
|
||
|
// Backward-compatible alias: FaceLandmarkNet behaves exactly like
// FaceLandmark68Net (it adds no members of its own).
var FaceLandmarkNet = /** @class */ (function (_super) {
    __extends$1(FaceLandmarkNet, _super);
    function FaceLandmarkNet() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return FaceLandmarkNet;
}(FaceLandmark68Net));
|
||
|
/** Instantiates a FaceLandmarkNet and initializes it from raw weights. */
function createFaceLandmarkNet(weights) {
    var landmarkNet = new FaceLandmarkNet();
    landmarkNet.extractWeights(weights);
    return landmarkNet;
}
|
||
|
|
||
|
/**
 * Scale layer: element-wise affine transform x * weights + biases.
 * @param x input tensor
 * @param params { weights, biases } of the scale layer
 */
function scale(x, params) {
    var weighted = mul(x, params.weights);
    return add(weighted, params.biases);
}
|
||
|
|
||
|
/**
 * Convolution layer used by the recognition net: conv2d + bias + scale layer,
 * with an optional ReLU activation.
 * @param x input tensor
 * @param params { conv: { filters, bias }, scale } layer parameters
 * @param strides strides passed to conv2d
 * @param withRelu whether to apply relu to the result
 * @param padding conv2d padding mode (defaults to 'same')
 */
function convLayer$1(x, params, strides, withRelu, padding) {
    if (padding === void 0) { padding = 'same'; }
    var convParams = params.conv;
    var convolved = conv2d(x, convParams.filters, strides, padding);
    var biased = add(convolved, convParams.bias);
    var scaled = scale(biased, params.scale);
    if (withRelu) {
        return relu(scaled);
    }
    return scaled;
}
|
||
|
/** Stride-1 convolution layer ('same' padding) with ReLU activation. */
function conv(x, params) {
    var withRelu = true;
    return convLayer$1(x, params, [1, 1], withRelu);
}
|
||
|
/** Stride-1 convolution layer ('same' padding) without activation. */
function convNoRelu(x, params) {
    var withRelu = false;
    return convLayer$1(x, params, [1, 1], withRelu);
}
|
||
|
/** Downsampling convolution layer: stride 2, 'valid' padding, with ReLU. */
function convDown(x, params) {
    var strides = [2, 2];
    return convLayer$1(x, params, strides, true, 'valid');
}
|
||
|
|
||
|
// Factory producing helpers that read the face recognition net parameters
// from a flat weight stream. NOTE: extractWeights consumes the stream
// sequentially, so the call order below must match the serialization order.
function extractorsFactory$3(extractWeights, paramMappings) {
    // Reads raw filter values and reshapes/transposes them into the
    // [filterSize, filterSize, depth, numFilters] layout expected by conv2d.
    function extractFilterValues(numFilterValues, numFilters, filterSize) {
        var weights = extractWeights(numFilterValues);
        // infer the input channel depth from the number of values read
        var depth = weights.length / (numFilters * filterSize * filterSize);
        if (isFloat(depth)) {
            throw new Error("depth has to be an integer: " + depth + ", weights.length: " + weights.length + ", numFilters: " + numFilters + ", filterSize: " + filterSize);
        }
        return tidy(function () { return transpose(tensor4d(weights, [numFilters, depth, filterSize, filterSize]), [2, 3, 1, 0]); });
    }
    // Reads conv filters + bias and records their param paths for later lookup.
    function extractConvParams(numFilterValues, numFilters, filterSize, mappedPrefix) {
        var filters = extractFilterValues(numFilterValues, numFilters, filterSize);
        var bias = tensor1d(extractWeights(numFilters));
        paramMappings.push({ paramPath: mappedPrefix + "/filters" }, { paramPath: mappedPrefix + "/bias" });
        return { filters: filters, bias: bias };
    }
    // Reads the weights and biases of a scale (channel-wise affine) layer.
    function extractScaleLayerParams(numWeights, mappedPrefix) {
        var weights = tensor1d(extractWeights(numWeights));
        var biases = tensor1d(extractWeights(numWeights));
        paramMappings.push({ paramPath: mappedPrefix + "/weights" }, { paramPath: mappedPrefix + "/biases" });
        return {
            weights: weights,
            biases: biases
        };
    }
    // A "conv layer" here is a convolution followed by a scale layer.
    function extractConvLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix) {
        var conv = extractConvParams(numFilterValues, numFilters, filterSize, mappedPrefix + "/conv");
        var scale = extractScaleLayerParams(numFilters, mappedPrefix + "/scale");
        return { conv: conv, scale: scale };
    }
    // A residual layer consists of two conv layers; for downsampling layers
    // (isDown) the first conv reads only half the filter values.
    function extractResidualLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix, isDown) {
        if (isDown === void 0) { isDown = false; }
        var conv1 = extractConvLayerParams((isDown ? 0.5 : 1) * numFilterValues, numFilters, filterSize, mappedPrefix + "/conv1");
        var conv2 = extractConvLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix + "/conv2");
        return { conv1: conv1, conv2: conv2 };
    }
    return {
        extractConvLayerParams: extractConvLayerParams,
        extractResidualLayerParams: extractResidualLayerParams
    };
}
|
||
|
/**
 * Extracts all face recognition net parameters from a flat array of raw
 * weights. The extraction order below must match the serialization order of
 * the weight file exactly, since the underlying stream is consumed
 * sequentially.
 * @param weights flat array holding every model weight.
 * @returns { params, paramMappings } — the structured parameters and the
 *   recorded param path mappings.
 * @throws Error if any weights remain after extraction (count mismatch).
 */
function extractParams$2(weights) {
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var paramMappings = [];
    var _b = extractorsFactory$3(extractWeights, paramMappings), extractConvLayerParams = _b.extractConvLayerParams, extractResidualLayerParams = _b.extractResidualLayerParams;
    // initial downsampling conv, then residual stacks of 32/64/128/256 filters
    var conv32_down = extractConvLayerParams(4704, 32, 7, 'conv32_down');
    var conv32_1 = extractResidualLayerParams(9216, 32, 3, 'conv32_1');
    var conv32_2 = extractResidualLayerParams(9216, 32, 3, 'conv32_2');
    var conv32_3 = extractResidualLayerParams(9216, 32, 3, 'conv32_3');
    var conv64_down = extractResidualLayerParams(36864, 64, 3, 'conv64_down', true);
    var conv64_1 = extractResidualLayerParams(36864, 64, 3, 'conv64_1');
    var conv64_2 = extractResidualLayerParams(36864, 64, 3, 'conv64_2');
    var conv64_3 = extractResidualLayerParams(36864, 64, 3, 'conv64_3');
    var conv128_down = extractResidualLayerParams(147456, 128, 3, 'conv128_down', true);
    var conv128_1 = extractResidualLayerParams(147456, 128, 3, 'conv128_1');
    var conv128_2 = extractResidualLayerParams(147456, 128, 3, 'conv128_2');
    var conv256_down = extractResidualLayerParams(589824, 256, 3, 'conv256_down', true);
    var conv256_1 = extractResidualLayerParams(589824, 256, 3, 'conv256_1');
    var conv256_2 = extractResidualLayerParams(589824, 256, 3, 'conv256_2');
    var conv256_down_out = extractResidualLayerParams(589824, 256, 3, 'conv256_down_out');
    // final fully connected layer: stored as [128, 256], transposed to [256, 128]
    var fc = tidy(function () { return transpose(tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0]); });
    paramMappings.push({ paramPath: "fc" });
    if (getRemainingWeights().length !== 0) {
        // fixed message typo: "remaing" -> "remaining"
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    var params = {
        conv32_down: conv32_down,
        conv32_1: conv32_1,
        conv32_2: conv32_2,
        conv32_3: conv32_3,
        conv64_down: conv64_down,
        conv64_1: conv64_1,
        conv64_2: conv64_2,
        conv64_3: conv64_3,
        conv128_down: conv128_down,
        conv128_1: conv128_1,
        conv128_2: conv128_2,
        conv256_down: conv256_down,
        conv256_1: conv256_1,
        conv256_2: conv256_2,
        conv256_down_out: conv256_down_out,
        fc: fc
    };
    return { params: params, paramMappings: paramMappings };
}
|
||
|
|
||
|
// default file name prefix used when loading the quantized face recognition
// model weight map (see loadQuantizedParams$2)
var DEFAULT_MODEL_NAME$2 = 'face_recognition_model';
|
||
|
// Factory producing helpers that look up face recognition net parameters in a
// pre-loaded (quantized) weight map, recording each mapping as it goes.
function extractorsFactory$4(weightMap, paramMappings) {
    var extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);
    // scale layer: channel-wise weights and biases (rank-1 tensors)
    function extractScaleLayerParams(prefix) {
        var weights = extractWeightEntry(prefix + "/scale/weights", 1);
        var biases = extractWeightEntry(prefix + "/scale/biases", 1);
        return { weights: weights, biases: biases };
    }
    // conv layer: rank-4 filters + rank-1 bias, plus the associated scale layer
    function extractConvLayerParams(prefix) {
        var filters = extractWeightEntry(prefix + "/conv/filters", 4);
        var bias = extractWeightEntry(prefix + "/conv/bias", 1);
        var scale = extractScaleLayerParams(prefix);
        return { conv: { filters: filters, bias: bias }, scale: scale };
    }
    // residual layer: two consecutive conv layers
    function extractResidualLayerParams(prefix) {
        return {
            conv1: extractConvLayerParams(prefix + "/conv1"),
            conv2: extractConvLayerParams(prefix + "/conv2")
        };
    }
    return {
        extractConvLayerParams: extractConvLayerParams,
        extractResidualLayerParams: extractResidualLayerParams
    };
}
|
||
|
/**
 * Loads the quantized face recognition model from uri and assembles the
 * parameter structure expected by FaceRecognitionNet.
 * NOTE: this is a transpiled async function — the __generator switch below
 * encodes the awaited control flow, not hand-written branching.
 * @param uri location of the weight manifest/shards.
 * @returns a promise resolving to { params, paramMappings }.
 */
function loadQuantizedParams$2(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractConvLayerParams, extractResidualLayerParams, conv32_down, conv32_1, conv32_2, conv32_3, conv64_down, conv64_1, conv64_2, conv64_3, conv128_down, conv128_1, conv128_2, conv256_down, conv256_1, conv256_2, conv256_down_out, fc, params;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, DEFAULT_MODEL_NAME$2)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = extractorsFactory$4(weightMap, paramMappings), extractConvLayerParams = _a.extractConvLayerParams, extractResidualLayerParams = _a.extractResidualLayerParams;
                    // look up every layer of the net in the weight map
                    conv32_down = extractConvLayerParams('conv32_down');
                    conv32_1 = extractResidualLayerParams('conv32_1');
                    conv32_2 = extractResidualLayerParams('conv32_2');
                    conv32_3 = extractResidualLayerParams('conv32_3');
                    conv64_down = extractResidualLayerParams('conv64_down');
                    conv64_1 = extractResidualLayerParams('conv64_1');
                    conv64_2 = extractResidualLayerParams('conv64_2');
                    conv64_3 = extractResidualLayerParams('conv64_3');
                    conv128_down = extractResidualLayerParams('conv128_down');
                    conv128_1 = extractResidualLayerParams('conv128_1');
                    conv128_2 = extractResidualLayerParams('conv128_2');
                    conv256_down = extractResidualLayerParams('conv256_down');
                    conv256_1 = extractResidualLayerParams('conv256_1');
                    conv256_2 = extractResidualLayerParams('conv256_2');
                    conv256_down_out = extractResidualLayerParams('conv256_down_out');
                    // final fully connected layer is stored directly under 'fc'
                    fc = weightMap['fc'];
                    paramMappings.push({ originalPath: 'fc', paramPath: 'fc' });
                    if (!isTensor2D(fc)) {
                        throw new Error("expected weightMap[fc] to be a Tensor2D, instead have " + fc);
                    }
                    params = {
                        conv32_down: conv32_down,
                        conv32_1: conv32_1,
                        conv32_2: conv32_2,
                        conv32_3: conv32_3,
                        conv64_down: conv64_down,
                        conv64_1: conv64_1,
                        conv64_2: conv64_2,
                        conv64_3: conv64_3,
                        conv128_down: conv128_down,
                        conv128_1: conv128_1,
                        conv128_2: conv128_2,
                        conv256_down: conv256_down,
                        conv256_1: conv256_1,
                        conv256_2: conv256_2,
                        conv256_down_out: conv256_down_out,
                        fc: fc
                    };
                    // free any weight map tensors that were not mapped into params
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: params, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Residual block: conv -> conv (no activation) -> add skip connection -> relu.
 * @param x input tensor (also the skip connection)
 * @param params { conv1, conv2 } layer parameters
 */
function residual(x, params) {
    var branch = conv(x, params.conv1);
    branch = convNoRelu(branch, params.conv2);
    var summed = add(branch, x);
    return relu(summed);
}
|
||
|
// Downsampling residual block: a strided conv branch is summed with an
// average-pooled skip connection. Zero tensors are concatenated to make the
// two branches' shapes match before the add.
function residualDown(x, params) {
    var out = convDown(x, params.conv1);
    out = convNoRelu(out, params.conv2);
    // skip connection: downsample the input via 2x2 average pooling
    var pooled = avgPool(x, 2, 2, 'valid');
    var zeros$$1 = zeros(pooled.shape);
    // channel counts differ -> the pooled branch gets a same-shaped zero
    // tensor appended along the channel axis (axis 3) below
    var isPad = pooled.shape[3] !== out.shape[3];
    // spatial dims differ -> pad the conv branch by one zero row and column
    var isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2];
    if (isAdjustShape) {
        var padShapeX = out.shape.slice();
        padShapeX[1] = 1;
        var zerosW = zeros(padShapeX);
        out = concat([out, zerosW], 1);
        var padShapeY = out.shape.slice();
        padShapeY[2] = 1;
        var zerosH = zeros(padShapeY);
        out = concat([out, zerosH], 2);
    }
    pooled = isPad ? concat([pooled, zeros$$1], 3) : pooled;
    out = add(pooled, out);
    out = relu(out);
    return out;
}
|
||
|
|
||
|
// Face descriptor (embedding) network: a residual CNN over 150x150 inputs
// whose output feeds a final fully connected layer (params.fc).
var FaceRecognitionNet = /** @class */ (function (_super) {
    __extends$1(FaceRecognitionNet, _super);
    function FaceRecognitionNet() {
        return _super.call(this, 'FaceRecognitionNet') || this;
    }
    /**
     * Synchronous forward pass over a NetInput.
     * @returns the descriptor tensor for the batch.
     * @throws Error if the model weights have not been loaded yet.
     */
    FaceRecognitionNet.prototype.forwardInput = function (input) {
        var params = this.params;
        if (!params) {
            throw new Error('FaceRecognitionNet - load model before inference');
        }
        return tidy(function () {
            var batchTensor = input.toBatchTensor(150, true).toFloat();
            // subtract per-channel mean RGB, then scale down by 256
            var meanRgb = [122.782, 117.001, 104.298];
            var normalized = normalize(batchTensor, meanRgb).div(scalar(256));
            var out = convDown(normalized, params.conv32_down);
            out = maxPool(out, 3, 2, 'valid');
            // residual stacks with increasing filter counts (32 -> 256)
            out = residual(out, params.conv32_1);
            out = residual(out, params.conv32_2);
            out = residual(out, params.conv32_3);
            out = residualDown(out, params.conv64_down);
            out = residual(out, params.conv64_1);
            out = residual(out, params.conv64_2);
            out = residual(out, params.conv64_3);
            out = residualDown(out, params.conv128_down);
            out = residual(out, params.conv128_1);
            out = residual(out, params.conv128_2);
            out = residualDown(out, params.conv256_down);
            out = residual(out, params.conv256_1);
            out = residual(out, params.conv256_2);
            out = residualDown(out, params.conv256_down_out);
            // global average pooling over the spatial dims, then the FC layer
            var globalAvg = out.mean([1, 2]);
            var fullyConnected = matMul(globalAvg, params.fc);
            return fullyConnected;
        });
    };
    // Async wrapper: converts any supported input to a NetInput, then runs
    // forwardInput. (Transpiled async/await state machine.)
    FaceRecognitionNet.prototype.forward = function (input) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [2 /*return*/, _a.apply(this, [_b.sent()])];
                }
            });
        });
    };
    /**
     * Computes face descriptors for the input and downloads them to CPU
     * memory. Returns one descriptor for a single input, or an array of
     * descriptors when the input is a batch. Intermediate tensors are
     * disposed before returning.
     */
    FaceRecognitionNet.prototype.computeFaceDescriptor = function (input) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var netInput, faceDescriptorTensors, faceDescriptorsForBatch;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, toNetInput(input)];
                    case 1:
                        netInput = _a.sent();
                        // unstack the batch into one tensor per input image
                        faceDescriptorTensors = tidy(function () { return unstack(_this.forwardInput(netInput)); });
                        return [4 /*yield*/, Promise.all(faceDescriptorTensors.map(function (t) { return t.data(); }))];
                    case 2:
                        faceDescriptorsForBatch = _a.sent();
                        faceDescriptorTensors.forEach(function (t) { return t.dispose(); });
                        return [2 /*return*/, netInput.isBatchInput
                            ? faceDescriptorsForBatch
                            : faceDescriptorsForBatch[0]];
                }
            });
        });
    };
    // Loads the quantized (compressed) model weights from the given uri.
    FaceRecognitionNet.prototype.loadQuantizedParams = function (uri) {
        return loadQuantizedParams$2(uri);
    };
    // Extracts the model params from a flat array of raw weights.
    FaceRecognitionNet.prototype.extractParams = function (weights) {
        return extractParams$2(weights);
    };
    return FaceRecognitionNet;
}(NeuralNetwork));
|
||
|
|
||
|
/**
 * Builds a FaceRecognitionNet from raw (uncompressed) model weights.
 * @param weights flat array of all model weights in extraction order.
 * @returns the initialized FaceRecognitionNet instance.
 */
function createFaceRecognitionNet(weights) {
    var recognitionNet = new FaceRecognitionNet();
    recognitionNet.extractWeights(weights);
    return recognitionNet;
}
|
||
|
|
||
|
/**
 * Configuration options for the MTCNN face detector. All options are
 * optional; unspecified (or falsy) values fall back to defaults.
 * Invalid values cause the constructor to throw.
 */
var MtcnnOptions = /** @class */ (function () {
    function MtcnnOptions(_a) {
        var options = _a === void 0 ? {} : _a;
        this._name = 'MtcnnOptions';
        // falsy inputs intentionally fall back to the defaults (|| semantics)
        this._minFaceSize = options.minFaceSize || 20;
        this._scaleFactor = options.scaleFactor || 0.709;
        this._maxNumScales = options.maxNumScales || 10;
        this._scoreThresholds = options.scoreThresholds || [0.6, 0.7, 0.7];
        this._scaleSteps = options.scaleSteps;
        var isNumber = function (val) { return typeof val === 'number'; };
        if (!isNumber(this._minFaceSize) || this._minFaceSize < 0) {
            throw new Error(this._name + " - expected minFaceSize to be a number > 0");
        }
        if (!isNumber(this._scaleFactor) || this._scaleFactor <= 0 || this._scaleFactor >= 1) {
            throw new Error(this._name + " - expected scaleFactor to be a number between 0 and 1");
        }
        if (!isNumber(this._maxNumScales) || this._maxNumScales < 0) {
            throw new Error(this._name + " - expected maxNumScales to be a number > 0");
        }
        if (!Array.isArray(this._scoreThresholds)
            || this._scoreThresholds.length !== 3
            || !this._scoreThresholds.every(isNumber)) {
            throw new Error(this._name + " - expected scoreThresholds to be an array of numbers of length 3");
        }
        if (this._scaleSteps
            && (!Array.isArray(this._scaleSteps) || !this._scaleSteps.every(isNumber))) {
            throw new Error(this._name + " - expected scaleSteps to be an array of numbers");
        }
    }
    // expose a read-only accessor for each validated option
    ['minFaceSize', 'scaleFactor', 'maxNumScales', 'scoreThresholds', 'scaleSteps'].forEach(function (prop) {
        Object.defineProperty(MtcnnOptions.prototype, prop, {
            get: function () { return this['_' + prop]; },
            enumerable: true,
            configurable: true
        });
    });
    return MtcnnOptions;
}());
|
||
|
|
||
|
// Factory producing helpers that read the SSD mobilenet v1 detector
// parameters from a flat weight stream. NOTE: extractWeights consumes the
// stream sequentially, so the call order below must match the serialization
// order of the weight file.
function extractorsFactory$5(extractWeights, paramMappings) {
    // 3x3 depthwise conv: filters plus batch norm scale/offset/mean/variance
    function extractDepthwiseConvParams(numChannels, mappedPrefix) {
        var filters = tensor4d(extractWeights(3 * 3 * numChannels), [3, 3, numChannels, 1]);
        var batch_norm_scale = tensor1d(extractWeights(numChannels));
        var batch_norm_offset = tensor1d(extractWeights(numChannels));
        var batch_norm_mean = tensor1d(extractWeights(numChannels));
        var batch_norm_variance = tensor1d(extractWeights(numChannels));
        paramMappings.push({ paramPath: mappedPrefix + "/filters" }, { paramPath: mappedPrefix + "/batch_norm_scale" }, { paramPath: mappedPrefix + "/batch_norm_offset" }, { paramPath: mappedPrefix + "/batch_norm_mean" }, { paramPath: mappedPrefix + "/batch_norm_variance" });
        return {
            filters: filters,
            batch_norm_scale: batch_norm_scale,
            batch_norm_offset: batch_norm_offset,
            batch_norm_mean: batch_norm_mean,
            batch_norm_variance: batch_norm_variance
        };
    }
    // plain conv: filters + bias; for pointwise convs the bias entry is
    // recorded under the batch_norm_offset param path instead
    function extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, isPointwiseConv) {
        var filters = tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]);
        var bias = tensor1d(extractWeights(channelsOut));
        paramMappings.push({ paramPath: mappedPrefix + "/filters" }, { paramPath: mappedPrefix + "/" + (isPointwiseConv ? 'batch_norm_offset' : 'bias') });
        return { filters: filters, bias: bias };
    }
    // pointwise conv: a conv whose bias is exposed as batch_norm_offset
    function extractPointwiseConvParams(channelsIn, channelsOut, filterSize, mappedPrefix) {
        var _a = extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, true), filters = _a.filters, bias = _a.bias;
        return {
            filters: filters,
            batch_norm_offset: bias
        };
    }
    // depthwise-separable conv pair: depthwise conv + 1x1 pointwise conv
    function extractConvPairParams(channelsIn, channelsOut, mappedPrefix) {
        var depthwise_conv = extractDepthwiseConvParams(channelsIn, mappedPrefix + "/depthwise_conv");
        var pointwise_conv = extractPointwiseConvParams(channelsIn, channelsOut, 1, mappedPrefix + "/pointwise_conv");
        return { depthwise_conv: depthwise_conv, pointwise_conv: pointwise_conv };
    }
    // backbone: one pointwise conv followed by 13 conv pairs (channels 32 -> 1024)
    function extractMobilenetV1Params() {
        var conv_0 = extractPointwiseConvParams(3, 32, 3, 'mobilenetv1/conv_0');
        var conv_1 = extractConvPairParams(32, 64, 'mobilenetv1/conv_1');
        var conv_2 = extractConvPairParams(64, 128, 'mobilenetv1/conv_2');
        var conv_3 = extractConvPairParams(128, 128, 'mobilenetv1/conv_3');
        var conv_4 = extractConvPairParams(128, 256, 'mobilenetv1/conv_4');
        var conv_5 = extractConvPairParams(256, 256, 'mobilenetv1/conv_5');
        var conv_6 = extractConvPairParams(256, 512, 'mobilenetv1/conv_6');
        var conv_7 = extractConvPairParams(512, 512, 'mobilenetv1/conv_7');
        var conv_8 = extractConvPairParams(512, 512, 'mobilenetv1/conv_8');
        var conv_9 = extractConvPairParams(512, 512, 'mobilenetv1/conv_9');
        var conv_10 = extractConvPairParams(512, 512, 'mobilenetv1/conv_10');
        var conv_11 = extractConvPairParams(512, 512, 'mobilenetv1/conv_11');
        var conv_12 = extractConvPairParams(512, 1024, 'mobilenetv1/conv_12');
        var conv_13 = extractConvPairParams(1024, 1024, 'mobilenetv1/conv_13');
        return {
            conv_0: conv_0,
            conv_1: conv_1,
            conv_2: conv_2,
            conv_3: conv_3,
            conv_4: conv_4,
            conv_5: conv_5,
            conv_6: conv_6,
            conv_7: conv_7,
            conv_8: conv_8,
            conv_9: conv_9,
            conv_10: conv_10,
            conv_11: conv_11,
            conv_12: conv_12,
            conv_13: conv_13
        };
    }
    // prediction head: 8 pointwise convs plus 6 box/class predictor pairs
    function extractPredictionLayerParams() {
        var conv_0 = extractPointwiseConvParams(1024, 256, 1, 'prediction_layer/conv_0');
        var conv_1 = extractPointwiseConvParams(256, 512, 3, 'prediction_layer/conv_1');
        var conv_2 = extractPointwiseConvParams(512, 128, 1, 'prediction_layer/conv_2');
        var conv_3 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_3');
        var conv_4 = extractPointwiseConvParams(256, 128, 1, 'prediction_layer/conv_4');
        var conv_5 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_5');
        var conv_6 = extractPointwiseConvParams(256, 64, 1, 'prediction_layer/conv_6');
        var conv_7 = extractPointwiseConvParams(64, 128, 3, 'prediction_layer/conv_7');
        var box_encoding_0_predictor = extractConvParams(512, 12, 1, 'prediction_layer/box_predictor_0/box_encoding_predictor');
        var class_predictor_0 = extractConvParams(512, 9, 1, 'prediction_layer/box_predictor_0/class_predictor');
        var box_encoding_1_predictor = extractConvParams(1024, 24, 1, 'prediction_layer/box_predictor_1/box_encoding_predictor');
        var class_predictor_1 = extractConvParams(1024, 18, 1, 'prediction_layer/box_predictor_1/class_predictor');
        var box_encoding_2_predictor = extractConvParams(512, 24, 1, 'prediction_layer/box_predictor_2/box_encoding_predictor');
        var class_predictor_2 = extractConvParams(512, 18, 1, 'prediction_layer/box_predictor_2/class_predictor');
        var box_encoding_3_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_3/box_encoding_predictor');
        var class_predictor_3 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_3/class_predictor');
        var box_encoding_4_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_4/box_encoding_predictor');
        var class_predictor_4 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_4/class_predictor');
        var box_encoding_5_predictor = extractConvParams(128, 24, 1, 'prediction_layer/box_predictor_5/box_encoding_predictor');
        var class_predictor_5 = extractConvParams(128, 18, 1, 'prediction_layer/box_predictor_5/class_predictor');
        var box_predictor_0 = {
            box_encoding_predictor: box_encoding_0_predictor,
            class_predictor: class_predictor_0
        };
        var box_predictor_1 = {
            box_encoding_predictor: box_encoding_1_predictor,
            class_predictor: class_predictor_1
        };
        var box_predictor_2 = {
            box_encoding_predictor: box_encoding_2_predictor,
            class_predictor: class_predictor_2
        };
        var box_predictor_3 = {
            box_encoding_predictor: box_encoding_3_predictor,
            class_predictor: class_predictor_3
        };
        var box_predictor_4 = {
            box_encoding_predictor: box_encoding_4_predictor,
            class_predictor: class_predictor_4
        };
        var box_predictor_5 = {
            box_encoding_predictor: box_encoding_5_predictor,
            class_predictor: class_predictor_5
        };
        return {
            conv_0: conv_0,
            conv_1: conv_1,
            conv_2: conv_2,
            conv_3: conv_3,
            conv_4: conv_4,
            conv_5: conv_5,
            conv_6: conv_6,
            conv_7: conv_7,
            box_predictor_0: box_predictor_0,
            box_predictor_1: box_predictor_1,
            box_predictor_2: box_predictor_2,
            box_predictor_3: box_predictor_3,
            box_predictor_4: box_predictor_4,
            box_predictor_5: box_predictor_5
        };
    }
    return {
        extractMobilenetV1Params: extractMobilenetV1Params,
        extractPredictionLayerParams: extractPredictionLayerParams
    };
}
|
||
|
/**
 * Extracts the SSD mobilenet v1 face detector parameters from a flat array
 * of raw weights. The weight stream is consumed sequentially, so the
 * extraction order below must match the serialization order exactly.
 * @param weights flat array holding every model weight.
 * @returns { params, paramMappings } — the structured parameters and the
 *   recorded param path mappings.
 * @throws Error if any weights remain after extraction (count mismatch).
 */
function extractParams$3(weights) {
    var paramMappings = [];
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var _b = extractorsFactory$5(extractWeights, paramMappings), extractMobilenetV1Params = _b.extractMobilenetV1Params, extractPredictionLayerParams = _b.extractPredictionLayerParams;
    var mobilenetv1 = extractMobilenetV1Params();
    var prediction_layer = extractPredictionLayerParams();
    // extra output tensor of fixed shape [1, 5118, 4] — presumably per-box
    // data for the 5118 detector outputs (TODO confirm against output layer)
    var extra_dim = tensor3d(extractWeights(5118 * 4), [1, 5118, 4]);
    var output_layer = {
        extra_dim: extra_dim
    };
    paramMappings.push({ paramPath: 'output_layer/extra_dim' });
    if (getRemainingWeights().length !== 0) {
        // fixed message typo: "remaing" -> "remaining"
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    return {
        params: {
            mobilenetv1: mobilenetv1,
            prediction_layer: prediction_layer,
            output_layer: output_layer
        },
        paramMappings: paramMappings
    };
}
|
||
|
|
||
|
// default file name prefix used when loading the quantized SSD mobilenet v1
// model weight map (see loadQuantizedParams$3)
var DEFAULT_MODEL_NAME$3 = 'ssd_mobilenetv1_model';
|
||
|
// Factory producing helpers that look up SSD mobilenet v1 parameters in a
// pre-loaded (quantized) weight map, translating the original TensorFlow
// variable names (e.g. "MobilenetV1/Conv2d_1_depthwise/...") to this
// library's param paths (e.g. "mobilenetv1/conv_1/depthwise_conv/...").
function extractorsFactory$6(weightMap, paramMappings) {
    var extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);
    // pointwise conv: rank-4 filters + rank-1 batch norm offset
    function extractPointwiseConvParams(prefix, idx, mappedPrefix) {
        var filters = extractWeightEntry(prefix + "/Conv2d_" + idx + "_pointwise/weights", 4, mappedPrefix + "/filters");
        var batch_norm_offset = extractWeightEntry(prefix + "/Conv2d_" + idx + "_pointwise/convolution_bn_offset", 1, mappedPrefix + "/batch_norm_offset");
        return { filters: filters, batch_norm_offset: batch_norm_offset };
    }
    // depthwise-separable conv pair: depthwise conv (with batch norm params)
    // followed by a pointwise conv
    function extractConvPairParams(idx) {
        var mappedPrefix = "mobilenetv1/conv_" + idx;
        var prefixDepthwiseConv = "MobilenetV1/Conv2d_" + idx + "_depthwise";
        var mappedPrefixDepthwiseConv = mappedPrefix + "/depthwise_conv";
        var mappedPrefixPointwiseConv = mappedPrefix + "/pointwise_conv";
        var filters = extractWeightEntry(prefixDepthwiseConv + "/depthwise_weights", 4, mappedPrefixDepthwiseConv + "/filters");
        var batch_norm_scale = extractWeightEntry(prefixDepthwiseConv + "/BatchNorm/gamma", 1, mappedPrefixDepthwiseConv + "/batch_norm_scale");
        var batch_norm_offset = extractWeightEntry(prefixDepthwiseConv + "/BatchNorm/beta", 1, mappedPrefixDepthwiseConv + "/batch_norm_offset");
        var batch_norm_mean = extractWeightEntry(prefixDepthwiseConv + "/BatchNorm/moving_mean", 1, mappedPrefixDepthwiseConv + "/batch_norm_mean");
        var batch_norm_variance = extractWeightEntry(prefixDepthwiseConv + "/BatchNorm/moving_variance", 1, mappedPrefixDepthwiseConv + "/batch_norm_variance");
        return {
            depthwise_conv: {
                filters: filters,
                batch_norm_scale: batch_norm_scale,
                batch_norm_offset: batch_norm_offset,
                batch_norm_mean: batch_norm_mean,
                batch_norm_variance: batch_norm_variance
            },
            pointwise_conv: extractPointwiseConvParams('MobilenetV1', idx, mappedPrefixPointwiseConv)
        };
    }
    // backbone: one pointwise conv followed by 13 conv pairs
    function extractMobilenetV1Params() {
        return {
            conv_0: extractPointwiseConvParams('MobilenetV1', 0, 'mobilenetv1/conv_0'),
            conv_1: extractConvPairParams(1),
            conv_2: extractConvPairParams(2),
            conv_3: extractConvPairParams(3),
            conv_4: extractConvPairParams(4),
            conv_5: extractConvPairParams(5),
            conv_6: extractConvPairParams(6),
            conv_7: extractConvPairParams(7),
            conv_8: extractConvPairParams(8),
            conv_9: extractConvPairParams(9),
            conv_10: extractConvPairParams(10),
            conv_11: extractConvPairParams(11),
            conv_12: extractConvPairParams(12),
            conv_13: extractConvPairParams(13)
        };
    }
    // plain conv: rank-4 filters + rank-1 bias
    function extractConvParams(prefix, mappedPrefix) {
        var filters = extractWeightEntry(prefix + "/weights", 4, mappedPrefix + "/filters");
        var bias = extractWeightEntry(prefix + "/biases", 1, mappedPrefix + "/bias");
        return { filters: filters, bias: bias };
    }
    // box predictor: box encoding conv + class prediction conv
    function extractBoxPredictorParams(idx) {
        var box_encoding_predictor = extractConvParams("Prediction/BoxPredictor_" + idx + "/BoxEncodingPredictor", "prediction_layer/box_predictor_" + idx + "/box_encoding_predictor");
        var class_predictor = extractConvParams("Prediction/BoxPredictor_" + idx + "/ClassPredictor", "prediction_layer/box_predictor_" + idx + "/class_predictor");
        return { box_encoding_predictor: box_encoding_predictor, class_predictor: class_predictor };
    }
    // prediction head: 8 pointwise convs plus 6 box predictors
    function extractPredictionLayerParams() {
        return {
            conv_0: extractPointwiseConvParams('Prediction', 0, 'prediction_layer/conv_0'),
            conv_1: extractPointwiseConvParams('Prediction', 1, 'prediction_layer/conv_1'),
            conv_2: extractPointwiseConvParams('Prediction', 2, 'prediction_layer/conv_2'),
            conv_3: extractPointwiseConvParams('Prediction', 3, 'prediction_layer/conv_3'),
            conv_4: extractPointwiseConvParams('Prediction', 4, 'prediction_layer/conv_4'),
            conv_5: extractPointwiseConvParams('Prediction', 5, 'prediction_layer/conv_5'),
            conv_6: extractPointwiseConvParams('Prediction', 6, 'prediction_layer/conv_6'),
            conv_7: extractPointwiseConvParams('Prediction', 7, 'prediction_layer/conv_7'),
            box_predictor_0: extractBoxPredictorParams(0),
            box_predictor_1: extractBoxPredictorParams(1),
            box_predictor_2: extractBoxPredictorParams(2),
            box_predictor_3: extractBoxPredictorParams(3),
            box_predictor_4: extractBoxPredictorParams(4),
            box_predictor_5: extractBoxPredictorParams(5)
        };
    }
    return {
        extractMobilenetV1Params: extractMobilenetV1Params,
        extractPredictionLayerParams: extractPredictionLayerParams
    };
}
|
||
|
/**
 * Loads the quantized SSD mobilenet v1 model from uri and assembles the
 * parameter structure used by the detector.
 * NOTE: this is a transpiled async function — the __generator switch below
 * encodes the awaited control flow, not hand-written branching.
 * @param uri location of the weight manifest/shards.
 * @returns a promise resolving to { params, paramMappings }.
 */
function loadQuantizedParams$3(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractMobilenetV1Params, extractPredictionLayerParams, extra_dim, params;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, DEFAULT_MODEL_NAME$3)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = extractorsFactory$6(weightMap, paramMappings), extractMobilenetV1Params = _a.extractMobilenetV1Params, extractPredictionLayerParams = _a.extractPredictionLayerParams;
                    // the extra output dimension tensor is stored directly in the map
                    extra_dim = weightMap['Output/extra_dim'];
                    paramMappings.push({ originalPath: 'Output/extra_dim', paramPath: 'output_layer/extra_dim' });
                    if (!isTensor3D(extra_dim)) {
                        throw new Error("expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have " + extra_dim);
                    }
                    params = {
                        mobilenetv1: extractMobilenetV1Params(),
                        prediction_layer: extractPredictionLayerParams(),
                        output_layer: {
                            extra_dim: extra_dim
                        }
                    };
                    // free any weight map tensors that were not mapped into params
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: params, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Pointwise convolution layer: conv2d ('same' padding) + batch norm offset,
 * clipped to [0, 6] (ReLU6-style activation).
 */
function pointwiseConvLayer(x, params, strides) {
    return tidy(function () {
        var convolved = conv2d(x, params.filters, strides, 'same');
        var shifted = add(convolved, params.batch_norm_offset);
        return clipByValue(shifted, 0, 6);
    });
}
|
||
|
|
||
|
// batch norm epsilon (this is the exact float32 representation of 1e-3)
var epsilon = 0.0010000000474974513;
|
||
|
/**
 * Depthwise convolution layer: depthwiseConv2d ('same' padding) + batch
 * normalization, clipped to [0, 6] (ReLU6-style activation).
 */
function depthwiseConvLayer(x, params, strides) {
    return tidy(function () {
        var convolved = depthwiseConv2d(x, params.filters, strides, 'same');
        var normalized = batchNormalization(convolved, params.batch_norm_mean, params.batch_norm_variance, epsilon, params.batch_norm_scale, params.batch_norm_offset);
        return clipByValue(normalized, 0, 6);
    });
}
|
||
|
/**
 * Returns the depthwise conv strides for a mobilenet layer index:
 * layers 2, 4, 6 and 12 downsample with stride [2, 2], all others use [1, 1].
 */
function getStridesForLayerIdx(layerIdx) {
    var downsamplingLayerIdxs = [2, 4, 6, 12];
    return downsamplingLayerIdxs.indexOf(layerIdx) !== -1 ? [2, 2] : [1, 1];
}
|
||
|
/**
 * Runs the mobilenet v1 backbone: an initial pointwise conv followed by 13
 * depthwise/pointwise conv pairs.
 * @returns { out, conv11 } — the final feature map and the intermediate
 *   output of conv layer 11 (consumed by a later prediction branch).
 * @throws Error if the conv 11 output was never produced.
 */
function mobileNetV1(x, params) {
    return tidy(function () {
        var conv11Out = null;
        var out = pointwiseConvLayer(x, params.conv_0, [2, 2]);
        var pairParams = [
            params.conv_1,
            params.conv_2,
            params.conv_3,
            params.conv_4,
            params.conv_5,
            params.conv_6,
            params.conv_7,
            params.conv_8,
            params.conv_9,
            params.conv_10,
            params.conv_11,
            params.conv_12,
            params.conv_13
        ];
        for (var i = 0; i < pairParams.length; i++) {
            var layerIdx = i + 1;
            var strides = getStridesForLayerIdx(layerIdx);
            out = depthwiseConvLayer(out, pairParams[i].depthwise_conv, strides);
            out = pointwiseConvLayer(out, pairParams[i].pointwise_conv, [1, 1]);
            // remember the conv 11 feature map for the caller
            if (layerIdx === 11) {
                conv11Out = out;
            }
        }
        if (conv11Out === null) {
            throw new Error('mobileNetV1 - output of conv layer 11 is null');
        }
        return {
            out: out,
            conv11: conv11Out
        };
    });
}
|
||
|
|
||
|
/**
 * CPU greedy non-max suppression (mirrors tf.image.non_max_suppression):
 * candidates above scoreThreshold are visited in descending score order;
 * a candidate is kept only if its IOU with every already-selected box is
 * at or below iouThreshold (hard suppression — the score is zeroed otherwise).
 *
 * @param boxes tensor-like with shape [numBoxes, 4] and a .get(row, col) accessor
 * @param scores plain array of per-box scores
 * @returns array of selected box indices, at most maxOutputSize long.
 */
function nonMaxSuppression$2(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
    var outputSize = Math.min(maxOutputSize, boxes.shape[0]);
    var candidates = scores
        .map(function (score, boxIndex) { return ({ score: score, boxIndex: boxIndex }); })
        .filter(function (c) { return c.score > scoreThreshold; })
        .sort(function (c1, c2) { return c2.score - c1.score; });
    // hard suppression: overlap above threshold zeroes the candidate's score
    var suppressFunc = function (x) { return x <= iouThreshold ? 1 : 0; };
    var selected = [];
    for (var i = 0; i < candidates.length; i++) {
        // selected only ever grows, so once full we can stop entirely
        if (selected.length >= outputSize) {
            break;
        }
        var candidate = candidates[i];
        var originalScore = candidate.score;
        for (var j = selected.length - 1; j >= 0; --j) {
            var iou = IOU(boxes, candidate.boxIndex, selected[j]);
            if (iou === 0.0) {
                continue;
            }
            candidate.score *= suppressFunc(iou);
            if (candidate.score <= scoreThreshold) {
                break;
            }
        }
        // keep the candidate only if no selected box suppressed it
        if (originalScore === candidate.score) {
            selected.push(candidate.boxIndex);
        }
    }
    return selected;
}
|
||
|
/**
 * Intersection-over-union of rows i and j of a [numBoxes, 4] box tensor.
 * Each row is read as (y0, x0, y1, x1); min/max are applied so the corner
 * order does not matter. Returns 0 when either box has non-positive area.
 */
function IOU(boxes, i, j) {
    var readBox = function (row) {
        var y0 = boxes.get(row, 0);
        var x0 = boxes.get(row, 1);
        var y1 = boxes.get(row, 2);
        var x1 = boxes.get(row, 3);
        return {
            ymin: Math.min(y0, y1),
            xmin: Math.min(x0, x1),
            ymax: Math.max(y0, y1),
            xmax: Math.max(x0, x1)
        };
    };
    var boxI = readBox(i);
    var boxJ = readBox(j);
    var areaI = (boxI.ymax - boxI.ymin) * (boxI.xmax - boxI.xmin);
    var areaJ = (boxJ.ymax - boxJ.ymin) * (boxJ.xmax - boxJ.xmin);
    if (areaI <= 0 || areaJ <= 0) {
        return 0.0;
    }
    var interYmin = Math.max(boxI.ymin, boxJ.ymin);
    var interXmin = Math.max(boxI.xmin, boxJ.xmin);
    var interYmax = Math.min(boxI.ymax, boxJ.ymax);
    var interXmax = Math.min(boxI.xmax, boxJ.xmax);
    // clamp at 0 so disjoint boxes contribute no negative area
    var interArea = Math.max(interYmax - interYmin, 0.0) *
        Math.max(interXmax - interXmin, 0.0);
    return interArea / (areaI + areaJ - interArea);
}
|
||
|
|
||
|
/**
 * Converts corner-encoded anchor boxes (columns y0, x0, y1, x1 of a
 * [numBoxes, 4] tensor) into per-axis sizes and center coordinates.
 * Returns tensors, one per axis, in { sizes: [h, w], centers: [cy, cx] }.
 */
function getCenterCoordinatesAndSizesLayer(x) {
    var vec = unstack(transpose(x, [1, 0]));
    var sizeY = sub(vec[2], vec[0]);
    var sizeX = sub(vec[3], vec[1]);
    var sizes = [sizeY, sizeX];
    var centers = [
        add(vec[0], div(sizeY, scalar(2))),
        add(vec[1], div(sizeX, scalar(2)))
    ];
    return {
        sizes: sizes,
        centers: centers
    };
}
|
||
|
/**
 * Decodes SSD box regressions (x1) relative to anchor boxes (x0) back into
 * corner coordinates. Offsets are scaled by 1/10, extents by exp(v/5), the
 * standard SSD decode with scale factors 10 and 5.
 * Returns a [numBoxes, 4] tensor of (ymin, xmin, ymax, xmax).
 */
function decodeBoxesLayer(x0, x1) {
    var _a = getCenterCoordinatesAndSizesLayer(x0), sizes = _a.sizes, centers = _a.centers;
    var vec = unstack(transpose(x1, [1, 0]));
    var halfExtentY = div(mul(exp(div(vec[2], scalar(5))), sizes[0]), scalar(2));
    var centerY = add(mul(div(vec[0], scalar(10)), sizes[0]), centers[0]);
    var halfExtentX = div(mul(exp(div(vec[3], scalar(5))), sizes[1]), scalar(2));
    var centerX = add(mul(div(vec[1], scalar(10)), sizes[1]), centers[1]);
    return transpose(stack([
        sub(centerY, halfExtentY),
        sub(centerX, halfExtentX),
        add(centerY, halfExtentY),
        add(centerX, halfExtentX)
    ]), [1, 0]);
}
|
||
|
/**
 * Final SSD output layer: decodes the raw box regressions against the
 * pre-computed anchors (params.extra_dim, tiled per batch item), applies a
 * sigmoid to the face-class logits, and unstacks both along the batch axis.
 * Returns { boxes, scores } as arrays with one tensor per batch item.
 */
function outputLayer(boxPredictions, classPredictions, params) {
    return tidy(function () {
        var batchSize = boxPredictions.shape[0];
        var anchors = reshape(tile(params.extra_dim, [batchSize, 1, 1]), [-1, 4]);
        var decodedBoxes = decodeBoxesLayer(anchors, reshape(boxPredictions, [-1, 4]));
        decodedBoxes = reshape(decodedBoxes, [batchSize, (decodedBoxes.shape[0] / batchSize), 4]);
        // skip the background class (index 0) before the sigmoid
        var scoresAndClasses = sigmoid(slice(classPredictions, [0, 0, 1], [-1, -1, -1]));
        var faceScores = slice(scoresAndClasses, [0, 0, 0], [-1, -1, 1]);
        faceScores = reshape(faceScores, [batchSize, faceScores.shape[1]]);
        return {
            boxes: unstack(decodedBoxes),
            scores: unstack(faceScores)
        };
    });
}
|
||
|
|
||
|
/**
 * SSD box-predictor head for one feature map: one conv produces the box
 * encodings (reshaped to [batch, numAnchors, 1, 4]) and one conv the class
 * logits (reshaped to [batch, numAnchors, 3]).
 */
function boxPredictionLayer(x, params) {
    return tidy(function () {
        var batchSize = x.shape[0];
        var rawBoxEncoding = convLayer(x, params.box_encoding_predictor);
        var rawClassPrediction = convLayer(x, params.class_predictor);
        return {
            boxPredictionEncoding: reshape(rawBoxEncoding, [batchSize, -1, 1, 4]),
            classPrediction: reshape(rawClassPrediction, [batchSize, -1, 3])
        };
    });
}
|
||
|
|
||
|
/**
 * SSD prediction layer: builds the extra feature pyramid (conv_0..conv_7,
 * alternating stride [1,1] / [2,2]) on top of the backbone output, then runs
 * a box-predictor head over six feature maps (layer-11 backbone output, the
 * backbone output itself, and conv1/conv3/conv5/conv7) and concatenates all
 * box encodings and class predictions along the anchor axis.
 */
function predictionLayer(x, conv11, params) {
    return tidy(function () {
        var extraConvParams = [
            params.conv_0, params.conv_1, params.conv_2, params.conv_3,
            params.conv_4, params.conv_5, params.conv_6, params.conv_7
        ];
        var convOuts = [];
        var prev = x;
        extraConvParams.forEach(function (convParams, i) {
            // even layers keep resolution, odd layers downsample
            prev = pointwiseConvLayer(prev, convParams, i % 2 === 0 ? [1, 1] : [2, 2]);
            convOuts.push(prev);
        });
        var predictorSources = [conv11, x, convOuts[1], convOuts[3], convOuts[5], convOuts[7]];
        var predictorParams = [
            params.box_predictor_0, params.box_predictor_1, params.box_predictor_2,
            params.box_predictor_3, params.box_predictor_4, params.box_predictor_5
        ];
        var predictions = predictorSources.map(function (source, i) {
            return boxPredictionLayer(source, predictorParams[i]);
        });
        var boxPredictions = concat(predictions.map(function (p) { return p.boxPredictionEncoding; }), 1);
        var classPredictions = concat(predictions.map(function (p) { return p.classPrediction; }), 1);
        return {
            boxPredictions: boxPredictions,
            classPredictions: classPredictions
        };
    });
}
|
||
|
|
||
|
/**
 * Options for SsdMobilenetv1.locateFaces.
 * - minConfidence: detection score threshold, strictly between 0 and 1 (default 0.5)
 * - maxResults: maximum number of detections returned (default 100)
 * Throws on construction when either option is invalid.
 */
var SsdMobilenetv1Options = /** @class */ (function () {
    function SsdMobilenetv1Options(options) {
        if (options === void 0) { options = {}; }
        this._name = 'SsdMobilenetv1Options';
        this._minConfidence = options.minConfidence || 0.5;
        this._maxResults = options.maxResults || 100;
        var confidenceIsValid = typeof this._minConfidence === 'number'
            && this._minConfidence > 0
            && this._minConfidence < 1;
        if (!confidenceIsValid) {
            throw new Error(this._name + " - expected minConfidence to be a number between 0 and 1");
        }
        if (typeof this._maxResults !== 'number') {
            throw new Error(this._name + " - expected maxResults to be a number");
        }
    }
    Object.defineProperty(SsdMobilenetv1Options.prototype, "minConfidence", {
        get: function () { return this._minConfidence; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(SsdMobilenetv1Options.prototype, "maxResults", {
        get: function () { return this._maxResults; },
        enumerable: true,
        configurable: true
    });
    return SsdMobilenetv1Options;
}());
|
||
|
|
||
|
// SSD-MobilenetV1 face detector. Extends NeuralNetwork (loading/params plumbing
// lives in the base class). Note: forward/locateFaces bodies are transpiled
// async state machines (__awaiter$1/__generator$1) — edit with care.
var SsdMobilenetv1 = /** @class */ (function (_super) {
    __extends$1(SsdMobilenetv1, _super);
    function SsdMobilenetv1() {
        return _super.call(this, 'SsdMobilenetv1') || this;
    }
    // Runs the full network on an already-constructed NetInput.
    // Returns { boxes, scores } (arrays with one tensor per batch item).
    // Throws if the model weights have not been loaded yet.
    SsdMobilenetv1.prototype.forwardInput = function (input) {
        var params = this.params;
        if (!params) {
            throw new Error('SsdMobilenetv1 - load model before inference');
        }
        return tidy(function () {
            // resize to the fixed 512x512 input and map pixel values from
            // [0, 255] to [-1, 1] (0.007843... is the float32 value of 2/255)
            var batchTensor = input.toBatchTensor(512, false).toFloat();
            var x = sub(mul(batchTensor, scalar(0.007843137718737125)), scalar(1));
            var features = mobileNetV1(x, params.mobilenetv1);
            var _a = predictionLayer(features.out, features.conv11, params.prediction_layer), boxPredictions = _a.boxPredictions, classPredictions = _a.classPredictions;
            return outputLayer(boxPredictions, classPredictions, params.output_layer);
        });
    };
    // Async convenience wrapper: converts any supported media input to a
    // NetInput, then delegates to forwardInput.
    SsdMobilenetv1.prototype.forward = function (input) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [2 /*return*/, _a.apply(this, [_b.sent()])];
                }
            });
        });
    };
    // Detects faces in the input. Applies non-max suppression (IOU 0.5) to the
    // raw network output and maps normalized box coordinates back to the
    // original (un-padded) input dimensions. Resolves to FaceDetection[].
    SsdMobilenetv1.prototype.locateFaces = function (input, options) {
        if (options === void 0) { options = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var _a, maxResults, minConfidence, netInput, _b, _boxes, _scores, boxes, scores, i, scoresData, _c, _d, iouThreshold, indices, reshapedDims, inputSize, padX, padY, results;
            return __generator$1(this, function (_e) {
                switch (_e.label) {
                    case 0:
                        _a = new SsdMobilenetv1Options(options), maxResults = _a.maxResults, minConfidence = _a.minConfidence;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1:
                        netInput = _e.sent();
                        _b = this.forwardInput(netInput), _boxes = _b.boxes, _scores = _b.scores;
                        // only the first batch item is used; dispose the rest
                        boxes = _boxes[0];
                        scores = _scores[0];
                        for (i = 1; i < _boxes.length; i++) {
                            _boxes[i].dispose();
                            _scores[i].dispose();
                        }
                        _d = (_c = Array).from;
                        return [4 /*yield*/, scores.data()];
                    case 2:
                        scoresData = _d.apply(_c, [_e.sent()]);
                        iouThreshold = 0.5;
                        indices = nonMaxSuppression$2(boxes, scoresData, maxResults, iouThreshold, minConfidence);
                        // padX/padY undo the letterboxing applied by toBatchTensor
                        reshapedDims = netInput.getReshapedInputDimensions(0);
                        inputSize = netInput.inputSize;
                        padX = inputSize / reshapedDims.width;
                        padY = inputSize / reshapedDims.height;
                        results = indices
                            .map(function (idx) {
                            var _a = [
                                Math.max(0, boxes.get(idx, 0)),
                                Math.min(1.0, boxes.get(idx, 2))
                            ].map(function (val) { return val * padY; }), top = _a[0], bottom = _a[1];
                            var _b = [
                                Math.max(0, boxes.get(idx, 1)),
                                Math.min(1.0, boxes.get(idx, 3))
                            ].map(function (val) { return val * padX; }), left = _b[0], right = _b[1];
                            return new FaceDetection(scoresData[idx], new Rect(left, top, right - left, bottom - top), {
                                height: netInput.getInputHeight(0),
                                width: netInput.getInputWidth(0)
                            });
                        });
                        boxes.dispose();
                        scores.dispose();
                        return [2 /*return*/, results];
                }
            });
        });
    };
    // Loads the quantized model weights from the given uri (see loadQuantizedParams$3).
    SsdMobilenetv1.prototype.loadQuantizedParams = function (uri) {
        return loadQuantizedParams$3(uri);
    };
    // Extracts model params from a raw Float32Array of weights (see extractParams$3).
    SsdMobilenetv1.prototype.extractParams = function (weights) {
        return extractParams$3(weights);
    };
    return SsdMobilenetv1;
}(NeuralNetwork));
|
||
|
|
||
|
/**
 * Factory: creates an SsdMobilenetv1 instance initialized from a raw
 * (non-quantized) weight array.
 */
function createSsdMobilenetv1(weights) {
    var ssdNet = new SsdMobilenetv1();
    ssdNet.extractWeights(weights);
    return ssdNet;
}
|
||
|
/**
 * Deprecated-style factory kept for backward compatibility; simply forwards
 * to createSsdMobilenetv1.
 */
function createFaceDetectionNet(weights) {
    var net = createSsdMobilenetv1(weights);
    return net;
}
|
||
|
// alias for backward compatibility: old code that referenced FaceDetectionNet
// gets the SsdMobilenetv1 implementation unchanged (the constructor just
// forwards all arguments to the superclass).
var FaceDetectionNet = /** @class */ (function (_super) {
    __extends$1(FaceDetectionNet, _super);
    function FaceDetectionNet() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return FaceDetectionNet;
}(SsdMobilenetv1));
|
||
|
|
||
|
// Options for the tiny face detector; identical to TinyYolov2Options except
// for the _name used in validation error messages.
var TinyFaceDetectorOptions = /** @class */ (function (_super) {
    __extends$1(TinyFaceDetectorOptions, _super);
    function TinyFaceDetectorOptions() {
        var _this = _super !== null && _super.apply(this, arguments) || this;
        _this._name = 'TinyFaceDetectorOptions';
        return _this;
    }
    return TinyFaceDetectorOptions;
}(TinyYolov2Options));
|
||
|
|
||
|
// Base class for lazily-executed, thenable tasks: implementing then() makes a
// task awaitable. Subclasses must override run(); the base implementation
// throws. Bodies are transpiled async state machines (__awaiter$1/__generator$1).
var ComposableTask = /** @class */ (function () {
    function ComposableTask() {
    }
    // Makes the task Promise-like: awaits run() and applies onfulfilled to the result.
    ComposableTask.prototype.then = function (onfulfilled) {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = onfulfilled;
                        return [4 /*yield*/, this.run()];
                    case 1: return [2 /*return*/, _a.apply(void 0, [_b.sent()])];
                }
            });
        });
    };
    // Abstract: subclasses perform the actual work here.
    ComposableTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            return __generator$1(this, function (_a) {
                throw new Error('ComposableTask - run is not implemented');
            });
        });
    };
    return ComposableTask;
}());
|
||
|
|
||
|
/**
 * Reverses the channel axis (axis 3) of a 4D image tensor, converting
 * BGR channel order to RGB (and vice versa).
 */
function bgrToRgbTensor(tensor$$1) {
    return tidy(function () {
        var channels = unstack(tensor$$1, 3);
        channels.reverse();
        return stack(channels, 3);
    });
}
|
||
|
|
||
|
// MTCNN PNet constants (used by pyramidDown and extractBoundingBoxes):
// stride in input pixels between neighbouring PNet output cells.
var CELL_STRIDE = 2;
// side length in input pixels covered by a single PNet output cell.
var CELL_SIZE$1 = 12;
|
||
|
|
||
|
// Builds extractors that slice the P/R/O-net parameters of MTCNN out of a flat
// weight array. extractWeights consumes values sequentially, so the call order
// below must exactly match the serialized weight layout; every extracted
// tensor is also recorded in the shared paramMappings list.
function extractorsFactory$7(extractWeights, paramMappings) {
    var extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);
    var extractFCParams = extractFCParamsFactory(extractWeights, paramMappings);
    // Extracts a PReLU alpha vector of the given size.
    function extractPReluParams(size, paramPath) {
        var alpha = tensor1d(extractWeights(size));
        paramMappings.push({ paramPath: paramPath });
        return alpha;
    }
    // Extracts the conv1/conv2/conv3 (+ PReLU alphas) trunk shared by all three
    // nets. numFilters lists channel counts per stage; RNet uses a 2x2 kernel
    // for conv3 instead of 3x3.
    function extractSharedParams(numFilters, mappedPrefix, isRnet) {
        if (isRnet === void 0) { isRnet = false; }
        var conv1 = extractConvParams(numFilters[0], numFilters[1], 3, mappedPrefix + "/conv1");
        var prelu1_alpha = extractPReluParams(numFilters[1], mappedPrefix + "/prelu1_alpha");
        var conv2 = extractConvParams(numFilters[1], numFilters[2], 3, mappedPrefix + "/conv2");
        var prelu2_alpha = extractPReluParams(numFilters[2], mappedPrefix + "/prelu2_alpha");
        var conv3 = extractConvParams(numFilters[2], numFilters[3], isRnet ? 2 : 3, mappedPrefix + "/conv3");
        var prelu3_alpha = extractPReluParams(numFilters[3], mappedPrefix + "/prelu3_alpha");
        return { conv1: conv1, prelu1_alpha: prelu1_alpha, conv2: conv2, prelu2_alpha: prelu2_alpha, conv3: conv3, prelu3_alpha: prelu3_alpha };
    }
    // PNet: shared trunk + two 1x1 conv heads (face prob / box regression).
    function extractPNetParams() {
        var sharedParams = extractSharedParams([3, 10, 16, 32], 'pnet');
        var conv4_1 = extractConvParams(32, 2, 1, 'pnet/conv4_1');
        var conv4_2 = extractConvParams(32, 4, 1, 'pnet/conv4_2');
        return __assign$1({}, sharedParams, { conv4_1: conv4_1, conv4_2: conv4_2 });
    }
    // RNet: shared trunk + fully-connected classification/regression heads.
    function extractRNetParams() {
        var sharedParams = extractSharedParams([3, 28, 48, 64], 'rnet', true);
        var fc1 = extractFCParams(576, 128, 'rnet/fc1');
        var prelu4_alpha = extractPReluParams(128, 'rnet/prelu4_alpha');
        var fc2_1 = extractFCParams(128, 2, 'rnet/fc2_1');
        var fc2_2 = extractFCParams(128, 4, 'rnet/fc2_2');
        return __assign$1({}, sharedParams, { fc1: fc1, prelu4_alpha: prelu4_alpha, fc2_1: fc2_1, fc2_2: fc2_2 });
    }
    // ONet: shared trunk + conv4 + FC heads, including the landmark head fc2_3.
    function extractONetParams() {
        var sharedParams = extractSharedParams([3, 32, 64, 64], 'onet');
        var conv4 = extractConvParams(64, 128, 2, 'onet/conv4');
        var prelu4_alpha = extractPReluParams(128, 'onet/prelu4_alpha');
        var fc1 = extractFCParams(1152, 256, 'onet/fc1');
        var prelu5_alpha = extractPReluParams(256, 'onet/prelu5_alpha');
        var fc2_1 = extractFCParams(256, 2, 'onet/fc2_1');
        var fc2_2 = extractFCParams(256, 4, 'onet/fc2_2');
        var fc2_3 = extractFCParams(256, 10, 'onet/fc2_3');
        return __assign$1({}, sharedParams, { conv4: conv4, prelu4_alpha: prelu4_alpha, fc1: fc1, prelu5_alpha: prelu5_alpha, fc2_1: fc2_1, fc2_2: fc2_2, fc2_3: fc2_3 });
    }
    return {
        extractPNetParams: extractPNetParams,
        extractRNetParams: extractRNetParams,
        extractONetParams: extractONetParams
    };
}
|
||
|
/**
 * Extracts the full MTCNN parameter set (pnet, rnet, onet) from a flat weight
 * array. Throws if any weights are left unconsumed after extraction (which
 * would indicate a layout mismatch between the weights and the extractors).
 *
 * Fix: corrected the typo "remaing" -> "remaining" in the error message.
 */
function extractParams$4(weights) {
    var _a = extractWeightsFactory(weights), extractWeights = _a.extractWeights, getRemainingWeights = _a.getRemainingWeights;
    var paramMappings = [];
    var _b = extractorsFactory$7(extractWeights, paramMappings), extractPNetParams = _b.extractPNetParams, extractRNetParams = _b.extractRNetParams, extractONetParams = _b.extractONetParams;
    // extraction order matters: the factory consumes the flat array sequentially
    var pnet = extractPNetParams();
    var rnet = extractRNetParams();
    var onet = extractONetParams();
    if (getRemainingWeights().length !== 0) {
        throw new Error("weights remaining after extract: " + getRemainingWeights().length);
    }
    return { params: { pnet: pnet, rnet: rnet, onet: onet }, paramMappings: paramMappings };
}
|
||
|
|
||
|
/**
 * Scales an input's [height, width] pair by the given factor, flooring each
 * dimension to an integer pixel count.
 */
function getSizesForScale(scale, _a) {
    var inputHeight = _a[0];
    var inputWidth = _a[1];
    return {
        height: Math.floor(inputHeight * scale),
        width: Math.floor(inputWidth * scale)
    };
}
|
||
|
|
||
|
// Default weight-manifest name passed to loadWeightMap for the MTCNN model.
var DEFAULT_MODEL_NAME$4 = 'mtcnn_model';
|
||
|
// Builds extractors that pull the MTCNN P/R/O-net parameters out of a loaded
// (quantized) weight map, keyed by tensor path. Each lookup is recorded in the
// shared paramMappings list via extractWeightEntryFactory; the second argument
// to extractWeightEntry is the expected tensor rank.
function extractorsFactory$8(weightMap, paramMappings) {
    var extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);
    // Conv layer: rank-4 filters (remapped to "<prefix>/filters") + rank-1 bias.
    function extractConvParams(prefix) {
        var filters = extractWeightEntry(prefix + "/weights", 4, prefix + "/filters");
        var bias = extractWeightEntry(prefix + "/bias", 1);
        return { filters: filters, bias: bias };
    }
    // Fully-connected layer: rank-2 weights + rank-1 bias.
    function extractFCParams(prefix) {
        var weights = extractWeightEntry(prefix + "/weights", 2);
        var bias = extractWeightEntry(prefix + "/bias", 1);
        return { weights: weights, bias: bias };
    }
    // PReLU alpha: a single rank-1 tensor.
    function extractPReluParams(paramPath) {
        return extractWeightEntry(paramPath, 1);
    }
    // conv1..conv3 trunk (+ PReLU alphas) shared by pnet/rnet/onet.
    function extractSharedParams(prefix) {
        var conv1 = extractConvParams(prefix + "/conv1");
        var prelu1_alpha = extractPReluParams(prefix + "/prelu1_alpha");
        var conv2 = extractConvParams(prefix + "/conv2");
        var prelu2_alpha = extractPReluParams(prefix + "/prelu2_alpha");
        var conv3 = extractConvParams(prefix + "/conv3");
        var prelu3_alpha = extractPReluParams(prefix + "/prelu3_alpha");
        return { conv1: conv1, prelu1_alpha: prelu1_alpha, conv2: conv2, prelu2_alpha: prelu2_alpha, conv3: conv3, prelu3_alpha: prelu3_alpha };
    }
    // PNet: trunk + conv heads for face probability and box regression.
    function extractPNetParams() {
        var sharedParams = extractSharedParams('pnet');
        var conv4_1 = extractConvParams('pnet/conv4_1');
        var conv4_2 = extractConvParams('pnet/conv4_2');
        return __assign$1({}, sharedParams, { conv4_1: conv4_1, conv4_2: conv4_2 });
    }
    // RNet: trunk + fully-connected classification/regression heads.
    function extractRNetParams() {
        var sharedParams = extractSharedParams('rnet');
        var fc1 = extractFCParams('rnet/fc1');
        var prelu4_alpha = extractPReluParams('rnet/prelu4_alpha');
        var fc2_1 = extractFCParams('rnet/fc2_1');
        var fc2_2 = extractFCParams('rnet/fc2_2');
        return __assign$1({}, sharedParams, { fc1: fc1, prelu4_alpha: prelu4_alpha, fc2_1: fc2_1, fc2_2: fc2_2 });
    }
    // ONet: trunk + conv4 + FC heads, including the landmark head fc2_3.
    function extractONetParams() {
        var sharedParams = extractSharedParams('onet');
        var conv4 = extractConvParams('onet/conv4');
        var prelu4_alpha = extractPReluParams('onet/prelu4_alpha');
        var fc1 = extractFCParams('onet/fc1');
        var prelu5_alpha = extractPReluParams('onet/prelu5_alpha');
        var fc2_1 = extractFCParams('onet/fc2_1');
        var fc2_2 = extractFCParams('onet/fc2_2');
        var fc2_3 = extractFCParams('onet/fc2_3');
        return __assign$1({}, sharedParams, { conv4: conv4, prelu4_alpha: prelu4_alpha, fc1: fc1, prelu5_alpha: prelu5_alpha, fc2_1: fc2_1, fc2_2: fc2_2, fc2_3: fc2_3 });
    }
    return {
        extractPNetParams: extractPNetParams,
        extractRNetParams: extractRNetParams,
        extractONetParams: extractONetParams
    };
}
|
||
|
// Loads the quantized MTCNN weight map from uri, extracts the pnet/rnet/onet
// parameter objects and disposes any weight tensors that were not consumed.
// Body is a transpiled async state machine (__awaiter$1/__generator$1).
function loadQuantizedParams$4(uri) {
    return __awaiter$1(this, void 0, void 0, function () {
        var weightMap, paramMappings, _a, extractPNetParams, extractRNetParams, extractONetParams, pnet, rnet, onet;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0: return [4 /*yield*/, loadWeightMap(uri, DEFAULT_MODEL_NAME$4)];
                case 1:
                    weightMap = _b.sent();
                    paramMappings = [];
                    _a = extractorsFactory$8(weightMap, paramMappings), extractPNetParams = _a.extractPNetParams, extractRNetParams = _a.extractRNetParams, extractONetParams = _a.extractONetParams;
                    pnet = extractPNetParams();
                    rnet = extractRNetParams();
                    onet = extractONetParams();
                    // free any tensors in the map that no extractor claimed
                    disposeUnusedWeightTensors(weightMap, paramMappings);
                    return [2 /*return*/, { params: { pnet: pnet, rnet: rnet, onet: onet }, paramMappings: paramMappings }];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * Computes the MTCNN image-pyramid scales for an input of dims [height, width]:
 * starting from the scale at which a minFaceSize face fills one PNet cell,
 * multiply by scaleFactor until the smaller image side would drop below 12px.
 *
 * @returns array of scale factors, largest first.
 */
function pyramidDown(minFaceSize, scaleFactor, dims) {
    var height = dims[0];
    var width = dims[1];
    var m = CELL_SIZE$1 / minFaceSize;
    var scales = [];
    var layerSize = Math.min(height, width) * m;
    var step = 0;
    while (layerSize >= 12) {
        scales.push(m * Math.pow(scaleFactor, step));
        layerSize = layerSize * scaleFactor;
        step += 1;
    }
    return scales;
}
|
||
|
|
||
|
// Box subclass taking positional (left, top, right, bottom) arguments; the
// second superclass argument (true) allows boxes with negative/zero extents,
// which MTCNN regression offsets can legitimately produce.
var MtcnnBox = /** @class */ (function (_super) {
    __extends$1(MtcnnBox, _super);
    function MtcnnBox(left, top, right, bottom) {
        return _super.call(this, { left: left, top: top, right: right, bottom: bottom }, true) || this;
    }
    return MtcnnBox;
}(Box));
|
||
|
|
||
|
/**
 * MTCNN input normalization: maps pixel values from [0, 255] to roughly
 * [-1, 1] via (x - 127.5) * 0.0078125 (0.0078125 === 1/128).
 */
function normalize$1(x) {
    return tidy(function () {
        var centered = sub(x, scalar(127.5));
        return mul(centered, scalar(0.0078125));
    });
}
|
||
|
|
||
|
/**
 * Parametric ReLU: relu(x) + alpha * min(x, 0), built from relu/neg so it
 * runs on any backend: the negative part is -relu(-x) scaled by alpha.
 */
function prelu$1(x, alpha) {
    return tidy(function () {
        var positivePart = relu(x);
        var negativePart = mul(alpha, neg(relu(neg(x))));
        return add(positivePart, negativePart);
    });
}
|
||
|
|
||
|
/**
 * Convolutional trunk shared by PNet, RNet and ONet: three conv+PReLU stages
 * with max-pooling in between. PNet uses a 2x2 'same' pool after stage 1 and
 * skips the pool after stage 2; RNet/ONet use 3x3 pools.
 */
function sharedLayer(x, params, isPnet) {
    if (isPnet === void 0) { isPnet = false; }
    return tidy(function () {
        var activations = convLayer(x, params.conv1, 'valid');
        activations = prelu$1(activations, params.prelu1_alpha);
        activations = maxPool(activations, isPnet ? [2, 2] : [3, 3], [2, 2], 'same');
        activations = convLayer(activations, params.conv2, 'valid');
        activations = prelu$1(activations, params.prelu2_alpha);
        if (!isPnet) {
            activations = maxPool(activations, [3, 3], [2, 2], 'valid');
        }
        activations = convLayer(activations, params.conv3, 'valid');
        activations = prelu$1(activations, params.prelu3_alpha);
        return activations;
    });
}
|
||
|
|
||
|
/**
 * MTCNN proposal network: shared trunk + two conv heads. The classification
 * head is turned into probabilities with a numerically stabilized softmax
 * (max subtracted along the channel axis); the second head regresses boxes.
 */
function PNet(x, params) {
    return tidy(function () {
        var trunkOut = sharedLayer(x, params, true);
        var logits = convLayer(trunkOut, params.conv4_1, 'valid');
        var channelMax = expandDims(max(logits, 3), 3);
        var prob = softmax(sub(logits, channelMax), 3);
        var regions = convLayer(trunkOut, params.conv4_2, 'valid');
        return { prob: prob, regions: regions };
    });
}
|
||
|
|
||
|
/**
 * Resizes one pyramid level to its target size (bilinear), applies the MTCNN
 * pixel normalization, and swaps the height/width axes (the nets here operate
 * on width-major input).
 */
function rescaleAndNormalize(x, scale) {
    return tidy(function () {
        var targetSizes = getSizesForScale(scale, x.shape.slice(1));
        var resized = image_ops.resizeBilinear(x, [targetSizes.height, targetSizes.width]);
        var normalized = normalize$1(resized);
        return transpose(normalized, [0, 2, 1, 3]);
    });
}
|
||
|
/**
 * Scans the PNet score map for cells at or above scoreThreshold and, for each,
 * builds the corresponding bounding box in original-image coordinates (via
 * CELL_STRIDE/CELL_SIZE$1 and the pyramid scale) plus its score and the raw
 * regression offsets.
 *
 * @returns array of { cell, score, region } candidates.
 */
function extractBoundingBoxes(scoresTensor, regionsTensor, scale, scoreThreshold) {
    // TODO: fix this!, maybe better to use tf.gather here
    var numRows = scoresTensor.shape[0];
    var numCols = scoresTensor.shape[1];
    var candidateCells = [];
    for (var row = 0; row < numRows; row++) {
        for (var col = 0; col < numCols; col++) {
            if (scoresTensor.get(row, col) >= scoreThreshold) {
                candidateCells.push(new Point(col, row));
            }
        }
    }
    return candidateCells.map(function (pos) {
        var cell = new BoundingBox(
            Math.round((pos.y * CELL_STRIDE + 1) / scale),
            Math.round((pos.x * CELL_STRIDE + 1) / scale),
            Math.round((pos.y * CELL_STRIDE + CELL_SIZE$1) / scale),
            Math.round((pos.x * CELL_STRIDE + CELL_SIZE$1) / scale)
        );
        var score = scoresTensor.get(pos.y, pos.x);
        var region = new MtcnnBox(
            regionsTensor.get(pos.y, pos.x, 0),
            regionsTensor.get(pos.y, pos.x, 1),
            regionsTensor.get(pos.y, pos.x, 2),
            regionsTensor.get(pos.y, pos.x, 3)
        );
        return {
            cell: cell,
            score: score,
            region: region
        };
    });
}
|
||
|
// MTCNN stage 1: runs PNet over every pyramid scale, extracts candidate boxes
// per scale, applies per-scale NMS (IOU 0.5) and a final cross-scale NMS
// (IOU 0.7), then calibrates surviving boxes with their regression offsets.
// Timing info is appended to the caller-owned stats object as a side effect.
function stage1(imgTensor, scales, scoreThreshold, params, stats) {
    stats.stage1 = [];
    // forward pass per scale; score/region tensors are returned out of tidy()
    // and must be disposed manually below
    var pnetOutputs = scales.map(function (scale) { return tidy(function () {
        var statsForScale = { scale: scale };
        var resized = rescaleAndNormalize(imgTensor, scale);
        var ts = Date.now();
        var _a = PNet(resized, params), prob = _a.prob, regions = _a.regions;
        statsForScale.pnet = Date.now() - ts;
        // prob channel 1 is the face probability
        var scoresTensor = unstack(unstack(prob, 3)[1])[0];
        var regionsTensor = unstack(regions)[0];
        return {
            scoresTensor: scoresTensor,
            regionsTensor: regionsTensor,
            scale: scale,
            statsForScale: statsForScale
        };
    }); });
    var boxesForScale = pnetOutputs.map(function (_a) {
        var scoresTensor = _a.scoresTensor, regionsTensor = _a.regionsTensor, scale = _a.scale, statsForScale = _a.statsForScale;
        var boundingBoxes = extractBoundingBoxes(scoresTensor, regionsTensor, scale, scoreThreshold);
        scoresTensor.dispose();
        regionsTensor.dispose();
        if (!boundingBoxes.length) {
            stats.stage1.push(statsForScale);
            return [];
        }
        var ts = Date.now();
        // per-scale NMS with IOU threshold 0.5
        var indices = nonMaxSuppression$1(boundingBoxes.map(function (bbox) { return bbox.cell; }), boundingBoxes.map(function (bbox) { return bbox.score; }), 0.5);
        statsForScale.nms = Date.now() - ts;
        statsForScale.numBoxes = indices.length;
        stats.stage1.push(statsForScale);
        return indices.map(function (boxIdx) { return boundingBoxes[boxIdx]; });
    });
    var allBoxes = boxesForScale.reduce(function (all$$1, boxes) { return all$$1.concat(boxes); }, []);
    var finalBoxes = [];
    var finalScores = [];
    if (allBoxes.length > 0) {
        var ts = Date.now();
        // cross-scale NMS with the stricter IOU threshold 0.7
        var indices = nonMaxSuppression$1(allBoxes.map(function (bbox) { return bbox.cell; }), allBoxes.map(function (bbox) { return bbox.score; }), 0.7);
        stats.stage1_nms = Date.now() - ts;
        finalScores = indices.map(function (idx) { return allBoxes[idx].score; });
        // apply the regression offsets (scaled by cell size), square and round
        finalBoxes = indices
            .map(function (idx) { return allBoxes[idx]; })
            .map(function (_a) {
            var cell = _a.cell, region = _a.region;
            return new BoundingBox(cell.left + (region.left * cell.width), cell.top + (region.top * cell.height), cell.right + (region.right * cell.width), cell.bottom + (region.bottom * cell.height)).toSquare().round();
        });
    }
    return {
        boxes: finalBoxes,
        scores: finalScores
    };
}
|
||
|
|
||
|
// Crops each box out of the source canvas, resizes the crop to width x height,
// converts RGBA pixel data to BGR channel order and returns one normalized
// [1, width, height, 3] tensor per box (axes transposed to width-major).
// Body is a transpiled async state machine (__awaiter$1/__generator$1); uses
// browser APIs (canvas 2d context, createImageBitmap).
function extractImagePatches(img, boxes, _a) {
    var width = _a.width, height = _a.height;
    return __awaiter$1(this, void 0, void 0, function () {
        var _this = this;
        var imgCtx, bitmaps, imagePatchesDatas;
        return __generator$1(this, function (_b) {
            switch (_b.label) {
                case 0:
                    imgCtx = getContext2dOrThrow(img);
                    return [4 /*yield*/, Promise.all(boxes.map(function (box) { return __awaiter$1(_this, void 0, void 0, function () {
                            var _a, y, ey, x, ex, fromX, fromY, imgData;
                            return __generator$1(this, function (_b) {
                                // clamp the box to the image borders before reading pixels
                                _a = box.padAtBorders(img.height, img.width), y = _a.y, ey = _a.ey, x = _a.x, ex = _a.ex;
                                fromX = x - 1;
                                fromY = y - 1;
                                imgData = imgCtx.getImageData(fromX, fromY, (ex - fromX), (ey - fromY));
                                return [2 /*return*/, createImageBitmap(imgData)];
                            });
                        }); }))];
                case 1:
                    bitmaps = _b.sent();
                    imagePatchesDatas = [];
                    bitmaps.forEach(function (bmp) {
                        // draw each bitmap into a patch-sized canvas to resize it
                        var patch = createCanvas({ width: width, height: height });
                        var patchCtx = getContext2dOrThrow(patch);
                        patchCtx.drawImage(bmp, 0, 0, width, height);
                        var data = patchCtx.getImageData(0, 0, width, height).data;
                        var currData = [];
                        // RGBA -> BGR
                        for (var i = 0; i < data.length; i += 4) {
                            currData.push(data[i + 2]);
                            currData.push(data[i + 1]);
                            currData.push(data[i]);
                        }
                        imagePatchesDatas.push(currData);
                    });
                    return [2 /*return*/, imagePatchesDatas.map(function (data) {
                            var t = tidy(function () {
                                // [1, w, h, 3] transposed to swap the spatial axes
                                var imagePatchTensor = transpose(tensor4d(data, [1, width, height, 3]), [0, 2, 1, 3]).toFloat();
                                return normalize$1(imagePatchTensor);
                            });
                            return t;
                        })];
            }
        });
    });
}
|
||
|
|
||
|
/**
 * MTCNN refinement network: shared trunk, flatten, FC + PReLU, then two FC
 * heads — fc2_1 for the face/no-face logits (stabilized softmax over axis 1)
 * and fc2_2 for box regression. Returns the face-class scores and regions.
 */
function RNet(x, params) {
    return tidy(function () {
        var trunkOut = sharedLayer(x, params);
        var flattened = reshape(trunkOut, [trunkOut.shape[0], params.fc1.weights.shape[0]]);
        var fc1Out = fullyConnectedLayer(flattened, params.fc1);
        var activated = prelu$1(fc1Out, params.prelu4_alpha);
        var classLogits = fullyConnectedLayer(activated, params.fc2_1);
        var logitMax = expandDims(max(classLogits, 1), 1);
        var prob = softmax(sub(classLogits, logitMax), 1);
        var regions = fullyConnectedLayer(activated, params.fc2_2);
        var scores = unstack(prob, 1)[1];
        return { scores: scores, regions: regions };
    });
}
|
||
|
|
||
|
// MTCNN stage 2: crops 24x24 patches for every stage-1 box, runs RNet on each,
// keeps boxes whose score exceeds scoreThreshold, applies NMS (IOU 0.7) and
// calibrates survivors with their regression offsets. Timing info is written
// into the caller-owned stats object. Body is a transpiled async state machine.
function stage2(img, inputBoxes, scoreThreshold, params, stats) {
    return __awaiter$1(this, void 0, void 0, function () {
        var ts, rnetInputs, rnetOuts, scoresTensor, scores, _a, _b, indices, filteredBoxes, filteredScores, finalBoxes, finalScores, indicesNms, regions_1;
        return __generator$1(this, function (_c) {
            switch (_c.label) {
                case 0:
                    ts = Date.now();
                    return [4 /*yield*/, extractImagePatches(img, inputBoxes, { width: 24, height: 24 })];
                case 1:
                    rnetInputs = _c.sent();
                    stats.stage2_extractImagePatches = Date.now() - ts;
                    ts = Date.now();
                    // each patch tensor is consumed by RNet and disposed immediately
                    rnetOuts = rnetInputs.map(function (rnetInput) {
                        var out = RNet(rnetInput, params);
                        rnetInput.dispose();
                        return out;
                    });
                    stats.stage2_rnet = Date.now() - ts;
                    scoresTensor = rnetOuts.length > 1
                        ? concat(rnetOuts.map(function (out) { return out.scores; }))
                        : rnetOuts[0].scores;
                    _b = (_a = Array).from;
                    return [4 /*yield*/, scoresTensor.data()];
                case 2:
                    scores = _b.apply(_a, [_c.sent()]);
                    scoresTensor.dispose();
                    // indices of boxes that pass the score threshold
                    indices = scores
                        .map(function (score, idx) { return ({ score: score, idx: idx }); })
                        .filter(function (c) { return c.score > scoreThreshold; })
                        .map(function (_a) {
                        var idx = _a.idx;
                        return idx;
                    });
                    filteredBoxes = indices.map(function (idx) { return inputBoxes[idx]; });
                    filteredScores = indices.map(function (idx) { return scores[idx]; });
                    finalBoxes = [];
                    finalScores = [];
                    if (filteredBoxes.length > 0) {
                        ts = Date.now();
                        indicesNms = nonMaxSuppression$1(filteredBoxes, filteredScores, 0.7);
                        stats.stage2_nms = Date.now() - ts;
                        // read the 4 regression offsets for each surviving box
                        regions_1 = indicesNms.map(function (idx) {
                            return new MtcnnBox(rnetOuts[indices[idx]].regions.get(0, 0), rnetOuts[indices[idx]].regions.get(0, 1), rnetOuts[indices[idx]].regions.get(0, 2), rnetOuts[indices[idx]].regions.get(0, 3));
                        });
                        finalScores = indicesNms.map(function (idx) { return filteredScores[idx]; });
                        finalBoxes = indicesNms.map(function (idx, i) { return filteredBoxes[idx].calibrate(regions_1[i]); });
                    }
                    // dispose every RNet output tensor, selected or not
                    rnetOuts.forEach(function (t) {
                        t.regions.dispose();
                        t.scores.dispose();
                    });
                    return [2 /*return*/, {
                            boxes: finalBoxes,
                            scores: finalScores
                        }];
            }
        });
    });
}
|
||
|
|
||
|
// ONet: MTCNN output-stage network. Takes a 48x48 image patch tensor and
// returns { scores, regions, points }: face probability, bounding-box
// regression offsets and 5-point landmark coordinates. Wrapped in tidy()
// so intermediate tensors are released.
function ONet(x, params) {
    return tidy(function () {
        var features = sharedLayer(x, params);
        features = maxPool(features, [2, 2], [2, 2], 'same');
        features = prelu$1(convLayer(features, params.conv4, 'valid'), params.prelu4_alpha);
        // Flatten to the fc1 input dimension.
        var flattened = reshape(features, [features.shape[0], params.fc1.weights.shape[0]]);
        var activated = prelu$1(fullyConnectedLayer(flattened, params.fc1), params.prelu5_alpha);
        var classOut = fullyConnectedLayer(activated, params.fc2_1);
        // Numerically stable softmax over the two face/non-face channels.
        var rowMax = expandDims(max(classOut, 1), 1);
        var prob = softmax(sub(classOut, rowMax), 1);
        return {
            // Channel 1 is the face probability.
            scores: unstack(prob, 1)[1],
            regions: fullyConnectedLayer(activated, params.fc2_2),
            points: fullyConnectedLayer(activated, params.fc2_3)
        };
    });
}
|
||
|
|
||
|
// MTCNN stage 3: final scoring with ONet. Filters the stage-2 boxes by score,
// calibrates them with the regressed offsets BEFORE non-max suppression, and
// maps the 5 landmark points from patch-relative to image coordinates.
// Timing info is written to `stats`.
// (Transpiled async function: __awaiter$1/__generator$1 state machine.)
function stage3(img, inputBoxes, scoreThreshold, params, stats) {
    return __awaiter$1(this, void 0, void 0, function () {
        var ts, onetInputs, onetOuts, scoresTensor, scores, _a, _b, indices, filteredRegions, filteredBoxes, filteredScores, finalBoxes, finalScores, points, indicesNms;
        return __generator$1(this, function (_c) {
            switch (_c.label) {
                case 0:
                    ts = Date.now();
                    // Crop and resize each candidate box to the 48x48 ONet input size.
                    return [4 /*yield*/, extractImagePatches(img, inputBoxes, { width: 48, height: 48 })];
                case 1:
                    onetInputs = _c.sent();
                    stats.stage3_extractImagePatches = Date.now() - ts;
                    ts = Date.now();
                    // Forward each patch through ONet, disposing inputs as we go.
                    onetOuts = onetInputs.map(function (onetInput) {
                        var out = ONet(onetInput, params);
                        onetInput.dispose();
                        return out;
                    });
                    stats.stage3_onet = Date.now() - ts;
                    scoresTensor = onetOuts.length > 1
                        ? concat(onetOuts.map(function (out) { return out.scores; }))
                        : onetOuts[0].scores;
                    _b = (_a = Array).from;
                    return [4 /*yield*/, scoresTensor.data()];
                case 2:
                    scores = _b.apply(_a, [_c.sent()]);
                    scoresTensor.dispose();
                    // Indices of candidates above the score threshold.
                    indices = scores
                        .map(function (score, idx) { return ({ score: score, idx: idx }); })
                        .filter(function (c) { return c.score > scoreThreshold; })
                        .map(function (_a) {
                        var idx = _a.idx;
                        return idx;
                    });
                    filteredRegions = indices.map(function (idx) { return new MtcnnBox(onetOuts[idx].regions.get(0, 0), onetOuts[idx].regions.get(0, 1), onetOuts[idx].regions.get(0, 2), onetOuts[idx].regions.get(0, 3)); });
                    // Unlike stage 2, calibration happens before NMS here.
                    filteredBoxes = indices
                        .map(function (idx, i) { return inputBoxes[idx].calibrate(filteredRegions[i]); });
                    filteredScores = indices.map(function (idx) { return scores[idx]; });
                    finalBoxes = [];
                    finalScores = [];
                    points = [];
                    if (filteredBoxes.length > 0) {
                        ts = Date.now();
                        // NMS with IoU 0.7; `false` selects the alternative overlap metric
                        // (min-area based) used by the reference MTCNN implementation.
                        indicesNms = nonMaxSuppression$1(filteredBoxes, filteredScores, 0.7, false);
                        stats.stage3_nms = Date.now() - ts;
                        finalBoxes = indicesNms.map(function (idx) { return filteredBoxes[idx]; });
                        finalScores = indicesNms.map(function (idx) { return filteredScores[idx]; });
                        // ONet emits 10 values per patch: x0..x4 then y0..y4, normalized to
                        // the patch; scale/shift them into absolute image coordinates.
                        points = indicesNms.map(function (idx, i) {
                            return Array(5).fill(0).map(function (_, ptIdx) {
                                return new Point(((onetOuts[idx].points.get(0, ptIdx) * (finalBoxes[i].width + 1)) + finalBoxes[i].left), ((onetOuts[idx].points.get(0, ptIdx + 5) * (finalBoxes[i].height + 1)) + finalBoxes[i].top));
                            });
                        });
                    }
                    // Release all ONet output tensors.
                    onetOuts.forEach(function (t) {
                        t.regions.dispose();
                        t.scores.dispose();
                        t.points.dispose();
                    });
                    return [2 /*return*/, {
                            boxes: finalBoxes,
                            scores: finalScores,
                            points: points
                        }];
            }
        });
    });
}
|
||
|
|
||
|
// MTCNN face detector: runs the three-stage PNet/RNet/ONet cascade over an
// image pyramid and returns face detections with 5-point landmarks.
// (Transpiled ES5 class extending NeuralNetwork.)
var Mtcnn = /** @class */ (function (_super) {
    __extends$1(Mtcnn, _super);
    function Mtcnn() {
        return _super.call(this, 'Mtcnn') || this;
    }
    // Runs the full cascade on a NetInput. Returns { results, stats } where
    // stats collects per-stage timings. Only canvas inputs are supported.
    Mtcnn.prototype.forwardInput = function (input, forwardParams) {
        if (forwardParams === void 0) { forwardParams = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var params, inputCanvas, stats, tsTotal, imgTensor, onReturn, _a, height, width, _b, minFaceSize, scaleFactor, maxNumScales, scoreThresholds, scaleSteps, scales, ts, out1, out2, out3, results;
            return __generator$1(this, function (_c) {
                switch (_c.label) {
                    case 0:
                        params = this.params;
                        if (!params) {
                            throw new Error('Mtcnn - load model before inference');
                        }
                        inputCanvas = input.canvases[0];
                        if (!inputCanvas) {
                            throw new Error('Mtcnn - inputCanvas is not defined, note that passing tensors into Mtcnn.forwardInput is not supported yet.');
                        }
                        stats = {};
                        tsTotal = Date.now();
                        // MTCNN weights expect BGR channel order.
                        imgTensor = tidy(function () {
                            return bgrToRgbTensor(expandDims(fromPixels(inputCanvas)).toFloat());
                        });
                        // Common exit path: release the image tensor and stamp total time.
                        onReturn = function (results) {
                            // dispose tensors on return
                            imgTensor.dispose();
                            stats.total = Date.now() - tsTotal;
                            return results;
                        };
                        _a = imgTensor.shape.slice(1), height = _a[0], width = _a[1];
                        _b = new MtcnnOptions(forwardParams), minFaceSize = _b.minFaceSize, scaleFactor = _b.scaleFactor, maxNumScales = _b.maxNumScales, scoreThresholds = _b.scoreThresholds, scaleSteps = _b.scaleSteps;
                        // Build the image pyramid, dropping scales whose resized image
                        // would be smaller than one PNet cell.
                        scales = (scaleSteps || pyramidDown(minFaceSize, scaleFactor, [height, width]))
                            .filter(function (scale) {
                            var sizes = getSizesForScale(scale, [height, width]);
                            return Math.min(sizes.width, sizes.height) > CELL_SIZE$1;
                        })
                            .slice(0, maxNumScales);
                        stats.scales = scales;
                        stats.pyramid = scales.map(function (scale) { return getSizesForScale(scale, [height, width]); });
                        ts = Date.now();
                        return [4 /*yield*/, stage1(imgTensor, scales, scoreThresholds[0], params.pnet, stats)];
                    case 1:
                        out1 = _c.sent();
                        stats.total_stage1 = Date.now() - ts;
                        if (!out1.boxes.length) {
                            return [2 /*return*/, onReturn({ results: [], stats: stats })];
                        }
                        stats.stage2_numInputBoxes = out1.boxes.length;
                        // using the inputCanvas to extract and resize the image patches, since it is faster
                        // than doing this on the gpu
                        ts = Date.now();
                        return [4 /*yield*/, stage2(inputCanvas, out1.boxes, scoreThresholds[1], params.rnet, stats)];
                    case 2:
                        out2 = _c.sent();
                        stats.total_stage2 = Date.now() - ts;
                        if (!out2.boxes.length) {
                            return [2 /*return*/, onReturn({ results: [], stats: stats })];
                        }
                        stats.stage3_numInputBoxes = out2.boxes.length;
                        ts = Date.now();
                        return [4 /*yield*/, stage3(inputCanvas, out2.boxes, scoreThresholds[2], params.onet, stats)];
                    case 3:
                        out3 = _c.sent();
                        stats.total_stage3 = Date.now() - ts;
                        // Convert boxes to relative coordinates and landmarks to
                        // box-relative coordinates.
                        results = out3.boxes.map(function (box, idx) { return new FaceDetectionWithLandmarks(new FaceDetection(out3.scores[idx], new Rect(box.left / width, box.top / height, box.width / width, box.height / height), {
                            height: height,
                            width: width
                        }), new FaceLandmarks5(out3.points[idx].map(function (pt) { return pt.sub(new Point(box.left, box.top)).div(new Point(box.width, box.height)); }), { width: box.width, height: box.height })); });
                        return [2 /*return*/, onReturn({ results: results, stats: stats })];
                }
            });
        });
    };
    // Convenience wrapper: converts the input to a NetInput and returns only
    // the detection results (drops stats).
    Mtcnn.prototype.forward = function (input, forwardParams) {
        if (forwardParams === void 0) { forwardParams = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [4 /*yield*/, _a.apply(this, [_b.sent(),
                            forwardParams])];
                    case 2: return [2 /*return*/, (_b.sent()).results];
                }
            });
        });
    };
    // Like forward(), but returns the full { results, stats } object.
    Mtcnn.prototype.forwardWithStats = function (input, forwardParams) {
        if (forwardParams === void 0) { forwardParams = {}; }
        return __awaiter$1(this, void 0, void 0, function () {
            var _a;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this.forwardInput;
                        return [4 /*yield*/, toNetInput(input)];
                    case 1: return [2 /*return*/, _a.apply(this, [_b.sent(),
                            forwardParams])];
                }
            });
        });
    };
    // none of the param tensors are quantized yet
    Mtcnn.prototype.loadQuantizedParams = function (uri) {
        return loadQuantizedParams$4(uri);
    };
    Mtcnn.prototype.extractParams = function (weights) {
        return extractParams$4(weights);
    };
    return Mtcnn;
}(NeuralNetwork));
|
||
|
|
||
|
// --- TinyFaceDetector configuration constants ---
// IoU threshold used for non-max suppression.
var IOU_THRESHOLD = 0.4;
// YOLO anchor box priors (width, height in grid cells).
var BOX_ANCHORS = [
    new Point(1.603231, 2.094468),
    new Point(6.041143, 7.080126),
    new Point(2.882459, 3.518061),
    new Point(4.266906, 5.178857),
    new Point(9.041765, 10.66308)
];
// Per-channel RGB means subtracted during input normalization.
var MEAN_RGB = [117.001, 114.697, 97.404];
// Default weight-manifest name for the quantized model files.
var DEFAULT_MODEL_NAME$5 = 'tiny_face_detector_model';
|
||
|
|
||
|
// TinyFaceDetector: single-class (face) TinyYolov2 variant using separable
// convolutions and a fixed filter-size configuration.
var TinyFaceDetector = /** @class */ (function (_super) {
    __extends$1(TinyFaceDetector, _super);
    function TinyFaceDetector() {
        var _this = this;
        // Fixed network configuration; see the constants above for the values.
        var config = {
            withSeparableConvs: true,
            iouThreshold: IOU_THRESHOLD,
            classes: ['face'],
            anchors: BOX_ANCHORS,
            meanRgb: MEAN_RGB,
            isFirstLayerConv2d: true,
            filterSizes: [3, 16, 32, 64, 128, 256, 512]
        };
        _this = _super.call(this, config) || this;
        return _this;
    }
    Object.defineProperty(TinyFaceDetector.prototype, "anchors", {
        // The anchor priors from the network config.
        get: function () {
            return this.config.anchors;
        },
        enumerable: true,
        configurable: true
    });
    // Runs detection and wraps each result into a FaceDetection with relative
    // box coordinates.
    TinyFaceDetector.prototype.locateFaces = function (input, forwardParams) {
        return __awaiter$1(this, void 0, void 0, function () {
            var objectDetections;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.detect(input, forwardParams)];
                    case 1:
                        objectDetections = _a.sent();
                        return [2 /*return*/, objectDetections.map(function (det) { return new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }); })];
                }
            });
        });
    };
    // Loads quantized weights, defaulting to the tiny_face_detector manifest.
    TinyFaceDetector.prototype.loadQuantizedParams = function (modelUri) {
        var defaultModelName = DEFAULT_MODEL_NAME$5;
        return _super.prototype.loadQuantizedParams.call(this, modelUri, defaultModelName);
    };
    return TinyFaceDetector;
}(TinyYolov2));
|
||
|
|
||
|
// --- TinyYolov2 configuration constants ---
// IoU threshold used for non-max suppression.
var IOU_THRESHOLD$1 = 0.4;
// Anchor priors for the non-separable-conv model variant.
var BOX_ANCHORS$1 = [
    new Point(0.738768, 0.874946),
    new Point(2.42204, 2.65704),
    new Point(4.30971, 7.04493),
    new Point(10.246, 4.59428),
    new Point(12.6868, 11.8741)
];
// Anchor priors for the separable-conv model variant (same as TinyFaceDetector).
var BOX_ANCHORS_SEPARABLE = [
    new Point(1.603231, 2.094468),
    new Point(6.041143, 7.080126),
    new Point(2.882459, 3.518061),
    new Point(4.266906, 5.178857),
    new Point(9.041765, 10.66308)
];
// RGB means subtracted during input normalization (separable-conv variant).
var MEAN_RGB_SEPARABLE = [117.001, 114.697, 97.404];
// Default weight-manifest names for the two model variants.
var DEFAULT_MODEL_NAME$6 = 'tiny_yolov2_model';
var DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model';
|
||
|
|
||
|
// Public TinyYolov2 face detector. Supports two weight variants selected by
// `withSeparableConvs` (default true): separable convolutions with mean-RGB
// normalization, or regular convolutions with class scores.
var TinyYolov2$1 = /** @class */ (function (_super) {
    __extends$1(TinyYolov2$$1, _super);
    function TinyYolov2$$1(withSeparableConvs) {
        if (withSeparableConvs === void 0) { withSeparableConvs = true; }
        var _this = this;
        // Merge the common config with the variant-specific anchors/options.
        var config = Object.assign({}, {
            withSeparableConvs: withSeparableConvs,
            iouThreshold: IOU_THRESHOLD$1,
            classes: ['face']
        }, withSeparableConvs
            ? {
                anchors: BOX_ANCHORS_SEPARABLE,
                meanRgb: MEAN_RGB_SEPARABLE
            }
            : {
                anchors: BOX_ANCHORS$1,
                withClassScores: true
            });
        _this = _super.call(this, config) || this;
        return _this;
    }
    Object.defineProperty(TinyYolov2$$1.prototype, "withSeparableConvs", {
        get: function () {
            return this.config.withSeparableConvs;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(TinyYolov2$$1.prototype, "anchors", {
        get: function () {
            return this.config.anchors;
        },
        enumerable: true,
        configurable: true
    });
    // Runs detection and wraps each result into a FaceDetection with relative
    // box coordinates.
    TinyYolov2$$1.prototype.locateFaces = function (input, forwardParams) {
        return __awaiter$1(this, void 0, void 0, function () {
            var objectDetections;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.detect(input, forwardParams)];
                    case 1:
                        objectDetections = _a.sent();
                        return [2 /*return*/, objectDetections.map(function (det) { return new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }); })];
                }
            });
        });
    };
    // Loads quantized weights; the default manifest name depends on the variant.
    TinyYolov2$$1.prototype.loadQuantizedParams = function (modelUri) {
        var defaultModelName = this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME$6;
        return _super.prototype.loadQuantizedParams.call(this, modelUri, defaultModelName);
    };
    return TinyYolov2$$1;
}(TinyYolov2));
|
||
|
|
||
|
// Singleton instances of every network exposed by the api; the convenience
// functions below (ssdMobilenetv1, mtcnn, detectAllFaces, ...) all route
// through these shared instances.
var nets = {
    ssdMobilenetv1: new SsdMobilenetv1(),
    tinyFaceDetector: new TinyFaceDetector(),
    tinyYolov2: new TinyYolov2$1(),
    mtcnn: new Mtcnn(),
    faceLandmark68Net: new FaceLandmark68Net(),
    faceLandmark68TinyNet: new FaceLandmark68TinyNet(),
    faceRecognitionNet: new FaceRecognitionNet()
};
|
||
|
/**
 * Detects all faces in an image with the shared SSD Mobilenetv1 network.
 *
 * @param input The input image.
 * @param options (optional) Detection options; see SsdMobilenetv1Options for defaults.
 * @returns Scored bounding box for each detected face.
 */
var ssdMobilenetv1 = function (input, options) {
    var net = nets.ssdMobilenetv1;
    return net.locateFaces(input, options);
};
/**
 * Detects all faces in an image with the shared Tiny Face Detector network.
 *
 * @param input The input image.
 * @param options (optional) Detection options; see TinyFaceDetectorOptions for defaults.
 * @returns Scored bounding box for each detected face.
 */
var tinyFaceDetector = function (input, options) {
    var net = nets.tinyFaceDetector;
    return net.locateFaces(input, options);
};
/**
 * Detects all faces in an image with the shared Tiny Yolov2 network.
 *
 * @param input The input image.
 * @param options (optional) Detection options; see TinyYolov2Options for defaults.
 * @returns Scored bounding box for each detected face.
 */
var tinyYolov2 = function (input, options) {
    var net = nets.tinyYolov2;
    return net.locateFaces(input, options);
};
/**
 * Detects all faces together with their 5-point face landmarks using the
 * shared MTCNN network.
 *
 * @param input The input image.
 * @param options (optional) Detection options; see MtcnnOptions for defaults.
 * @returns Scored bounding box plus 5-point landmarks for each detected face.
 */
var mtcnn = function (input, options) {
    var net = nets.mtcnn;
    return net.forward(input, options);
};
/**
 * Computes the 68-point face landmark positions for a face image.
 *
 * @param input A face image cropped to the bounding box of a face; an array
 * of such images is batch processed.
 * @returns 68-point face landmarks, or an array thereof for batch input.
 */
var detectFaceLandmarks = function (input) {
    var net = nets.faceLandmark68Net;
    return net.detectLandmarks(input);
};
/**
 * Computes the 68-point face landmark positions using the tiny landmark
 * model — slightly faster at inference, slightly less accurate.
 *
 * @param input A face image cropped to the bounding box of a face; an array
 * of such images is batch processed.
 * @returns 68-point face landmarks, or an array thereof for batch input.
 */
var detectFaceLandmarksTiny = function (input) {
    var net = nets.faceLandmark68TinyNet;
    return net.detectLandmarks(input);
};
/**
 * Computes a 128-dimensional face descriptor (embedding) for an aligned face
 * image. Descriptors of two faces can be compared via euclidean distance to
 * measure similarity.
 *
 * @param input A face image cropped to the aligned bounding box of a face;
 * an array of such images is batch processed.
 * @returns A 128-entry Float32Array, or an array thereof for batch input.
 */
var computeFaceDescriptor = function (input) {
    var net = nets.faceRecognitionNet;
    return net.computeFaceDescriptor(input);
};
|
||
|
// Loaders for the weight files of each shared network instance. `url` points
// at the directory (or manifest) hosting the model files.
var loadSsdMobilenetv1Model = function (url) { return nets.ssdMobilenetv1.load(url); };
var loadTinyFaceDetectorModel = function (url) { return nets.tinyFaceDetector.load(url); };
var loadMtcnnModel = function (url) { return nets.mtcnn.load(url); };
var loadTinyYolov2Model = function (url) { return nets.tinyYolov2.load(url); };
var loadFaceLandmarkModel = function (url) { return nets.faceLandmark68Net.load(url); };
var loadFaceLandmarkTinyModel = function (url) { return nets.faceLandmark68TinyNet.load(url); };
var loadFaceRecognitionModel = function (url) { return nets.faceRecognitionNet.load(url); };
// backward compatibility
// Deprecated aliases kept for callers of the pre-rename api.
var loadFaceDetectionModel = loadSsdMobilenetv1Model;
var locateFaces = ssdMobilenetv1;
var detectLandmarks = detectFaceLandmarks;
|
||
|
|
||
|
// Base class for the composable descriptor-computation tasks: stores the
// upstream landmark-detection task and the original input image.
var ComputeFaceDescriptorsTaskBase = /** @class */ (function (_super) {
    __extends$1(ComputeFaceDescriptorsTaskBase, _super);
    function ComputeFaceDescriptorsTaskBase(detectFaceLandmarksTask, input) {
        var _this = _super.call(this) || this;
        _this.detectFaceLandmarksTask = detectFaceLandmarksTask;
        _this.input = input;
        return _this;
    }
    return ComputeFaceDescriptorsTaskBase;
}(ComposableTask));
|
||
|
// Composable task: awaits the upstream landmark detections, aligns and
// extracts each face, then computes a 128-d descriptor per face.
var ComputeAllFaceDescriptorsTask = /** @class */ (function (_super) {
    __extends$1(ComputeAllFaceDescriptorsTask, _super);
    function ComputeAllFaceDescriptorsTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    ComputeAllFaceDescriptorsTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var facesWithLandmarks, alignedFaceCanvases;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.detectFaceLandmarksTask];
                    case 1:
                        facesWithLandmarks = _a.sent();
                        // Extract one aligned canvas per face, using the landmark-based
                        // alignment rect.
                        return [4 /*yield*/, extractFaces(this.input, facesWithLandmarks.map(function (_a) {
                                var landmarks = _a.landmarks;
                                return landmarks.align();
                            }))];
                    case 2:
                        alignedFaceCanvases = _a.sent();
                        // Compute the descriptors for all faces in parallel.
                        return [4 /*yield*/, Promise.all(facesWithLandmarks.map(function (_a, i) {
                                var detection = _a.detection, landmarks = _a.landmarks;
                                return __awaiter$1(_this, void 0, void 0, function () {
                                    var descriptor;
                                    return __generator$1(this, function (_b) {
                                        switch (_b.label) {
                                            case 0: return [4 /*yield*/, nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvases[i])];
                                            case 1:
                                                descriptor = _b.sent();
                                                return [2 /*return*/, new FullFaceDescription(detection, landmarks, descriptor)];
                                        }
                                    });
                                });
                            }))];
                    case 3: return [2 /*return*/, _a.sent()];
                }
            });
        });
    };
    return ComputeAllFaceDescriptorsTask;
}(ComputeFaceDescriptorsTaskBase));
|
||
|
// Composable task: computes the descriptor for a single upstream detection.
// Resolves to undefined when the upstream task found no face.
var ComputeSingleFaceDescriptorTask = /** @class */ (function (_super) {
    __extends$1(ComputeSingleFaceDescriptorTask, _super);
    function ComputeSingleFaceDescriptorTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    ComputeSingleFaceDescriptorTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            var detectionWithLandmarks, detection, landmarks, alignedRect, alignedFaceCanvas, descriptor;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.detectFaceLandmarksTask];
                    case 1:
                        detectionWithLandmarks = _a.sent();
                        // No face found upstream: propagate undefined.
                        if (!detectionWithLandmarks) {
                            return [2 /*return*/];
                        }
                        detection = detectionWithLandmarks.detection, landmarks = detectionWithLandmarks.landmarks, alignedRect = detectionWithLandmarks.alignedRect;
                        // Extract the face aligned by the landmark-based rect.
                        return [4 /*yield*/, extractFaces(this.input, [alignedRect])];
                    case 2:
                        alignedFaceCanvas = (_a.sent())[0];
                        return [4 /*yield*/, nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvas)];
                    case 3:
                        descriptor = _a.sent();
                        return [2 /*return*/, new FullFaceDescription(detection, landmarks, descriptor)];
                }
            });
        });
    };
    return ComputeSingleFaceDescriptorTask;
}(ComputeFaceDescriptorsTaskBase));
|
||
|
|
||
|
// Base class for the composable landmark-detection tasks: stores the upstream
// face-detection task, the input image, and which landmark model to use.
var DetectFaceLandmarksTaskBase = /** @class */ (function (_super) {
    __extends$1(DetectFaceLandmarksTaskBase, _super);
    function DetectFaceLandmarksTaskBase(detectFacesTask, input, useTinyLandmarkNet) {
        var _this = _super.call(this) || this;
        _this.detectFacesTask = detectFacesTask;
        _this.input = input;
        _this.useTinyLandmarkNet = useTinyLandmarkNet;
        return _this;
    }
    Object.defineProperty(DetectFaceLandmarksTaskBase.prototype, "landmarkNet", {
        // Selects the tiny or full 68-point landmark network.
        get: function () {
            return this.useTinyLandmarkNet
                ? nets.faceLandmark68TinyNet
                : nets.faceLandmark68Net;
        },
        enumerable: true,
        configurable: true
    });
    return DetectFaceLandmarksTaskBase;
}(ComposableTask));
|
||
|
// Composable task: awaits the upstream detections, crops each face and runs
// the landmark net on every crop, pairing each detection with its landmarks.
var DetectAllFaceLandmarksTask = /** @class */ (function (_super) {
    __extends$1(DetectAllFaceLandmarksTask, _super);
    function DetectAllFaceLandmarksTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    DetectAllFaceLandmarksTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            var _this = this;
            var detections, faceCanvases, faceLandmarksByFace;
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.detectFacesTask];
                    case 1:
                        detections = _a.sent();
                        return [4 /*yield*/, extractFaces(this.input, detections)];
                    case 2:
                        faceCanvases = _a.sent();
                        // Detect landmarks for all face crops in parallel.
                        return [4 /*yield*/, Promise.all(faceCanvases.map(function (canvas) { return _this.landmarkNet.detectLandmarks(canvas); }))];
                    case 3:
                        faceLandmarksByFace = _a.sent();
                        return [2 /*return*/, detections.map(function (detection, i) {
                                return new FaceDetectionWithLandmarks(detection, faceLandmarksByFace[i]);
                            })];
                }
            });
        });
    };
    // Chains descriptor computation onto this task's results.
    DetectAllFaceLandmarksTask.prototype.withFaceDescriptors = function () {
        return new ComputeAllFaceDescriptorsTask(this, this.input);
    };
    return DetectAllFaceLandmarksTask;
}(DetectFaceLandmarksTaskBase));
|
||
|
// Composable task: detects landmarks for a single upstream detection.
// Resolves to undefined when the upstream task found no face.
var DetectSingleFaceLandmarksTask = /** @class */ (function (_super) {
    __extends$1(DetectSingleFaceLandmarksTask, _super);
    function DetectSingleFaceLandmarksTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    DetectSingleFaceLandmarksTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            var detection, faceCanvas, _a, _b;
            return __generator$1(this, function (_c) {
                switch (_c.label) {
                    case 0: return [4 /*yield*/, this.detectFacesTask];
                    case 1:
                        detection = _c.sent();
                        if (!detection) {
                            return [2 /*return*/];
                        }
                        return [4 /*yield*/, extractFaces(this.input, [detection])];
                    case 2:
                        faceCanvas = (_c.sent())[0];
                        // Transpiled `new FaceDetectionWithLandmarks(detection, await ...)`:
                        // the constructor is bound with the detection, then applied with the
                        // awaited landmarks appended.
                        _a = FaceDetectionWithLandmarks.bind;
                        _b = [void 0, detection];
                        return [4 /*yield*/, this.landmarkNet.detectLandmarks(faceCanvas)];
                    case 3: return [2 /*return*/, new (_a.apply(FaceDetectionWithLandmarks, _b.concat([_c.sent()])))()];
                }
            });
        });
    };
    // Chains descriptor computation onto this task's result.
    DetectSingleFaceLandmarksTask.prototype.withFaceDescriptor = function () {
        return new ComputeSingleFaceDescriptorTask(this, this.input);
    };
    return DetectSingleFaceLandmarksTask;
}(DetectFaceLandmarksTaskBase));
|
||
|
|
||
|
// Base class for the composable face-detection tasks: stores the input image
// and the detector options (defaulting to SSD Mobilenetv1).
var DetectFacesTaskBase = /** @class */ (function (_super) {
    __extends$1(DetectFacesTaskBase, _super);
    function DetectFacesTaskBase(input, options) {
        if (options === void 0) { options = new SsdMobilenetv1Options(); }
        var _this = _super.call(this) || this;
        _this.input = input;
        _this.options = options;
        return _this;
    }
    return DetectFacesTaskBase;
}(ComposableTask));
|
||
|
// Composable task: dispatches to the detector network matching the options
// type (MTCNN, TinyFaceDetector, SSD Mobilenetv1 or TinyYolov2).
var DetectAllFacesTask = /** @class */ (function (_super) {
    __extends$1(DetectAllFacesTask, _super);
    function DetectAllFacesTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    DetectAllFacesTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            var _a, input, options, faceDetectionFunction;
            return __generator$1(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this, input = _a.input, options = _a.options;
                        // MTCNN returns detections with landmarks; strip to detections.
                        if (!(options instanceof MtcnnOptions)) return [3 /*break*/, 2];
                        return [4 /*yield*/, nets.mtcnn.forward(input, options)];
                    case 1: return [2 /*return*/, (_b.sent())
                            .map(function (result) { return result.faceDetection; })];
                    case 2:
                        // Pick the detector by the runtime type of the options object.
                        faceDetectionFunction = options instanceof TinyFaceDetectorOptions
                            ? function (input) { return nets.tinyFaceDetector.locateFaces(input, options); }
                            : (options instanceof SsdMobilenetv1Options
                                ? function (input) { return nets.ssdMobilenetv1.locateFaces(input, options); }
                                : (options instanceof TinyYolov2Options
                                    ? function (input) { return nets.tinyYolov2.locateFaces(input, options); }
                                    : null));
                        if (!faceDetectionFunction) {
                            throw new Error('detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options');
                        }
                        return [2 /*return*/, faceDetectionFunction(input)];
                }
            });
        });
    };
    // Chains landmark detection onto this task's results.
    DetectAllFacesTask.prototype.withFaceLandmarks = function (useTinyLandmarkNet) {
        if (useTinyLandmarkNet === void 0) { useTinyLandmarkNet = false; }
        return new DetectAllFaceLandmarksTask(this, this.input, useTinyLandmarkNet);
    };
    return DetectAllFacesTask;
}(DetectFacesTaskBase));
|
||
|
// Composable task: detects all faces and resolves to the single detection
// with the HIGHEST confidence score (undefined when no face is found).
var DetectSingleFaceTask = /** @class */ (function (_super) {
    __extends$1(DetectSingleFaceTask, _super);
    function DetectSingleFaceTask() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    DetectSingleFaceTask.prototype.run = function () {
        return __awaiter$1(this, void 0, void 0, function () {
            return __generator$1(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, new DetectAllFacesTask(this.input, this.options)];
                    case 1: return [2 /*return*/, (_a.sent())
                            // BUGFIX: sort descending by score so index 0 is the best
                            // detection; the previous ascending sort returned the face
                            // with the lowest score instead.
                            .sort(function (f1, f2) { return f2.score - f1.score; })[0]];
                }
            });
        });
    };
    // Chains landmark detection onto this task's result.
    DetectSingleFaceTask.prototype.withFaceLandmarks = function (useTinyLandmarkNet) {
        if (useTinyLandmarkNet === void 0) { useTinyLandmarkNet = false; }
        return new DetectSingleFaceLandmarksTask(this, this.input, useTinyLandmarkNet);
    };
    return DetectSingleFaceTask;
}(DetectFacesTaskBase));
|
||
|
|
||
|
// Entry point: returns a composable task resolving to the best single face
// detection (options select the detector network; default SSD Mobilenetv1).
function detectSingleFace(input, options) {
    if (options === void 0) { options = new SsdMobilenetv1Options(); }
    return new DetectSingleFaceTask(input, options);
}
// Entry point: returns a composable task resolving to all face detections
// (options select the detector network; default SSD Mobilenetv1).
function detectAllFaces(input, options) {
    if (options === void 0) { options = new SsdMobilenetv1Options(); }
    return new DetectAllFacesTask(input, options);
}
|
||
|
|
||
|
// export allFaces API for backward compatibility
// Full pipeline (detect -> landmarks -> descriptors) with SSD Mobilenetv1;
// `minConfidence`, when given, overrides the detector's default threshold.
function allFacesSsdMobilenetv1(input, minConfidence) {
    return __awaiter$1(this, void 0, void 0, function () {
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? { minConfidence: minConfidence } : {}))
                        .withFaceLandmarks()
                        .withFaceDescriptors()];
                case 1: return [2 /*return*/, _a.sent()];
            }
        });
    });
}
|
||
|
// Full pipeline (detect -> landmarks -> descriptors) with TinyYolov2.
function allFacesTinyYolov2(input, forwardParams) {
    if (forwardParams === void 0) { forwardParams = {}; }
    return __awaiter$1(this, void 0, void 0, function () {
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, detectAllFaces(input, new TinyYolov2Options(forwardParams))
                        .withFaceLandmarks()
                        .withFaceDescriptors()];
                case 1: return [2 /*return*/, _a.sent()];
            }
        });
    });
}
|
||
|
// Full pipeline (detect -> landmarks -> descriptors) with MTCNN.
function allFacesMtcnn(input, forwardParams) {
    if (forwardParams === void 0) { forwardParams = {}; }
    return __awaiter$1(this, void 0, void 0, function () {
        return __generator$1(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, detectAllFaces(input, new MtcnnOptions(forwardParams))
                        .withFaceLandmarks()
                        .withFaceDescriptors()];
                case 1: return [2 /*return*/, _a.sent()];
            }
        });
    });
}
|
||
|
// Backward-compatible alias: the original `allFaces` API defaulted to the
// SSD Mobilenet v1 detector.
var allFaces = allFacesSsdMobilenetv1;
|
||
|
|
||
|
/**
 * Computes the euclidean (L2) distance between two descriptors of equal
 * length (plain number arrays or Float32Arrays).
 *
 * @param arr1 First descriptor.
 * @param arr2 Second descriptor, same length as arr1.
 * @returns The euclidean distance between the two descriptors.
 * @throws Error if the two inputs differ in length.
 */
function euclideanDistance(arr1, arr2) {
    if (arr1.length !== arr2.length)
        throw new Error('euclideanDistance: arr1.length !== arr2.length');
    var sumOfSquares = 0;
    for (var i = 0; i < arr1.length; i++) {
        var diff = arr1[i] - arr2[i];
        sumOfSquares += diff * diff;
    }
    return Math.sqrt(sumOfSquares);
}
|
||
|
|
||
|
/**
 * Matches query face descriptors against a set of labeled reference
 * descriptors by mean euclidean distance.
 */
var FaceMatcher = /** @class */ (function () {
    /**
     * @param inputs Reference descriptors: a LabeledFaceDescriptors,
     *   FullFaceDescription or Float32Array, or an array of those.
     * @param distanceThreshold Maximum mean distance for a positive match
     *   (default 0.6); beyond it findBestMatch reports 'unknown'.
     * @throws Error if no input is given or an input has an unsupported type.
     */
    function FaceMatcher(inputs, distanceThreshold) {
        if (distanceThreshold === void 0) { distanceThreshold = 0.6; }
        this._distanceThreshold = distanceThreshold;
        var inputList = Array.isArray(inputs) ? inputs : [inputs];
        if (!inputList.length) {
            throw new Error("FaceRecognizer.constructor - expected atleast one input");
        }
        // Unlabeled inputs get auto-generated labels "person 1", "person 2", ...
        var nextPersonId = 1;
        var createUniqueLabel = function () { return "person " + nextPersonId++; };
        this._labeledDescriptors = inputList.map(function (input) {
            if (input instanceof LabeledFaceDescriptors) {
                return input;
            }
            if (input instanceof FullFaceDescription) {
                return new LabeledFaceDescriptors(createUniqueLabel(), [input.descriptor]);
            }
            if (input instanceof Float32Array) {
                return new LabeledFaceDescriptors(createUniqueLabel(), [input]);
            }
            throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | FullFaceDescription | Float32Array | Array<LabeledFaceDescriptors | FullFaceDescription | Float32Array>");
        });
    }
    Object.defineProperty(FaceMatcher.prototype, "labeledDescriptors", {
        // The labeled reference descriptors this matcher was built from.
        get: function () { return this._labeledDescriptors; },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(FaceMatcher.prototype, "distanceThreshold", {
        // Distance threshold separating a match from 'unknown'.
        get: function () { return this._distanceThreshold; },
        enumerable: true,
        configurable: true
    });
    /**
     * Mean euclidean distance between the query descriptor and each of the
     * given reference descriptors (`|| 1` guards the empty-list division).
     */
    FaceMatcher.prototype.computeMeanDistance = function (queryDescriptor, descriptors) {
        var total = 0;
        for (var i = 0; i < descriptors.length; i++) {
            total += euclideanDistance(descriptors[i], queryDescriptor);
        }
        return total / (descriptors.length || 1);
    };
    /**
     * Returns the FaceMatch with the smallest mean distance over all labels.
     * On ties (or NaN distances) the later candidate wins, matching the
     * original reduce-based selection.
     */
    FaceMatcher.prototype.matchDescriptor = function (queryDescriptor) {
        var best = null;
        for (var i = 0; i < this.labeledDescriptors.length; i++) {
            var entry = this.labeledDescriptors[i];
            var candidate = new FaceMatch(entry.label, this.computeMeanDistance(queryDescriptor, entry.descriptors));
            if (best === null || !(best.distance < candidate.distance)) {
                best = candidate;
            }
        }
        return best;
    };
    /**
     * Finds the best match for the query descriptor, or a FaceMatch labeled
     * 'unknown' when the best distance is not below the threshold.
     */
    FaceMatcher.prototype.findBestMatch = function (queryDescriptor) {
        var bestMatch = this.matchDescriptor(queryDescriptor);
        if (bestMatch.distance < this.distanceThreshold) {
            return bestMatch;
        }
        return new FaceMatch('unknown', bestMatch.distance);
    };
    return FaceMatcher;
}());
|
||
|
|
||
|
/**
 * Creates an Mtcnn instance initialized from raw weights.
 *
 * @param weights The weight data to load into the net.
 * @returns The initialized Mtcnn net.
 */
function createMtcnn(weights) {
    var mtcnnNet = new Mtcnn();
    mtcnnNet.extractWeights(weights);
    return mtcnnNet;
}
|
||
|
|
||
|
/**
 * Creates a TinyFaceDetector instance initialized from raw weights.
 *
 * @param weights The weight data to load into the net.
 * @returns The initialized TinyFaceDetector net.
 */
function createTinyFaceDetector(weights) {
    var detectorNet = new TinyFaceDetector();
    detectorNet.extractWeights(weights);
    return detectorNet;
}
|
||
|
|
||
|
/**
 * Creates a TinyYolov2 instance initialized from raw weights.
 *
 * @param weights The weight data to load into the net.
 * @param withSeparableConvs Whether the net uses depthwise separable
 *   convolutions (default true).
 * @returns The initialized TinyYolov2 net.
 */
function createTinyYolov2(weights, withSeparableConvs) {
    var useSeparableConvs = withSeparableConvs === undefined ? true : withSeparableConvs;
    var yoloNet = new TinyYolov2$1(useSeparableConvs);
    yoloNet.extractWeights(weights);
    return yoloNet;
}
|
||
|
|
||
|
// ---------------------------------------------------------------------------
// Public API surface of the bundle. Each assignment re-exports a symbol
// defined earlier in this UMD factory on the `exports` object.
// ---------------------------------------------------------------------------

// Bundled tfjs-core, exposed so consumers can share the same tf instance.
exports.tf = tfCore_esm;

// Geometry / primitive types.
exports.BoundingBox = BoundingBox;
exports.Box = Box;
exports.BoxWithText = BoxWithText;
exports.Dimensions = Dimensions;
exports.LabeledBox = LabeledBox;
exports.ObjectDetection = ObjectDetection;
exports.Point = Point;
exports.PredictedBox = PredictedBox;
exports.Rect = Rect;

// Weight-loading helpers.
exports.disposeUnusedWeightTensors = disposeUnusedWeightTensors;
exports.extractWeightEntryFactory = extractWeightEntryFactory;
exports.extractWeightsFactory = extractWeightsFactory;
exports.getModelUris = getModelUris;

// DOM / media / drawing utilities.
exports.awaitMediaLoaded = awaitMediaLoaded;
exports.bufferToImage = bufferToImage;
exports.createCanvas = createCanvas;
exports.createCanvasFromMedia = createCanvasFromMedia;
exports.drawBox = drawBox;
exports.drawDetection = drawDetection;
exports.drawText = drawText;
exports.fetchImage = fetchImage;
exports.fetchJson = fetchJson;
exports.fetchNetWeights = fetchNetWeights;
exports.fetchOrThrow = fetchOrThrow;
exports.getContext2dOrThrow = getContext2dOrThrow;
exports.getDefaultDrawOptions = getDefaultDrawOptions;
exports.getMediaDimensions = getMediaDimensions;
exports.imageTensorToCanvas = imageTensorToCanvas;
exports.imageToSquare = imageToSquare;
exports.isMediaElement = isMediaElement;
exports.isMediaLoaded = isMediaLoaded;
exports.loadWeightMap = loadWeightMap;
exports.NetInput = NetInput;
exports.resolveInput = resolveInput;
exports.toNetInput = toNetInput;

// Math / tensor utilities.
exports.sigmoid = sigmoid$1;
exports.inverseSigmoid = inverseSigmoid;
exports.iou = iou;
exports.nonMaxSuppression = nonMaxSuppression$1;
exports.normalize = normalize;
exports.padToSquare = padToSquare;
exports.shuffleArray = shuffleArray;
exports.isTensor = isTensor;
exports.isTensor1D = isTensor1D;
exports.isTensor2D = isTensor2D;
exports.isTensor3D = isTensor3D;
exports.isTensor4D = isTensor4D;
exports.isFloat = isFloat;
exports.isEven = isEven;
exports.round = round$1;
exports.isDimensions = isDimensions;
exports.computeReshapedDimensions = computeReshapedDimensions;
exports.getCenterPoint = getCenterPoint;
exports.range = range$1;
exports.isValidNumber = isValidNumber;
exports.isValidProbablitiy = isValidProbablitiy;
exports.NeuralNetwork = NeuralNetwork;

// Detection / landmark / descriptor result types.
exports.FaceDetection = FaceDetection;
exports.FaceDetectionWithLandmarks = FaceDetectionWithLandmarks;
exports.FaceLandmarks = FaceLandmarks;
exports.FaceLandmarks5 = FaceLandmarks5;
exports.FaceLandmarks68 = FaceLandmarks68;
exports.FaceMatch = FaceMatch;
exports.FullFaceDescription = FullFaceDescription;
exports.LabeledFaceDescriptors = LabeledFaceDescriptors;
exports.drawContour = drawContour;
exports.drawLandmarks = drawLandmarks;
exports.extractFaces = extractFaces;
exports.extractFaceTensors = extractFaceTensors;

// Landmark and recognition nets.
exports.FaceLandmarkNet = FaceLandmarkNet;
exports.createFaceLandmarkNet = createFaceLandmarkNet;
exports.FaceLandmark68Net = FaceLandmark68Net;
exports.FaceLandmark68TinyNet = FaceLandmark68TinyNet;
exports.createFaceRecognitionNet = createFaceRecognitionNet;
exports.FaceRecognitionNet = FaceRecognitionNet;

// Legacy allFaces API (kept for backward compatibility).
exports.allFacesSsdMobilenetv1 = allFacesSsdMobilenetv1;
exports.allFacesTinyYolov2 = allFacesTinyYolov2;
exports.allFacesMtcnn = allFacesMtcnn;
exports.allFaces = allFaces;

// Composable task API.
exports.ComposableTask = ComposableTask;
exports.ComputeFaceDescriptorsTaskBase = ComputeFaceDescriptorsTaskBase;
exports.ComputeAllFaceDescriptorsTask = ComputeAllFaceDescriptorsTask;
exports.ComputeSingleFaceDescriptorTask = ComputeSingleFaceDescriptorTask;
exports.detectSingleFace = detectSingleFace;
exports.detectAllFaces = detectAllFaces;
exports.DetectFacesTaskBase = DetectFacesTaskBase;
exports.DetectAllFacesTask = DetectAllFacesTask;
exports.DetectSingleFaceTask = DetectSingleFaceTask;
exports.DetectFaceLandmarksTaskBase = DetectFaceLandmarksTaskBase;
exports.DetectAllFaceLandmarksTask = DetectAllFaceLandmarksTask;
exports.DetectSingleFaceLandmarksTask = DetectSingleFaceLandmarksTask;
exports.FaceMatcher = FaceMatcher;

// Global net singletons and their convenience wrappers / loaders.
exports.nets = nets;
exports.ssdMobilenetv1 = ssdMobilenetv1;
exports.tinyFaceDetector = tinyFaceDetector;
exports.tinyYolov2 = tinyYolov2;
exports.mtcnn = mtcnn;
exports.detectFaceLandmarks = detectFaceLandmarks;
exports.detectFaceLandmarksTiny = detectFaceLandmarksTiny;
exports.computeFaceDescriptor = computeFaceDescriptor;
exports.loadSsdMobilenetv1Model = loadSsdMobilenetv1Model;
exports.loadTinyFaceDetectorModel = loadTinyFaceDetectorModel;
exports.loadMtcnnModel = loadMtcnnModel;
exports.loadTinyYolov2Model = loadTinyYolov2Model;
exports.loadFaceLandmarkModel = loadFaceLandmarkModel;
exports.loadFaceLandmarkTinyModel = loadFaceLandmarkTinyModel;
exports.loadFaceRecognitionModel = loadFaceRecognitionModel;
exports.loadFaceDetectionModel = loadFaceDetectionModel;
exports.locateFaces = locateFaces;
exports.detectLandmarks = detectLandmarks;

// Detector implementations and their options / factory functions.
exports.createMtcnn = createMtcnn;
exports.Mtcnn = Mtcnn;
exports.MtcnnOptions = MtcnnOptions;
exports.createSsdMobilenetv1 = createSsdMobilenetv1;
exports.createFaceDetectionNet = createFaceDetectionNet;
exports.FaceDetectionNet = FaceDetectionNet;
exports.SsdMobilenetv1 = SsdMobilenetv1;
exports.SsdMobilenetv1Options = SsdMobilenetv1Options;
exports.createTinyFaceDetector = createTinyFaceDetector;
exports.TinyFaceDetector = TinyFaceDetector;
exports.TinyFaceDetectorOptions = TinyFaceDetectorOptions;
exports.createTinyYolov2 = createTinyYolov2;
exports.TinyYolov2 = TinyYolov2$1;
exports.euclideanDistance = euclideanDistance;

// Mark the namespace as an ES module for interop with bundlers/transpilers.
Object.defineProperty(exports, '__esModule', { value: true });
|
||
|
|
||
|
})));
|
||
|
//# sourceMappingURL=face-api.js.map
|