mirror of https://github.com/ente-io/ente.git, synced 2025-08-07 07:03:35 +00:00
[mob][photos] proper logging in ML indexing isolate
This commit is contained in: parent 1b1f54feb0, commit a9bc6502cb
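Summary: the commit replaces ad-hoc dev.log calls and Stopwatch fields across the ML indexing isolate with Logger-based records and DateTime checkpoints, attaches caught errors and stack traces to severe records instead of interpolating them into message strings, and routes the isolate's log records through SuperLogging.onLogRecord so they land in the app's logs rather than only in debugPrint output.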
@@ -1,5 +1,4 @@
 import "dart:async";
-import "dart:developer" as dev show log;
 import 'dart:typed_data' show ByteData, Float32List;
 import 'dart:ui' as ui show Image;
 
@@ -55,9 +54,8 @@ class FaceDetectionService extends MlModel {
       'sessionAddress should be valid',
     );
 
-    final stopwatch = Stopwatch()..start();
+    final startTime = DateTime.now();
 
-    final stopwatchPreprocessing = Stopwatch()..start();
     final (inputImageList, newSize) =
         await preprocessImageToFloat32ChannelsFirst(
       image,
@@ -67,17 +65,12 @@ class FaceDetectionService extends MlModel {
       requiredHeight: kInputHeight,
       maintainAspectRatio: true,
     );
-    stopwatchPreprocessing.stop();
-    dev.log(
-      'Face detection image preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
-    );
+    final preprocessingTime = DateTime.now();
     _logger.info(
-      'Image decoding and preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
+      'Face detection preprocessing is finished, in ${preprocessingTime.difference(startTime).inMilliseconds} ms',
     );
 
     // Run inference
-    final stopwatchInterpreter = Stopwatch()..start();
-
     List<List<List<double>>>? nestedResults = [];
     try {
       if (MlModel.usePlatformPlugin) {
@@ -88,23 +81,16 @@ class FaceDetectionService extends MlModel {
           inputImageList,
         ); // [1, 25200, 16]
       }
+      _logger.info(
+        'inference is finished, in ${DateTime.now().difference(preprocessingTime).inMilliseconds} ms',
+      );
     } catch (e, s) {
-      dev.log('Error while running inference', error: e, stackTrace: s);
+      _logger.severe('Error while running inference', e, s);
       throw YOLOFaceInterpreterRunException();
     }
-    stopwatchInterpreter.stop();
     try {
-      _logger.info(
-        'interpreter.run is finished, in ${stopwatchInterpreter.elapsedMilliseconds} ms',
-      );
-
       final relativeDetections =
           _yoloPostProcessOutputs(nestedResults!, newSize);
-      stopwatch.stop();
-      _logger.info(
-        'predict() face detection executed in ${stopwatch.elapsedMilliseconds}ms',
-      );
-
       return relativeDetections;
     } catch (e, s) {
       _logger.severe('Error while post processing', e, s);
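The recurring pattern in these hunks: Stopwatch fields and dev.log are swapped for DateTime checkpoints whose differences are logged through the class logger. A minimal sketch of that pattern, with illustrative stage functions that are not from the ente codebase:

  import 'package:logging/logging.dart';

  final Logger _logger = Logger('TimingExample');

  // Illustrative stand-ins for the real preprocessing/inference stages.
  Future<void> preprocess() async {}
  Future<void> runInference() async {}

  Future<void> predictWithTimings() async {
    final startTime = DateTime.now();
    await preprocess();
    final preprocessingTime = DateTime.now();
    _logger.info(
      'preprocessing finished, in '
      '${preprocessingTime.difference(startTime).inMilliseconds} ms',
    );
    await runInference();
    _logger.info(
      'inference finished, in '
      '${DateTime.now().difference(preprocessingTime).inMilliseconds} ms',
    );
  }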
@@ -46,9 +46,11 @@ class FaceEmbeddingService extends MlModel {
       } else {
         return _runFFIBasedPredict(input, sessionAddress);
       }
-    } catch (e) {
-      _logger.info(
-        'MobileFaceNet (PlatformPlugin: $MlModel.usePlatformPlugin)Error while running inference: $e',
+    } catch (e, s) {
+      _logger.severe(
+        'Error while running inference (PlatformPlugin: ${MlModel.usePlatformPlugin})',
+        e,
+        s,
       );
       throw MobileFaceNetInterpreterRunException();
     }
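The embedding hunk shows the error-logging half of the change: catch the stack trace with catch (e, s) and hand both error and trace to Logger.severe, which accepts them as optional positional arguments in package:logging. A small sketch of that shape (the guarded wrapper is illustrative, not ente code):

  import 'package:logging/logging.dart';

  final Logger _logger = Logger('EmbeddingExample');

  Future<List<double>> runGuarded(Future<List<double>> Function() body) async {
    try {
      return await body();
    } catch (e, s) {
      // severe(Object? message, [Object? error, StackTrace? stackTrace])
      // keeps the error and trace as structured fields on the LogRecord,
      // instead of interpolating `$e` into the message string.
      _logger.severe('Error while running inference', e, s);
      rethrow;
    }
  }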
@@ -1,5 +1,4 @@
 import "dart:async" show unawaited;
-import "dart:developer" as dev show log;
 import "dart:typed_data" show ByteData, Float32List;
 import "dart:ui" show Image;
 
@@ -16,7 +15,7 @@ import "package:photos/services/machine_learning/ml_result.dart";
 import "package:photos/utils/image_ml_util.dart";
 
 class FaceRecognitionService {
-  final _logger = Logger("FaceRecognitionService");
+  static final _logger = Logger("FaceRecognitionService");
 
   // Singleton pattern
   FaceRecognitionService._privateConstructor();
@@ -76,8 +75,6 @@ class FaceRecognitionService {
     int faceEmbeddingAddress,
   ) async {
     final faceResults = <FaceResult>[];
-
-    final Stopwatch stopwatch = Stopwatch()..start();
     final startTime = DateTime.now();
 
     // Get the faces
@@ -89,19 +86,17 @@ class FaceRecognitionService {
       faceDetectionAddress,
       faceResults,
     );
-    dev.log(
-        "${faceDetectionResult.length} faces detected with scores ${faceDetectionResult.map((e) => e.score).toList()}: completed `detectFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
+    final detectFacesTime = DateTime.now();
+    final detectFacesMs = detectFacesTime.difference(startTime).inMilliseconds;
 
     // If no faces were detected, return a result with no faces. Otherwise, continue.
     if (faceDetectionResult.isEmpty) {
-      dev.log(
-          "No faceDetectionResult, Completed analyzing image with uploadedFileID $enteFileID, in "
-          "${stopwatch.elapsedMilliseconds} ms");
+      _logger.info(
+        "Finished runFacesPipeline with fileID $enteFileID in $detectFacesMs ms (${faceDetectionResult.length} faces, detectFaces: $detectFacesMs ms)",
+      );
       return [];
     }
 
-    stopwatch.reset();
     // Align the faces
     final Float32List faceAlignmentResult = await _alignFacesSync(
       image,
@@ -109,23 +104,24 @@ class FaceRecognitionService {
       faceDetectionResult,
       faceResults,
     );
-    dev.log("Completed `alignFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
+    final alignFacesTime = DateTime.now();
+    final alignFacesMs =
+        alignFacesTime.difference(detectFacesTime).inMilliseconds;
 
-    stopwatch.reset();
     // Get the embeddings of the faces
-    final embeddings = await _embedFacesSync(
+    await _embedFacesSync(
       faceAlignmentResult,
       faceEmbeddingAddress,
       faceResults,
     );
-    dev.log("Completed `embedFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
-    stopwatch.stop();
+    final embedFacesTime = DateTime.now();
+    final embedFacesMs =
+        embedFacesTime.difference(alignFacesTime).inMilliseconds;
+    final totalMs = DateTime.now().difference(startTime).inMilliseconds;
 
-    dev.log("Finished faces pipeline (${embeddings.length} faces) with "
-        "uploadedFileID $enteFileID, in "
-        "${DateTime.now().difference(startTime).inMilliseconds} ms");
+    _logger.info(
+      "Finished runFacesPipeline with fileID $enteFileID in $totalMs ms (${faceDetectionResult.length} faces, detectFaces: $detectFacesMs ms, alignFaces: $alignFacesMs ms, embedFaces: $embedFacesMs ms)",
+    );
 
     return faceResults;
   }
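Note how the checkpoints chain in runFacesPipeline: detectFaces is measured from startTime, alignFaces from detectFacesTime, and embedFaces from alignFacesTime, so the three stage durations add up to the logged total (give or take the cost of the intervening calls).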
@@ -160,8 +156,8 @@ class FaceRecognitionService {
       return faces;
     } on YOLOFaceInterpreterRunException {
       throw CouldNotRunFaceDetector();
-    } catch (e) {
-      dev.log('[SEVERE] Face detection failed: $e');
+    } catch (e, s) {
+      _logger.severe('Face detection failed', e, s);
       throw GeneralFaceMlException('Face detection failed: $e');
     }
   }
@@ -184,6 +180,9 @@ class FaceRecognitionService {
 
       // Store the results
       if (alignmentResults.length != faces.length) {
+        _logger.severe(
+          "The amount of alignment results (${alignmentResults.length}) does not match the number of faces (${faces.length})",
+        );
         throw Exception(
           "The amount of alignment results (${alignmentResults.length}) does not match the number of faces (${faces.length})",
         );
@@ -195,7 +194,7 @@ class FaceRecognitionService {
 
       return alignedFaces;
     } catch (e, s) {
-      dev.log('[SEVERE] Face alignment failed: $e $s');
+      _logger.severe('Face alignment failed: $e $s');
       throw CouldNotWarpAffine();
     }
   }
@@ -214,6 +213,9 @@ class FaceRecognitionService {
 
       // Store the results
       if (embeddings.length != faceResults.length) {
+        _logger.severe(
+          "The amount of embeddings (${embeddings.length}) does not match the number of faces (${faceResults.length})",
+        );
         throw Exception(
           "The amount of embeddings (${embeddings.length}) does not match the number of faces (${faceResults.length})",
         );
@@ -225,8 +227,8 @@ class FaceRecognitionService {
       return embeddings;
     } on MobileFaceNetInterpreterRunException {
       throw CouldNotRunFaceEmbeddor();
-    } catch (e) {
-      dev.log('[SEVERE] Face embedding (batch) failed: $e');
+    } catch (e, s) {
+      _logger.severe('Face embedding (batch) failed', e, s);
       throw GeneralFaceMlException('Face embedding (batch) failed: $e');
     }
   }
@@ -102,6 +102,7 @@ class MLComputer {
           sendPort.send(true);
           break;
         case MLComputerOperation.runClipText:
+          //TODO:lau check logging here
          final textEmbedding = await ClipTextEncoder.predict(args);
           sendPort.send(List<double>.from(textEmbedding, growable: false));
           break;
@@ -199,7 +200,8 @@ class MLComputer {
 
     // Load ClipText model
     final String modelName = ClipTextEncoder.instance.modelName;
-    final String? modelPath = await ClipTextEncoder.instance.downloadModelSafe();
+    final String? modelPath =
+        await ClipTextEncoder.instance.downloadModelSafe();
     if (modelPath == null) {
       throw Exception("Could not download clip text model, no wifi");
     }
@@ -67,9 +67,7 @@ class MLIndexingIsolate {
   @pragma('vm:entry-point')
   static void _isolateMain(SendPort mainSendPort) async {
     Logger.root.level = kDebugMode ? Level.ALL : Level.INFO;
-    Logger.root.onRecord.listen((LogRecord rec) {
-      debugPrint('[MLIsolate] ${rec.toPrettyString()}');
-    });
+    Logger.root.onRecord.listen(SuperLogging.onLogRecord);
     final receivePort = ReceivePort();
     mainSendPort.send(receivePort.sendPort);
     receivePort.listen((message) async {
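This is the hunk the commit title points at: inside the spawned isolate, Logger.root previously piped records to debugPrint, which only surfaces on the debug console; now records go to SuperLogging.onLogRecord. A sketch of the wiring, where handleRecord stands in for SuperLogging.onLogRecord (assumed to be a void Function(LogRecord) that persists or forwards records; its internals are not shown in this diff):

  import 'dart:isolate';
  import 'package:logging/logging.dart';

  // Stand-in for SuperLogging.onLogRecord.
  void handleRecord(LogRecord rec) {
    print('[MLIsolate] ${rec.level.name} ${rec.loggerName}: ${rec.message}');
  }

  @pragma('vm:entry-point')
  void isolateMain(SendPort mainSendPort) {
    // Route every logger inside the isolate through one handler so records
    // reach the app's log sink instead of only the debug console.
    Logger.root.level = Level.INFO;
    Logger.root.onRecord.listen(handleRecord);

    final receivePort = ReceivePort();
    mainSendPort.send(receivePort.sendPort);
  }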
@@ -81,11 +79,7 @@ class MLIndexingIsolate {
       try {
         switch (function) {
           case MLIndexingOperation.analyzeImage:
-            final time = DateTime.now();
             final MLResult result = await analyzeImageStatic(args);
-            _logger.info(
-              "`analyzeImageSync` function executed in ${DateTime.now().difference(time).inMilliseconds} ms",
-            );
             sendPort.send(result.toJsonString());
             break;
           case MLIndexingOperation.loadModels:
@@ -93,6 +87,7 @@ class MLIndexingIsolate {
             final modelPaths = args['modelPaths'] as List<String>;
             final addresses = <int>[];
             for (int i = 0; i < modelNames.length; i++) {
+              // TODO:lau check logging here
               final int address = await MlModel.loadModel(
                 modelNames[i],
                 modelPaths[i],
@@ -102,6 +97,7 @@ class MLIndexingIsolate {
             sendPort.send(List<int>.from(addresses, growable: false));
             break;
           case MLIndexingOperation.releaseModels:
+            // TODO:lau check logging here
             final modelNames = args['modelNames'] as List<String>;
             final modelAddresses = args['modelAddresses'] as List<int>;
             for (int i = 0; i < modelNames.length; i++) {
@@ -31,14 +31,27 @@ class ClipImageEncoder extends MlModel {
   static Future<List<double>> predict(
     Image image,
     ByteData imageByteData,
-    int sessionAddress,
-  ) async {
+    int sessionAddress, [
+    int? enteFileID,
+  ]) async {
+    final startTime = DateTime.now();
     final inputList = await preprocessImageClip(image, imageByteData);
+    final preprocessingTime = DateTime.now();
+    final preprocessingMs =
+        preprocessingTime.difference(startTime).inMilliseconds;
+    late List<double> result;
     if (MlModel.usePlatformPlugin) {
-      return await _runPlatformPluginPredict(inputList);
+      result = await _runPlatformPluginPredict(inputList);
     } else {
-      return _runFFIBasedPredict(inputList, sessionAddress);
+      result = _runFFIBasedPredict(inputList, sessionAddress);
     }
+    final inferTime = DateTime.now();
+    final inferenceMs = inferTime.difference(preprocessingTime).inMilliseconds;
+    final totalMs = inferTime.difference(startTime).inMilliseconds;
+    _logger.info(
+      "Clip predict took $totalMs ms${enteFileID != null ? " with fileID $enteFileID" : ""} (inference: $inferenceMs ms, preprocessing: $preprocessingMs ms)",
+    );
+    return result;
   }
 
   static List<double> _runFFIBasedPredict(
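predict gains an optional positional int? enteFileID (the square-bracket parameter list), so existing call sites keep compiling while SemanticSearchService can pass the file ID for log correlation. A simplified sketch of the signature change, with placeholder types and body:

  // Placeholder input and body; only the parameter shape mirrors the diff.
  Future<List<double>> predict(
    List<double> input,
    int sessionAddress, [
    int? enteFileID,
  ]) async {
    final suffix = enteFileID != null ? ' with fileID $enteFileID' : '';
    print('Clip predict finished$suffix');
    return <double>[];
  }

  Future<void> main() async {
    await predict([0.1, 0.2], 42); // old call sites compile unchanged
    await predict([0.1, 0.2], 42, 1234); // new call site passes the file ID
  }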
@@ -24,7 +24,7 @@ import "package:photos/services/machine_learning/semantic_search/clip/clip_image
 import "package:shared_preferences/shared_preferences.dart";
 
 class SemanticSearchService {
-  final _logger = Logger("SemanticSearchService");
+  static final _logger = Logger("SemanticSearchService");
   SemanticSearchService._privateConstructor();
 
   static final SemanticSearchService instance =
@@ -293,18 +293,15 @@ class SemanticSearchService {
     ByteData imageByteData,
     int clipImageAddress,
   ) async {
-    final startTime = DateTime.now();
     final embedding = await ClipImageEncoder.predict(
       image,
       imageByteData,
       clipImageAddress,
+      enteFileID,
     );
 
     final clipResult = ClipResult(fileID: enteFileID, embedding: embedding);
 
-    dev.log('Finished running ClipImage for $enteFileID in '
-        '${DateTime.now().difference(startTime).inMilliseconds} ms');
-
     return clipResult;
   }
 }
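With timing now logged inside ClipImageEncoder.predict, the caller drops its own startTime/dev.log pair and simply forwards enteFileID so the encoder can tag its log line.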
@@ -35,7 +35,9 @@ Future<(Image, ByteData)> decodeImageFromPath(String imagePath) async {
         await HeifConverter.convert(imagePath, format: 'jpg');
     if (jpgPath == null) {
       _logger.severe('Error converting $format to jpg:', e, s);
-      throw Exception('InvalidImageFormatException: Error decoding image of format $format');
+      throw Exception(
+        'InvalidImageFormatException: Error decoding image of format $format',
+      );
     }
     final imageData = await File(jpgPath).readAsBytes();
     final image = await decodeImageFromData(imageData);
@@ -47,7 +49,9 @@ Future<(Image, ByteData)> decodeImageFromPath(String imagePath) async {
         e,
         s,
       );
-      throw Exception('InvalidImageFormatException: Error decoding image of format $format');
+      throw Exception(
+        'InvalidImageFormatException: Error decoding image of format $format',
+      );
     }
   }
 }
@@ -278,7 +282,9 @@ Future<(Float32List, List<AlignmentResult>, List<bool>, List<double>, Size)>
     final (alignmentResult, correctlyEstimated) =
         SimilarityTransform.estimate(face.allKeypoints);
     if (!correctlyEstimated) {
-      log('Face alignment failed because not able to estimate SimilarityTransform, for face: $face');
+      _logger.severe(
+        'Face alignment failed because not able to estimate SimilarityTransform, for face: $face',
+      );
       throw Exception(
         'Face alignment failed because not able to estimate SimilarityTransform',
       );
@@ -388,18 +388,20 @@ Future<MLResult> analyzeImageStatic(Map args) async {
     final int clipImageAddress = args["clipImageAddress"] as int;
 
     _logger.info(
-      "Start analyzing image with uploadedFileID: $enteFileID inside the isolate",
+      "Start analyzeImageStatic with fileID: $enteFileID (runFaces: $runFaces, runClip: $runClip)",
     );
-    final time = DateTime.now();
+    final startTime = DateTime.now();
 
     // Decode the image once to use for both face detection and alignment
     final (image, imageByteData) = await decodeImageFromPath(imagePath);
-    _logger.info('Reading and decoding image took '
-        '${DateTime.now().difference(time).inMilliseconds} ms');
     final decodedImageSize =
         Dimensions(height: image.height, width: image.width);
     final result = MLResult.fromEnteFileID(enteFileID);
     result.decodedImageSize = decodedImageSize;
+    final decodeTime = DateTime.now();
+    _logger.info(
+      'Reading and decoding image took ${decodeTime.difference(startTime).inMilliseconds} ms (fileID: $enteFileID)',
+    );
 
     if (runFaces) {
       final resultFaces = await FaceRecognitionService.runFacesPipeline(
@@ -426,6 +428,11 @@ Future<MLResult> analyzeImageStatic(Map args) async {
       result.clip = clipResult;
     }
 
+    final endTime = DateTime.now();
+    _logger.info(
+      'Finished analyzeImageStatic with fileID: $enteFileID, in ${endTime.difference(startTime).inMilliseconds} ms',
+    );
+
     return result;
   } catch (e, s) {
     _logger.severe("Could not analyze image", e, s);
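The paired "Start analyzeImageStatic" / "Finished analyzeImageStatic" records bracket each file's run, so per-file indexing time can be read directly from the isolate logs.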