diff --git a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
index c232317b3a..728de95dae 100644
--- a/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
+++ b/mobile/lib/services/machine_learning/face_ml/face_detection/face_detection_service.dart
@@ -1,5 +1,4 @@
 import "dart:async";
-import "dart:developer" as dev show log;
 import 'dart:typed_data' show ByteData, Float32List;
 import 'dart:ui' as ui show Image;
 
@@ -55,9 +54,8 @@ class FaceDetectionService extends MlModel {
       'sessionAddress should be valid',
     );
 
-    final stopwatch = Stopwatch()..start();
+    final startTime = DateTime.now();
 
-    final stopwatchPreprocessing = Stopwatch()..start();
     final (inputImageList, newSize) =
         await preprocessImageToFloat32ChannelsFirst(
       image,
@@ -67,17 +65,12 @@ class FaceDetectionService extends MlModel {
       requiredHeight: kInputHeight,
       maintainAspectRatio: true,
     );
-    stopwatchPreprocessing.stop();
-    dev.log(
-      'Face detection image preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
-    );
+    final preprocessingTime = DateTime.now();
     _logger.info(
-      'Image decoding and preprocessing is finished, in ${stopwatchPreprocessing.elapsedMilliseconds}ms',
+      'Face detection preprocessing is finished, in ${preprocessingTime.difference(startTime).inMilliseconds} ms',
     );
 
     // Run inference
-    final stopwatchInterpreter = Stopwatch()..start();
-
     List<List<List<double>>>? nestedResults = [];
     try {
       if (MlModel.usePlatformPlugin) {
@@ -88,23 +81,16 @@ class FaceDetectionService extends MlModel {
           inputImageList,
         ); // [1, 25200, 16]
       }
+      _logger.info(
+        'inference is finished, in ${DateTime.now().difference(preprocessingTime).inMilliseconds} ms',
+      );
     } catch (e, s) {
-      dev.log('Error while running inference', error: e, stackTrace: s);
+      _logger.severe('Error while running inference', e, s);
       throw YOLOFaceInterpreterRunException();
     }
-    stopwatchInterpreter.stop();
 
     try {
-      _logger.info(
-        'interpreter.run is finished, in ${stopwatchInterpreter.elapsedMilliseconds} ms',
-      );
-
       final relativeDetections = _yoloPostProcessOutputs(nestedResults!, newSize);
-      stopwatch.stop();
-      _logger.info(
-        'predict() face detection executed in ${stopwatch.elapsedMilliseconds}ms',
-      );
-
       return relativeDetections;
     } catch (e, s) {
       _logger.severe('Error while post processing', e, s);
diff --git a/mobile/lib/services/machine_learning/face_ml/face_embedding/face_embedding_service.dart b/mobile/lib/services/machine_learning/face_ml/face_embedding/face_embedding_service.dart
index f36a69c752..0853112c44 100644
--- a/mobile/lib/services/machine_learning/face_ml/face_embedding/face_embedding_service.dart
+++ b/mobile/lib/services/machine_learning/face_ml/face_embedding/face_embedding_service.dart
@@ -46,9 +46,11 @@ class FaceEmbeddingService extends MlModel {
       } else {
         return _runFFIBasedPredict(input, sessionAddress);
       }
-    } catch (e) {
-      _logger.info(
-        'MobileFaceNet (PlatformPlugin: $MlModel.usePlatformPlugin)Error while running inference: $e',
+    } catch (e, s) {
+      _logger.severe(
+        'Error while running inference (PlatformPlugin: ${MlModel.usePlatformPlugin})',
+        e,
+        s,
       );
       throw MobileFaceNetInterpreterRunException();
     }
diff --git a/mobile/lib/services/machine_learning/face_ml/face_recognition_service.dart b/mobile/lib/services/machine_learning/face_ml/face_recognition_service.dart
index 8b3bb7eda1..22e3019517 100644
--- a/mobile/lib/services/machine_learning/face_ml/face_recognition_service.dart
+++ b/mobile/lib/services/machine_learning/face_ml/face_recognition_service.dart
@@ -1,5 +1,4 @@
 import "dart:async" show unawaited;
-import "dart:developer" as dev show log;
 import "dart:typed_data" show ByteData, Float32List;
 import "dart:ui" show Image;
 
@@ -16,7 +15,7 @@ import "package:photos/services/machine_learning/ml_result.dart";
 import "package:photos/utils/image_ml_util.dart";
 
 class FaceRecognitionService {
-  final _logger = Logger("FaceRecognitionService");
+  static final _logger = Logger("FaceRecognitionService");
 
   // Singleton pattern
   FaceRecognitionService._privateConstructor();
@@ -76,8 +75,6 @@ class FaceRecognitionService {
     int faceEmbeddingAddress,
   ) async {
     final faceResults = [];
-
-    final Stopwatch stopwatch = Stopwatch()..start();
     final startTime = DateTime.now();
 
     // Get the faces
@@ -89,19 +86,17 @@ class FaceRecognitionService {
       faceDetectionAddress,
      faceResults,
     );
-    dev.log(
-        "${faceDetectionResult.length} faces detected with scores ${faceDetectionResult.map((e) => e.score).toList()}: completed `detectFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
+    final detectFacesTime = DateTime.now();
+    final detectFacesMs = detectFacesTime.difference(startTime).inMilliseconds;
 
     // If no faces were detected, return a result with no faces. Otherwise, continue.
     if (faceDetectionResult.isEmpty) {
-      dev.log(
-          "No faceDetectionResult, Completed analyzing image with uploadedFileID $enteFileID, in "
-          "${stopwatch.elapsedMilliseconds} ms");
+      _logger.info(
+        "Finished runFacesPipeline with fileID $enteFileID in $detectFacesMs ms (${faceDetectionResult.length} faces, detectFaces: $detectFacesMs ms)",
+      );
       return [];
     }
-    stopwatch.reset();
 
     // Align the faces
     final Float32List faceAlignmentResult = await _alignFacesSync(
       image,
@@ -109,23 +104,24 @@ class FaceRecognitionService {
       faceDetectionResult,
       faceResults,
     );
-    dev.log("Completed `alignFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
+    final alignFacesTime = DateTime.now();
+    final alignFacesMs =
+        alignFacesTime.difference(detectFacesTime).inMilliseconds;
 
-    stopwatch.reset();
     // Get the embeddings of the faces
-    final embeddings = await _embedFacesSync(
+    await _embedFacesSync(
       faceAlignmentResult,
       faceEmbeddingAddress,
       faceResults,
     );
-    dev.log("Completed `embedFacesSync` function, in "
-        "${stopwatch.elapsedMilliseconds} ms");
-    stopwatch.stop();
+    final embedFacesTime = DateTime.now();
+    final embedFacesMs =
+        embedFacesTime.difference(alignFacesTime).inMilliseconds;
+    final totalMs = DateTime.now().difference(startTime).inMilliseconds;
 
-    dev.log("Finished faces pipeline (${embeddings.length} faces) with "
-        "uploadedFileID $enteFileID, in "
-        "${DateTime.now().difference(startTime).inMilliseconds} ms");
+    _logger.info(
+      "Finished runFacesPipeline with fileID $enteFileID in $totalMs ms (${faceDetectionResult.length} faces, detectFaces: $detectFacesMs ms, alignFaces: $alignFacesMs ms, embedFaces: $embedFacesMs ms)",
+    );
 
     return faceResults;
   }
@@ -160,8 +156,8 @@ class FaceRecognitionService {
       return faces;
     } on YOLOFaceInterpreterRunException {
       throw CouldNotRunFaceDetector();
-    } catch (e) {
-      dev.log('[SEVERE] Face detection failed: $e');
+    } catch (e, s) {
+      _logger.severe('Face detection failed', e, s);
       throw GeneralFaceMlException('Face detection failed: $e');
     }
   }
@@ -184,6 +180,9 @@ class FaceRecognitionService {
 
       // Store the results
       if (alignmentResults.length != faces.length) {
+        _logger.severe(
+          "The amount of alignment results (${alignmentResults.length}) does not match the number of faces (${faces.length})",
+        );
         throw Exception(
           "The amount of alignment results (${alignmentResults.length}) does not match the number of faces (${faces.length})",
         );
@@ -195,7 +194,7 @@ class FaceRecognitionService {
 
       return alignedFaces;
     } catch (e, s) {
-      dev.log('[SEVERE] Face alignment failed: $e $s');
+      _logger.severe('Face alignment failed: $e $s');
       throw CouldNotWarpAffine();
     }
   }
@@ -214,6 +213,9 @@ class FaceRecognitionService {
 
       // Store the results
      if (embeddings.length != faceResults.length) {
+        _logger.severe(
+          "The amount of embeddings (${embeddings.length}) does not match the number of faces (${faceResults.length})",
+        );
         throw Exception(
           "The amount of embeddings (${embeddings.length}) does not match the number of faces (${faceResults.length})",
         );
@@ -225,8 +227,8 @@ class FaceRecognitionService {
       return embeddings;
     } on MobileFaceNetInterpreterRunException {
       throw CouldNotRunFaceEmbeddor();
-    } catch (e) {
-      dev.log('[SEVERE] Face embedding (batch) failed: $e');
+    } catch (e, s) {
+      _logger.severe('Face embedding (batch) failed', e, s);
       throw GeneralFaceMlException('Face embedding (batch) failed: $e');
     }
   }
diff --git a/mobile/lib/services/machine_learning/ml_computer.dart b/mobile/lib/services/machine_learning/ml_computer.dart
index 8f27dfb3d4..78743bbd84 100644
--- a/mobile/lib/services/machine_learning/ml_computer.dart
+++ b/mobile/lib/services/machine_learning/ml_computer.dart
@@ -102,6 +102,7 @@ class MLComputer {
           sendPort.send(true);
           break;
         case MLComputerOperation.runClipText:
+          //TODO:lau check logging here
           final textEmbedding = await ClipTextEncoder.predict(args);
           sendPort.send(List.from(textEmbedding, growable: false));
           break;
@@ -199,7 +200,8 @@ class MLComputer {
 
     // Load ClipText model
     final String modelName = ClipTextEncoder.instance.modelName;
-    final String? modelPath = await ClipTextEncoder.instance.downloadModelSafe();
+    final String? modelPath =
+        await ClipTextEncoder.instance.downloadModelSafe();
     if (modelPath == null) {
       throw Exception("Could not download clip text model, no wifi");
     }
diff --git a/mobile/lib/services/machine_learning/ml_indexing_isolate.dart b/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
index fec1cbf410..66772dd40b 100644
--- a/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
+++ b/mobile/lib/services/machine_learning/ml_indexing_isolate.dart
@@ -67,9 +67,7 @@ class MLIndexingIsolate {
   @pragma('vm:entry-point')
   static void _isolateMain(SendPort mainSendPort) async {
     Logger.root.level = kDebugMode ? Level.ALL : Level.INFO;
-    Logger.root.onRecord.listen((LogRecord rec) {
-      debugPrint('[MLIsolate] ${rec.toPrettyString()}');
-    });
+    Logger.root.onRecord.listen(SuperLogging.onLogRecord);
     final receivePort = ReceivePort();
     mainSendPort.send(receivePort.sendPort);
     receivePort.listen((message) async {
@@ -81,11 +79,7 @@ class MLIndexingIsolate {
       try {
         switch (function) {
           case MLIndexingOperation.analyzeImage:
-            final time = DateTime.now();
             final MLResult result = await analyzeImageStatic(args);
-            _logger.info(
-              "`analyzeImageSync` function executed in ${DateTime.now().difference(time).inMilliseconds} ms",
-            );
             sendPort.send(result.toJsonString());
             break;
           case MLIndexingOperation.loadModels:
@@ -93,6 +87,7 @@ class MLIndexingIsolate {
             final modelPaths = args['modelPaths'] as List;
             final addresses = [];
             for (int i = 0; i < modelNames.length; i++) {
+              // TODO:lau check logging here
               final int address = await MlModel.loadModel(
                 modelNames[i],
                 modelPaths[i],
@@ -102,6 +97,7 @@ class MLIndexingIsolate {
             sendPort.send(List.from(addresses, growable: false));
             break;
           case MLIndexingOperation.releaseModels:
+            // TODO:lau check logging here
            final modelNames = args['modelNames'] as List;
            final modelAddresses = args['modelAddresses'] as List;
            for (int i = 0; i < modelNames.length; i++) {
diff --git a/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart b/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
index 1b6e8a9ebe..193e9bac14 100644
--- a/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
+++ b/mobile/lib/services/machine_learning/semantic_search/clip/clip_image_encoder.dart
@@ -31,14 +31,27 @@ class ClipImageEncoder extends MlModel {
   static Future<List<double>> predict(
     Image image,
     ByteData imageByteData,
-    int sessionAddress,
-  ) async {
+    int sessionAddress, [
+    int? enteFileID,
+  ]) async {
+    final startTime = DateTime.now();
     final inputList = await preprocessImageClip(image, imageByteData);
+    final preprocessingTime = DateTime.now();
+    final preprocessingMs =
+        preprocessingTime.difference(startTime).inMilliseconds;
+    late List<double> result;
     if (MlModel.usePlatformPlugin) {
-      return await _runPlatformPluginPredict(inputList);
+      result = await _runPlatformPluginPredict(inputList);
     } else {
-      return _runFFIBasedPredict(inputList, sessionAddress);
+      result = _runFFIBasedPredict(inputList, sessionAddress);
     }
+    final inferTime = DateTime.now();
+    final inferenceMs = inferTime.difference(preprocessingTime).inMilliseconds;
+    final totalMs = inferTime.difference(startTime).inMilliseconds;
+    _logger.info(
+      "Clip predict took $totalMs ms${enteFileID != null ? " with fileID $enteFileID" : ""} (inference: $inferenceMs ms, preprocessing: $preprocessingMs ms)",
+    );
+    return result;
   }
 
   static List _runFFIBasedPredict(
diff --git a/mobile/lib/services/machine_learning/semantic_search/semantic_search_service.dart b/mobile/lib/services/machine_learning/semantic_search/semantic_search_service.dart
index 7beb284df7..067259307f 100644
--- a/mobile/lib/services/machine_learning/semantic_search/semantic_search_service.dart
+++ b/mobile/lib/services/machine_learning/semantic_search/semantic_search_service.dart
@@ -24,7 +24,7 @@ import "package:photos/services/machine_learning/semantic_search/clip/clip_image
 import "package:shared_preferences/shared_preferences.dart";
 
 class SemanticSearchService {
-  final _logger = Logger("SemanticSearchService");
+  static final _logger = Logger("SemanticSearchService");
 
   SemanticSearchService._privateConstructor();
   static final SemanticSearchService instance =
@@ -293,18 +290,15 @@ class SemanticSearchService {
     ByteData imageByteData,
     int clipImageAddress,
   ) async {
-    final startTime = DateTime.now();
     final embedding = await ClipImageEncoder.predict(
       image,
       imageByteData,
       clipImageAddress,
+      enteFileID,
     );
 
     final clipResult = ClipResult(fileID: enteFileID, embedding: embedding);
 
-    dev.log('Finished running ClipImage for $enteFileID in '
-        '${DateTime.now().difference(startTime).inMilliseconds} ms');
-
     return clipResult;
   }
 }
diff --git a/mobile/lib/utils/image_ml_util.dart b/mobile/lib/utils/image_ml_util.dart
index e8eb3e7312..0de42f0c98 100644
--- a/mobile/lib/utils/image_ml_util.dart
+++ b/mobile/lib/utils/image_ml_util.dart
@@ -35,7 +35,9 @@ Future<(Image, ByteData)> decodeImageFromPath(String imagePath) async {
           await HeifConverter.convert(imagePath, format: 'jpg');
       if (jpgPath == null) {
         _logger.severe('Error converting $format to jpg:', e, s);
-        throw Exception('InvalidImageFormatException: Error decoding image of format $format');
+        throw Exception(
+          'InvalidImageFormatException: Error decoding image of format $format',
+        );
       }
       final imageData = await File(jpgPath).readAsBytes();
       final image = await decodeImageFromData(imageData);
@@ -47,7 +49,9 @@ Future<(Image, ByteData)> decodeImageFromPath(String imagePath) async {
         e,
         s,
       );
-      throw Exception('InvalidImageFormatException: Error decoding image of format $format');
+      throw Exception(
+        'InvalidImageFormatException: Error decoding image of format $format',
+      );
     }
   }
 }
@@ -278,7 +282,9 @@ Future<(Float32List, List, List, List, Size)>
     final (alignmentResult, correctlyEstimated) =
         SimilarityTransform.estimate(face.allKeypoints);
     if (!correctlyEstimated) {
-      log('Face alignment failed because not able to estimate SimilarityTransform, for face: $face');
+      _logger.severe(
+        'Face alignment failed because not able to estimate SimilarityTransform, for face: $face',
+      );
       throw Exception(
         'Face alignment failed because not able to estimate SimilarityTransform',
       );
diff --git a/mobile/lib/utils/ml_util.dart b/mobile/lib/utils/ml_util.dart
index 01ddd353a9..817c0bd4ea 100644
--- a/mobile/lib/utils/ml_util.dart
+++ b/mobile/lib/utils/ml_util.dart
@@ -388,18 +388,20 @@ Future analyzeImageStatic(Map args) async {
     final int clipImageAddress = args["clipImageAddress"] as int;
 
     _logger.info(
-      "Start analyzing image with uploadedFileID: $enteFileID inside the isolate",
+      "Start analyzeImageStatic with fileID: $enteFileID (runFaces: $runFaces, runClip: $runClip)",
     );
-    final time = DateTime.now();
+    final startTime = DateTime.now();
 
     // Decode the image once to use for both face detection and alignment
     final (image, imageByteData) = await decodeImageFromPath(imagePath);
-    _logger.info('Reading and decoding image took '
-        '${DateTime.now().difference(time).inMilliseconds} ms');
 
     final decodedImageSize = Dimensions(height: image.height, width: image.width);
     final result = MLResult.fromEnteFileID(enteFileID);
     result.decodedImageSize = decodedImageSize;
+    final decodeTime = DateTime.now();
+    _logger.info(
+      'Reading and decoding image took ${decodeTime.difference(startTime).inMilliseconds} ms (fileID: $enteFileID)',
+    );
 
     if (runFaces) {
       final resultFaces = await FaceRecognitionService.runFacesPipeline(
@@ -426,6 +428,11 @@ Future analyzeImageStatic(Map args) async {
       result.clip = clipResult;
     }
 
+    final endTime = DateTime.now();
+    _logger.info(
+      'Finished analyzeImageStatic with fileID: $enteFileID, in ${endTime.difference(startTime).inMilliseconds} ms',
+    );
+
     return result;
   } catch (e, s) {
     _logger.severe("Could not analyze image", e, s);