// Face-liveness detection controller for the selfie verification flow.
import 'dart:io';
|
|
import 'dart:math' as Math;
|
|
|
|
import 'package:camera/camera.dart';
|
|
import 'package:flutter/material.dart';
|
|
import 'package:flutter/services.dart';
|
|
import 'package:get/get.dart';
|
|
import 'package:google_mlkit_face_detection/google_mlkit_face_detection.dart';
|
|
import 'package:google_mlkit_face_mesh_detection/google_mlkit_face_mesh_detection.dart';
|
|
import 'package:sigap/src/features/auth/data/models/face_model.dart';
|
|
import 'package:sigap/src/features/auth/presentasion/controllers/selfie-verification/selfie_verification_controller.dart';
|
|
import 'package:uuid/uuid.dart';
|
|
|
|
/// Ordered stages of the face-liveness verification flow.
///
/// The controller advances through these states as the user completes
/// each gesture check (left/right head rotation, smile, eyes open) and
/// the final photo is captured.
enum LivenessStatus {
  preparing, // camera is being initialized
  detectingFace, // waiting for a face to appear in the frame
  checkLeftRotation, // waiting for the head to rotate left
  checkRightRotation, // waiting for the head to rotate right
  checkSmile, // waiting for a smile
  checkEyesOpen, // waiting for both eyes to be open
  readyForPhoto, // all gesture checks passed; about to capture
  photoTaken, // photo capture in progress
  completed, // liveness check finished successfully
  failed, // initialization, detection, or capture failed
}
|
|
|
|
/// GetX controller that drives the face-liveness verification flow:
/// camera setup, per-frame ML Kit face detection, a sequence of gesture
/// checks, person-consistency checking, and the final photo capture.
///
/// Observes app lifecycle (via [WidgetsBindingObserver]) to release and
/// re-acquire the camera.
class FaceLivenessController extends GetxController
    with WidgetsBindingObserver {
  // Camera
  CameraController? _cameraController; // null until initialization succeeds
  late FaceDetector _faceDetector; // created in onInit from [options]
  // NOTE(review): deliberately untyped (`dynamic`); assigned a
  // CameraDescription in _initializeCamera / _initializeCameraWithFallback.
  // Consider typing it `CameraDescription?` — confirm no other assignments.
  var frontCamera;
|
|
|
|
  // Face Detection States — one reactive flag per liveness step.
  final _isFaceInFrame = false.obs; // a face has been seen in the frame
  final _isFaceLeft = false.obs; // head rotated left far enough
  final _isFaceRight = false.obs; // head rotated right far enough
  final _isEyeOpen = false.obs; // both eyes detected open
  final _isNoFace = false.obs; // no usable face in the current frame
  final _isMultiFace = false.obs; // more than one face detected
  final _isCaptured = false.obs; // final photo has been taken
  final _isSmiled = false.obs; // smile detected
  final _isFaceReadyForPhoto = false.obs; // face near-frontal, ready to shoot
  final _isDifferentPerson = false.obs; // face changed mid-verification

  // Status tracking: current flow stage and the user-facing prompt.
  final Rx<LivenessStatus> status = Rx<LivenessStatus>(
    LivenessStatus.preparing,
  );
  final RxString currentInstruction = RxString('Initializing camera...');
|
  // Getters — read-only views of the reactive flags above.
  bool get isFaceInFrame => _isFaceInFrame.value;
  bool get isFaceLeft => _isFaceLeft.value;
  bool get isFaceRight => _isFaceRight.value;
  bool get isEyeOpen => _isEyeOpen.value;
  bool get isNoFace => _isNoFace.value;
  bool get isMultiFace => _isMultiFace.value;
  bool get isCaptured => _isCaptured.value;
  bool get isSmiled => _isSmiled.value;
  bool get isFaceReadyForPhoto => _isFaceReadyForPhoto.value;
  bool get isDifferentPerson => _isDifferentPerson.value;

  CameraController? get cameraController => _cameraController;

  // Face Mesh Detector.
  // NOTE(review): closed in onClose but never used in the visible part of
  // this file — may be used elsewhere, or dead weight.
  final FaceMeshDetector _faceMeshDetector = FaceMeshDetector(
    option: FaceMeshDetectorOptions.faceMesh,
  );

  // Face Comparison: bounding-box-based "embedding" of the first face seen
  // this session; used by _compareFaces to flag a person change.
  List<double>? _firstPersonEmbedding;

  // Captured Image: the final photo taken after all checks pass.
  final _capturedImage = Rxn<XFile>();
  XFile? get capturedImage => _capturedImage.value;

  // Successful Steps: human-readable labels, in completion order.
  final _successfulSteps = <String>[].obs;
  List<String> get successfulSteps => _successfulSteps;
|
|
|
|
  // Face Detector Options: fast mode on Android (lower latency for the
  // streaming path), accurate mode on iOS. Classification is required for
  // the smile / eye-open probabilities the liveness checks rely on.
  final FaceDetectorOptions options = FaceDetectorOptions(
    performanceMode:
        Platform.isAndroid ? FaceDetectorMode.fast : FaceDetectorMode.accurate,
    enableClassification: true,
    enableLandmarks: true,
    enableTracking: true,
  );

  // Device orientation → rotation in degrees, used to compute the image
  // rotation passed to ML Kit on Android.
  final orientations = {
    DeviceOrientation.portraitUp: 0,
    DeviceOrientation.landscapeLeft: 90,
    DeviceOrientation.portraitDown: 180,
    DeviceOrientation.landscapeRight: 270,
  };
|
|
|
  @override
  void onInit() {
    super.onInit();
    WidgetsBinding.instance.addObserver(this);

    // Lock orientation to portrait mode.
    // NOTE(review): _lockDeviceOrientation is defined outside this chunk.
    _lockDeviceOrientation();

    // Log camera capabilities first (debug aid only; does not gate init).
    _checkCameraCapabilities();

    // Initialize the camera, falling back through several format and
    // resolution combinations until one works.
    _initializeCameraWithFallback().catchError((e) {
      print("❌ All camera initialization attempts failed: $e");
      status.value = LivenessStatus.failed;
      currentInstruction.value =
          'Camera initialization failed. Please restart the app.';
    });

    // Detector must exist before any frame callback runs.
    _faceDetector = FaceDetector(options: options);
  }
|
|
|
|
  // 1. Camera controller configuration.
  /// Initializes the front camera, configures flash/focus/exposure, and
  /// starts the per-frame image stream feeding [_processCameraImage].
  ///
  /// On failure, sets [status] to [LivenessStatus.failed] with an error
  /// message in [currentInstruction].
  Future<void> _initializeCamera() async {
    try {
      status.value = LivenessStatus.preparing;
      currentInstruction.value = 'Initializing camera...';

      final cameras = await availableCameras();
      // NOTE(review): firstWhere throws StateError if the device has no
      // front camera; the throw is swallowed by the catch below.
      final frontCameras = cameras.firstWhere(
        (camera) => camera.lensDirection == CameraLensDirection.front,
      );

      frontCamera = frontCameras;

      // Use an image format ML Kit supports on each platform.
      _cameraController = CameraController(
        frontCamera,
        ResolutionPreset.medium, // raised from low to medium
        imageFormatGroup:
            Platform.isAndroid
                ? ImageFormatGroup
                    .nv21 // NV21 is the format ML Kit expects on Android
                : ImageFormatGroup.bgra8888, // iOS keeps BGRA8888
        enableAudio: false,
      );

      await _cameraController!.initialize();

      // Short delay to let the camera stabilize before configuring it.
      await Future.delayed(Duration(milliseconds: 1000));

      await _cameraController!.setFlashMode(FlashMode.off);
      await _cameraController!.setFocusMode(FocusMode.auto);
      await _cameraController!.setExposureMode(ExposureMode.auto);

      print(
        "Camera initialized with resolution: ${_cameraController!.value.previewSize}",
      );

      // Feed every preview frame into the face-detection pipeline.
      _cameraController!.startImageStream((CameraImage img) {
        _processCameraImage(img);
      });

      status.value = LivenessStatus.detectingFace;
      currentInstruction.value = 'Position your face in the frame';
      update();
    } catch (e) {
      print('Error initializing camera: $e');
      status.value = LivenessStatus.failed;
      currentInstruction.value = 'Failed to initialize camera: $e';
    }
  }
|
|
|
|
  // Throttling flag to control processing rate.
  // NOTE(review): not referenced anywhere in the visible part of this file
  // (_processingImage is the guard actually used); may be dead, or used by
  // code outside this chunk — verify before removing.
  bool _throttled = false;
|
|
|
|
  // Frame-bounds restriction removed — any detected face counts as "in frame".
  /// Runs the liveness state machine against one detected [face].
  ///
  /// Extracts head yaw and the classification probabilities (eyes, smile;
  /// -1.0 when ML Kit provides none), feeds them through each step-update
  /// helper in flow order, and triggers the final capture once every flag
  /// is set. No-op once the photo has been captured.
  void _handleFaceDetection(Face face) {
    if (!_isCaptured.value) {
      final double? rotY = face.headEulerAngleY; // yaw: negative=left, positive=right
      final double leftEyeOpen = face.leftEyeOpenProbability ?? -1.0;
      final double rightEyeOpen = face.rightEyeOpenProbability ?? -1.0;
      final double smileProb = face.smilingProbability ?? -1.0;

      print("HEAD DETECTION - Head angle: $rotY");
      print(
        "HEAD DETECTION - Eyes: L=$leftEyeOpen, R=$rightEyeOpen, Smile=$smileProb",
      );

      // A detected face is always treated as being in frame.
      _updateFaceInFrameStatus();

      // Dump the current step flags (debug aid).
      print("Detection state - Face in frame: ${_isFaceInFrame.value}");
      print("Detection state - Face left: ${_isFaceLeft.value}");
      print("Detection state - Face right: ${_isFaceRight.value}");
      print("Detection state - Smiled: ${_isSmiled.value}");
      print("Detection state - Eyes open: ${_isEyeOpen.value}");
      print("Detection state - Ready for photo: ${_isFaceReadyForPhoto.value}");

      // Step updates must run in this order: each helper gates on the
      // flags set by the previous one.
      _updateHeadRotationStatus(rotY);
      _updateSmilingStatus(smileProb);
      _updateEyeOpenStatus(leftEyeOpen, rightEyeOpen);
      _updateFaceInFrameForPhotoStatus(rotY, smileProb);

      print("Updated status: ${status.value}");
      print("Current instruction: ${currentInstruction.value}");

      // All checks passed → take the photo (guard against double capture).
      if (_isFaceInFrame.value &&
          _isFaceLeft.value &&
          _isFaceRight.value &&
          _isSmiled.value &&
          _isFaceReadyForPhoto.value &&
          _isEyeOpen.value) {
        if (!_isCaptured.value) {
          _captureImage();
        }
      }
    }
  }
|
|
|
|
  // 2. Input image processing.
  /// Converts a raw [CameraImage] into an ML Kit [InputImage], or returns
  /// null when the frame cannot be converted.
  ///
  /// Handles platform-specific rotation compensation, then branches on the
  /// pixel format: YUV_420_888 (raw value 35), NV21/BGRA8888, or an
  /// unsupported-format fallback.
  InputImage? _getInputImageFromCameraImage(CameraImage image) {
    try {
      final sensorOrientation = frontCamera.sensorOrientation;
      InputImageRotation? rotation;

      // Compute rotation per platform: iOS uses the sensor orientation
      // directly; Android must also compensate for device orientation.
      if (Platform.isIOS) {
        rotation =
            InputImageRotationValue.fromRawValue(sensorOrientation) ??
            InputImageRotation.rotation0deg;
      } else if (Platform.isAndroid) {
        var rotationCompensation =
            orientations[_cameraController!.value.deviceOrientation] ?? 0;

        // Front cameras are mirrored, so the compensation is additive.
        if (frontCamera.lensDirection == CameraLensDirection.front) {
          rotationCompensation =
              (sensorOrientation + rotationCompensation) % 360;
        } else {
          rotationCompensation =
              (sensorOrientation - rotationCompensation + 360) % 360;
        }

        rotation =
            InputImageRotationValue.fromRawValue(rotationCompensation) ??
            InputImageRotation.rotation0deg;
      }
      // NOTE(review): on any other platform `rotation` stays null and the
      // `rotation!` in the fallback branch below would throw (the throw is
      // swallowed by the catch); fine for mobile-only use.

      // Detect the image format from its raw platform value.
      final format = InputImageFormatValue.fromRawValue(image.format.raw);

      final Size imageSize = Size(
        image.width.toDouble(),
        image.height.toDouble(),
      );

      // YUV_420_888 (Android raw value 35): use only the Y (luminance)
      // plane, which is sufficient for face detection.
      if (image.format.raw == 35) {
        if (image.planes.length != 3) {
          // YUV_420_888 must have 3 planes (Y, U, V).
          return null;
        }

        final plane =
            image.planes[0]; // Y plane is what face detection needs

        // NOTE(review): the bytes are declared NV21 but contain only the
        // Y plane (no interleaved VU chroma). This works for luminance-
        // based face detection but is technically a mislabeled buffer.
        return InputImage.fromBytes(
          bytes: plane.bytes,
          metadata: InputImageMetadata(
            size: imageSize,
            rotation: rotation ?? InputImageRotation.rotation0deg,
            format: InputImageFormat.nv21, // format ML Kit expects
            bytesPerRow: plane.bytesPerRow,
          ),
        );
      }
      // Directly supported formats (NV21 or BGRA8888): single plane.
      else if (format == InputImageFormat.nv21 ||
          format == InputImageFormat.bgra8888) {
        if (image.planes.isEmpty) {
          return null;
        }

        final plane = image.planes.first;

        // NOTE(review): format is forced to nv21 even when the detected
        // format was bgra8888 (iOS) — confirm this is intended.
        return InputImage.fromBytes(
          bytes: plane.bytes,
          metadata: InputImageMetadata(
            size: imageSize,
            rotation: rotation ?? InputImageRotation.rotation0deg,
            format: InputImageFormat.nv21, // format ML Kit expects
            bytesPerRow: plane.bytesPerRow,
          ),
        );
      }
      // Unrecognized format: attempt a best-effort conversion.
      else {
        return _processUnsupportedFormat(image, imageSize, rotation!);
      }
    } catch (e) {
      print('❌ Error creating InputImage: $e');
      return null;
    }
  }
|
|
|
|
  // Fallback for pixel formats with no direct conversion path.
  /// Best-effort conversion of an unrecognized-format [image] into an
  /// [InputImage], or null when no plane data is available.
  ///
  /// Takes the first plane and labels it NV21 — the format ML Kit most
  /// widely accepts. This may produce wrong colors/chroma but is usually
  /// enough for luminance-based face detection.
  InputImage? _processUnsupportedFormat(
    CameraImage image,
    Size imageSize,
    InputImageRotation rotation,
  ) {
    try {
      print(
        "⚠️ Using fallback conversion for unsupported format: ${image.format.raw}",
      );

      // Whatever the format, try the first plane.
      if (image.planes.isNotEmpty) {
        final plane = image.planes.first;

        // Force NV21, the most commonly supported ML Kit format.
        return InputImage.fromBytes(
          bytes: plane.bytes,
          metadata: InputImageMetadata(
            size: imageSize,
            rotation: rotation,
            format: InputImageFormat.nv21,
            bytesPerRow: plane.bytesPerRow,
          ),
        );
      }

      print("❌ No planes available in image");
      return null;
    } catch (e) {
      print("❌ Fallback format processing failed: $e");
      return null;
    }
  }
|
|
|
|
  // 3. Camera capability inspection.
  /// Logs every available camera and, when a controller is already
  /// initialized, its current configuration. Debug aid only — has no
  /// effect on the verification flow; errors are swallowed after logging.
  Future<void> _checkCameraCapabilities() async {
    try {
      final cameras = await availableCameras();
      print("=== CAMERA CAPABILITIES ===");

      for (var camera in cameras) {
        print("Camera: ${camera.name}");
        print("  Direction: ${camera.lensDirection}");
        print("  Sensor Orientation: ${camera.sensorOrientation}");
      }

      // Only meaningful after initialization (onInit calls this before
      // the controller exists, so this branch is normally skipped there).
      if (_cameraController?.value.isInitialized == true) {
        print("=== CURRENT CAMERA CONFIG ===");
        print("Preview Size: ${_cameraController!.value.previewSize}");
        print("Aspect Ratio: ${_cameraController!.value.aspectRatio}");
        print("Is Streaming: ${_cameraController!.value.isStreamingImages}");

        final deviceOrientation =
            _cameraController!.value.deviceOrientation.toString();
        print("Device Orientation: $deviceOrientation");
      }
      print("============================");
    } catch (e) {
      print("Error checking camera capabilities: $e");
    }
  }
|
|
|
|
  // 4. Fallback camera initialization.
  /// Tries each (format, resolution) combination until one produces frames
  /// that convert cleanly to ML Kit input; throws when none work.
  ///
  /// NOTE(review): on success this only assigns [_cameraController] and
  /// [frontCamera] — unlike [_initializeCamera] it does not start the
  /// image stream or update [status]/[currentInstruction]. Verify that the
  /// stream is started elsewhere (e.g. by the view or a lifecycle resume),
  /// otherwise detection never begins after a fallback init.
  Future<void> _initializeCameraWithFallback() async {
    // YUV_420 first — it is the format actually observed in debug logs.
    final formatOptions = [
      ImageFormatGroup.yuv420,
      ImageFormatGroup.nv21,
      // ImageFormatGroup.jpeg — not usable for streaming
    ];

    final resolutionOptions = [
      ResolutionPreset.medium,
      ResolutionPreset.low,
      ResolutionPreset.high,
    ];

    for (var format in formatOptions) {
      for (var resolution in resolutionOptions) {
        try {
          print("🔄 Trying format: $format with resolution: $resolution");

          final cameras = await availableCameras();
          // Local `frontCamera` shadows the field; assigned to the field
          // below only on success.
          final frontCamera = cameras.firstWhere(
            (camera) => camera.lensDirection == CameraLensDirection.front,
          );

          final controller = CameraController(
            frontCamera,
            resolution,
            imageFormatGroup: format,
            enableAudio: false,
          );

          await controller.initialize();

          // Validate by converting a real frame (see _testCameraFormat).
          bool formatWorking = await _testCameraFormat(controller);

          if (formatWorking) {
            print(
              "✅ SUCCESS: Format $format with resolution $resolution works!",
            );
            _cameraController = controller;
            this.frontCamera = frontCamera;
            return;
          } else {
            print("❌ Format $format with resolution $resolution failed test");
            await controller.dispose();
          }
        } catch (e) {
          // Keep trying the remaining combinations.
          print("❌ Failed format $format with resolution $resolution: $e");
        }
      }
    }

    throw Exception("No compatible camera format found!");
  }
|
|
|
|
  // 5. Format validation test.
  /// Streams one frame from [controller] and reports whether it converts
  /// into a non-null ML Kit [InputImage].
  ///
  /// NOTE(review): the stream is stopped from inside its own callback and
  /// the result is read after a fixed 2 s wait — if the first frame takes
  /// longer than that, a working format can be reported as failing.
  Future<bool> _testCameraFormat(CameraController controller) async {
    try {
      bool testPassed = false;

      controller.startImageStream((CameraImage img) async {
        try {
          final inputImage = _getInputImageFromCameraImage(img);
          if (inputImage != null) {
            print("✅ InputImage created successfully");
            testPassed = true;
          }
        } catch (e) {
          print("❌ Test failed: $e");
        }

        // Stop stream after the first frame — pass or fail.
        controller.stopImageStream();
      });

      // Give the camera time to deliver (at least) one frame.
      await Future.delayed(Duration(milliseconds: 2000));

      return testPassed;
    } catch (e) {
      print("❌ Camera format test error: $e");
      return false;
    }
  }
|
|
|
|
void _updateFaceInFrameStatus() {
|
|
if (!_isFaceInFrame.value) {
|
|
_isFaceInFrame.value = true;
|
|
_addSuccessfulStep('Face in frame');
|
|
|
|
if (status.value == LivenessStatus.detectingFace) {
|
|
status.value = LivenessStatus.checkLeftRotation;
|
|
currentInstruction.value = 'Great! Now rotate your face to the left';
|
|
print("Face now in frame! Moving to LEFT rotation stage");
|
|
}
|
|
}
|
|
}
|
|
|
|
  // Relaxed head-rotation thresholds (left first, then right).
  /// Updates the left/right rotation steps from the head yaw [rotY]
  /// (degrees; negative = left, positive = right, null = unavailable).
  ///
  /// Thresholds were loosened from ±5° to ±3° to make the gesture easier
  /// to trigger. Right rotation is only checked after left has passed.
  void _updateHeadRotationStatus(double? rotY) {
    // Left rotation: any yaw below -3° counts.
    if (_isFaceInFrame.value &&
        !_isFaceLeft.value &&
        rotY != null &&
        rotY < -3) {
      _isFaceLeft.value = true;
      _addSuccessfulStep('Face rotated left');

      if (status.value == LivenessStatus.checkLeftRotation) {
        status.value = LivenessStatus.checkRightRotation;
        currentInstruction.value = 'Good! Now rotate your face to the right';
        print("Left rotation detected! Moving to RIGHT rotation stage");
      }
    }

    // Right rotation: any yaw above 3° counts, gated on left having passed.
    if (_isFaceLeft.value && !_isFaceRight.value && rotY != null && rotY > 3) {
      _isFaceRight.value = true;
      _addSuccessfulStep('Face rotated right');

      if (status.value == LivenessStatus.checkRightRotation) {
        status.value = LivenessStatus.checkSmile;
        currentInstruction.value = 'Great! Now smile for the camera';
        print("Right rotation detected! Moving to SMILE stage");
      }
    }
  }
|
|
|
|
// Longgarkan kondisi senyum
|
|
void _updateSmilingStatus(double smileProb) {
|
|
if (_isFaceInFrame.value &&
|
|
_isFaceLeft.value &&
|
|
_isFaceRight.value &&
|
|
!_isSmiled.value &&
|
|
smileProb > 0.1) {
|
|
// Nilai diturunkan dari 0.2 menjadi 0.1
|
|
_isSmiled.value = true;
|
|
_addSuccessfulStep('Smiling');
|
|
|
|
if (status.value == LivenessStatus.checkSmile) {
|
|
status.value = LivenessStatus.checkEyesOpen;
|
|
currentInstruction.value = 'Excellent! Now open your eyes wide';
|
|
print("Smile detected! Moving to EYES OPEN stage");
|
|
}
|
|
}
|
|
}
|
|
|
|
// Longgarkan kondisi mata terbuka
|
|
void _updateEyeOpenStatus(double leftEyeOpen, double rightEyeOpen) {
|
|
if (_isFaceInFrame.value &&
|
|
_isFaceLeft.value &&
|
|
_isFaceRight.value &&
|
|
_isSmiled.value &&
|
|
!_isEyeOpen.value) {
|
|
if (leftEyeOpen > 0.1 && rightEyeOpen > 0.1) {
|
|
// Nilai diturunkan dari 0.2 menjadi 0.1
|
|
_isEyeOpen.value = true;
|
|
_addSuccessfulStep('Eyes Open');
|
|
|
|
if (status.value == LivenessStatus.checkEyesOpen) {
|
|
status.value = LivenessStatus.readyForPhoto;
|
|
currentInstruction.value = 'Perfect! Hold still for photo capture';
|
|
print("Eyes open detected! Moving to READY FOR PHOTO stage");
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
  // Relaxed ready-for-photo condition.
  /// Continuously tracks whether the face is near-frontal (yaw within
  /// ±5°, looser than the ±3° rotation thresholds) after both rotation
  /// steps passed; flips [_isFaceReadyForPhoto] off again whenever the
  /// face drifts.
  ///
  /// `smileProb != null` only requires that a smile probability exists —
  /// its value is intentionally not checked here.
  void _updateFaceInFrameForPhotoStatus(double? rotY, double? smileProb) {
    if (_isFaceRight.value &&
        _isFaceLeft.value &&
        rotY != null &&
        rotY > -5 && // ±5° window — looser than the ±3° step thresholds
        rotY < 5 &&
        smileProb != null) // smile value deliberately unchecked
    {
      _isFaceReadyForPhoto.value = true;
      // Idempotent: _addSuccessfulStep ignores duplicate labels, so
      // re-entering this branch every frame is harmless.
      _addSuccessfulStep('Face Ready For Photo');

      if (status.value == LivenessStatus.checkEyesOpen) {
        status.value = LivenessStatus.readyForPhoto;
        currentInstruction.value = 'Perfect! Hold still for photo capture';
        print("Face ready for photo! Moving to READY FOR PHOTO stage");
      }
    } else {
      // Face drifted (or yaw unavailable) — no longer ready.
      _isFaceReadyForPhoto.value = false;
    }
  }
|
|
|
|
// Ganti metode _isFaceInsideFrame untuk selalu mengembalikan true
|
|
bool _isFaceInsideFrame(Rect boundingBox) {
|
|
// Selalu kembalikan true tanpa memeriksa batas-batas
|
|
return true;
|
|
}
|
|
|
|
void _addSuccessfulStep(String step) {
|
|
if (!_successfulSteps.contains(step)) {
|
|
_successfulSteps.add(step);
|
|
}
|
|
}
|
|
|
|
// Tambahkan metode untuk memangkas frame dan otomatis menyelesaikan proses verifikasi
|
|
void autoCompleteVerification() {
|
|
print("Auto-completing verification process");
|
|
|
|
// Set semua status
|
|
_isFaceInFrame.value = true;
|
|
_addSuccessfulStep('Face in frame');
|
|
|
|
_isFaceLeft.value = true;
|
|
_addSuccessfulStep('Face rotated left');
|
|
|
|
_isFaceRight.value = true;
|
|
_addSuccessfulStep('Face rotated right');
|
|
|
|
_isSmiled.value = true;
|
|
_addSuccessfulStep('Smiling');
|
|
|
|
_isEyeOpen.value = true;
|
|
_addSuccessfulStep('Eyes Open');
|
|
|
|
_isFaceReadyForPhoto.value = true;
|
|
_addSuccessfulStep('Face Ready For Photo');
|
|
|
|
// Tangkap gambar
|
|
_captureImage();
|
|
}
|
|
|
|
// Tambahkan metode ini agar di debug panel bisa dipanggil
|
|
void skipAllVerificationSteps() {
|
|
autoCompleteVerification();
|
|
}
|
|
|
|
  // Per-frame processing entry point (relaxed face detection).
  /// Converts a streamed camera frame to ML Kit input, runs face
  /// detection with a 3 s timeout, and routes the result to
  /// [_processFaces] or [_handleNoFacesDetected].
  ///
  /// Re-entrancy is prevented with [_processingImage]; frames arriving
  /// while one is being processed (or after capture) are dropped.
  Future<void> _processCameraImage(CameraImage img) async {
    // Avoid processing if already captured or currently processing.
    if (_isCaptured.value || _processingImage) return;

    _processingImage = true;

    try {
      // Small delay to leave headroom for processing between frames.
      await Future.delayed(Duration(milliseconds: 50));

      // Run detection in a microtask to yield to the event loop first.
      await Future.microtask(() async {
        final inputImage = _getInputImageFromCameraImage(img);
        if (inputImage == null) {
          // Frame could not be converted; release the guard and drop it.
          // (The finally below also resets the flag — harmless double set.)
          _processingImage = false;
          return;
        }

        // Detect faces with a timeout so a stuck frame cannot stall the
        // whole pipeline.
        List<Face> faces = [];
        try {
          faces = await _faceDetector
              .processImage(inputImage)
              .timeout(
                Duration(seconds: 3),
                onTimeout: () {
                  print("⚠️ Face detection timed out after 3 seconds");
                  return [];
                },
              );

          print("Detected ${faces.length} faces");
        } catch (e) {
          // Detection failure is treated the same as "no faces".
          print("Face detection error: $e");
        }

        // Route results through the liveness pipeline.
        if (faces.isNotEmpty) {
          await _processFaces(faces);
        } else {
          _handleNoFacesDetected();
        }
      });
    } catch (e) {
      print('Error in image processing: $e');
    } finally {
      // Ensure the guard is released even if an error occurs.
      _processingImage = false;
    }
  }
|
|
|
|
// New method to process detected faces
|
|
Future<void> _processFaces(List<Face> faces) async {
|
|
// Pilih wajah terbaik berdasarkan ukuran (prioritaskan wajah yang lebih besar)
|
|
Face? bestFace;
|
|
double largestArea = 0;
|
|
|
|
for (var face in faces) {
|
|
final area = face.boundingBox.width * face.boundingBox.height;
|
|
if (area > largestArea) {
|
|
largestArea = area;
|
|
bestFace = face;
|
|
}
|
|
}
|
|
|
|
// Periksa apakah wajah terbaik memenuhi ukuran minimum yang direkomendasikan
|
|
if (bestFace != null) {
|
|
final faceWidth = bestFace.boundingBox.width;
|
|
final faceHeight = bestFace.boundingBox.height;
|
|
|
|
print("Best face size: ${faceWidth}x$faceHeight");
|
|
|
|
// Pastikan wajah cukup besar untuk deteksi (minimal 80x80 pixel)
|
|
// Reduced from 100x100 to improve detection with varied camera resolutions
|
|
if (faceWidth >= 80 && faceHeight >= 80) {
|
|
_isNoFace.value = false;
|
|
_isMultiFace.value = false;
|
|
await _compareFaces(bestFace);
|
|
|
|
if (!_isDifferentPerson.value) {
|
|
_handleFaceDetection(bestFace);
|
|
} else {
|
|
_duplicatePersonFaceDetect();
|
|
}
|
|
} else {
|
|
print(
|
|
"Face too small: ${faceWidth}x$faceHeight, minimum 80x80 required",
|
|
);
|
|
_isNoFace.value = true;
|
|
currentInstruction.value = 'Please move closer to the camera';
|
|
}
|
|
} else if (faces.isNotEmpty) {
|
|
// If we have faces but none meet our "best face" criteria, use the first one anyway
|
|
_isNoFace.value = false;
|
|
_isMultiFace.value = faces.length > 1;
|
|
final face = faces.first;
|
|
await _compareFaces(face);
|
|
_handleFaceDetection(face);
|
|
}
|
|
}
|
|
|
|
// New method to handle when no faces are detected
|
|
void _handleNoFacesDetected() {
|
|
_isNoFace.value = true;
|
|
|
|
// Don't reset progress if already started
|
|
if (!_isFaceInFrame.value) {
|
|
status.value = LivenessStatus.detectingFace;
|
|
currentInstruction.value =
|
|
'No face detected. Please position your face in the frame and ensure good lighting.';
|
|
}
|
|
}
|
|
|
|
  // Re-entrancy guard: true while a camera frame is being analyzed in
  // _processCameraImage; frames arriving meanwhile are dropped.
  bool _processingImage = false;
|
|
|
|
// Tambahkan metode retryDetection untuk mencoba ulang jika terjadi kegagalan deteksi
|
|
void retryDetection() {
|
|
if (_cameraController != null && _cameraController!.value.isInitialized) {
|
|
// Reset flag proses tapi pertahankan kemajuan
|
|
_processingImage = false;
|
|
|
|
// Update pesan instruksi
|
|
currentInstruction.value = 'Retrying face detection...';
|
|
|
|
// Tampilkan debug info
|
|
print('Retrying face detection...');
|
|
}
|
|
}
|
|
|
|
// // Improve camera input image processing
|
|
// InputImage? _getInputImageFromCameraImage(CameraImage image) {
|
|
// final sensorOrientation = frontCamera.sensorOrientation;
|
|
// InputImageRotation? rotation;
|
|
|
|
// if (Platform.isIOS) {
|
|
// rotation = InputImageRotationValue.fromRawValue(sensorOrientation);
|
|
// } else if (Platform.isAndroid) {
|
|
// var rotationCompensation =
|
|
// orientations[_cameraController!.value.deviceOrientation];
|
|
// if (rotationCompensation == null) {
|
|
// print("Warning: null rotation compensation");
|
|
// rotationCompensation = 0; // Provide default value
|
|
// }
|
|
|
|
// if (frontCamera.lensDirection == CameraLensDirection.front) {
|
|
// rotationCompensation = (sensorOrientation + rotationCompensation) % 360;
|
|
// } else {
|
|
// rotationCompensation =
|
|
// (sensorOrientation - rotationCompensation + 360) % 360;
|
|
// }
|
|
// rotation = InputImageRotationValue.fromRawValue(rotationCompensation!);
|
|
// }
|
|
|
|
// if (rotation == null) {
|
|
// print("Warning: null input image rotation");
|
|
// return null;
|
|
// }
|
|
|
|
// final format = InputImageFormatValue.fromRawValue(image.format.raw);
|
|
// if (format == null) {
|
|
// print(
|
|
// "Warning: null input image format from raw value: ${image.format.raw}",
|
|
// );
|
|
// return null;
|
|
// }
|
|
|
|
// if ((Platform.isAndroid && format != InputImageFormat.nv21) ||
|
|
// (Platform.isIOS && format != InputImageFormat.bgra8888)) {
|
|
// print("Warning: unexpected format for platform: $format");
|
|
// return null;
|
|
// }
|
|
|
|
// if (image.planes.length != 1) {
|
|
// print("Warning: expected 1 plane, got ${image.planes.length}");
|
|
// return null;
|
|
// }
|
|
|
|
// final plane = image.planes.first;
|
|
|
|
// return InputImage.fromBytes(
|
|
// bytes: plane.bytes,
|
|
// metadata: InputImageMetadata(
|
|
// size: Size(image.width.toDouble(), image.height.toDouble()),
|
|
// rotation: rotation,
|
|
// format: format,
|
|
// bytesPerRow: plane.bytesPerRow,
|
|
// ),
|
|
// );
|
|
// }
|
|
|
|
// Metode untuk ambil foto dengan ukuran penuh
|
|
Future<void> _captureImage() async {
|
|
if (_cameraController!.value.isTakingPicture) return;
|
|
|
|
try {
|
|
status.value = LivenessStatus.photoTaken;
|
|
currentInstruction.value = 'Capturing photo...';
|
|
|
|
// Hentikan stream untuk foto berkualitas tinggi
|
|
await _cameraController!.stopImageStream();
|
|
|
|
// Beri jeda singkat agar kamera stabil
|
|
await Future.delayed(Duration(milliseconds: 500));
|
|
|
|
// Ambil foto dengan resolusi penuh
|
|
final XFile file = await _cameraController!.takePicture();
|
|
print("Image captured: ${file.path}");
|
|
|
|
// Set status captured
|
|
_isCaptured.value = true;
|
|
_capturedImage.value = file;
|
|
|
|
// Verifikasi bahwa gambar memenuhi kriteria minimum (min 480x360)
|
|
final imageFile = File(file.path);
|
|
if (await imageFile.exists()) {
|
|
final fileSize = await imageFile.length();
|
|
print("Captured image size: $fileSize bytes");
|
|
|
|
if (fileSize < 5000) {
|
|
// Gambar terlalu kecil mungkin rusak
|
|
print("Warning: Captured image is too small ($fileSize bytes)");
|
|
// Bisa retry atau handle error di sini
|
|
}
|
|
}
|
|
|
|
status.value = LivenessStatus.completed;
|
|
currentInstruction.value = 'Liveness check successful!';
|
|
|
|
// Tutup detektor wajah untuk hemat resources
|
|
_faceDetector.close();
|
|
} catch (e) {
|
|
print('Error capturing image: $e');
|
|
status.value = LivenessStatus.failed;
|
|
currentInstruction.value = 'Failed to capture image: $e';
|
|
|
|
// Restart preview jika gagal
|
|
try {
|
|
_cameraController!.startImageStream((CameraImage img) {
|
|
_processCameraImage(img);
|
|
});
|
|
} catch (e) {
|
|
print('Error restarting preview: $e');
|
|
}
|
|
}
|
|
}
|
|
|
|
// Handle detection of a different person (duplicate face)
|
|
void _duplicatePersonFaceDetect() {
|
|
print(
|
|
"Different person detected! Please ensure only one person is in front of the camera.",
|
|
);
|
|
_isDifferentPerson.value = true;
|
|
currentInstruction.value =
|
|
'Different person detected. Please ensure only one person is in front of the camera.';
|
|
status.value = LivenessStatus.failed;
|
|
}
|
|
|
|
// Face comparison methods
|
|
Future<List<double>> _extractFaceEmbeddings(Face face) async {
|
|
return [
|
|
face.boundingBox.left,
|
|
face.boundingBox.top,
|
|
face.boundingBox.right,
|
|
face.boundingBox.bottom,
|
|
];
|
|
}
|
|
|
|
Future<void> _compareFaces(Face currentFace) async {
|
|
final currentEmbedding = await _extractFaceEmbeddings(currentFace);
|
|
|
|
if (_firstPersonEmbedding == null) {
|
|
_firstPersonEmbedding = currentEmbedding;
|
|
} else {
|
|
final double similarity = _calculateSimilarity(
|
|
_firstPersonEmbedding!,
|
|
currentEmbedding,
|
|
);
|
|
_isDifferentPerson.value = similarity < 0.8;
|
|
}
|
|
}
|
|
|
|
double _calculateSimilarity(
|
|
List<double> embedding1,
|
|
List<double> embedding2,
|
|
) {
|
|
double dotProduct = 0.0;
|
|
double norm1 = 0.0;
|
|
double norm2 = 0.0;
|
|
|
|
for (int i = 0; i < embedding1.length; i++) {
|
|
dotProduct += embedding1[i] * embedding2[i];
|
|
norm1 += embedding1[i] * embedding1[i];
|
|
norm2 += embedding2[i] * embedding2[i];
|
|
}
|
|
|
|
return dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
|
|
}
|
|
|
|
String getCurrentDirection() {
|
|
// Use the currentInstruction instead
|
|
return currentInstruction.value;
|
|
}
|
|
|
|
  @override
  void didChangeAppLifecycleState(AppLifecycleState state) {
    // Release the camera while backgrounded; re-acquire on resume.
    final CameraController? cameraController = _cameraController;
    if (cameraController == null || !cameraController.value.isInitialized) {
      return;
    }
    if (state == AppLifecycleState.inactive) {
      // NOTE(review): the controller is disposed but _cameraController is
      // not nulled, so until resume re-initializes it other code may touch
      // a disposed controller — confirm callers tolerate this window.
      cameraController.dispose();
    } else if (state == AppLifecycleState.resumed) {
      _initializeCamera();
    }
  }
|
|
|
|
  // Track if the user left/cancelled the screen before capture finished;
  // checked in onClose to avoid double cancellation handling.
  final RxBool wasDetectionCancelled = RxBool(false);
|
|
|
|
  // Cleanup when the user cancels the liveness flow.
  /// Marks the detection as cancelled and notifies the selfie
  /// verification flow (when its controller is registered) so it can roll
  /// back its own state.
  void handleCancellation() {
    wasDetectionCancelled.value = true;

    // Make sure to update the selfie controller.
    if (Get.isRegistered<SelfieVerificationController>()) {
      final selfieController = Get.find<SelfieVerificationController>();
      selfieController.cancelLivenessDetection();
    }
  }
|
|
|
|
  @override
  void onClose() {
    // If detection was not completed and not already cancelled, treat
    // leaving the screen as a cancellation.
    if (!isCaptured && !wasDetectionCancelled.value) {
      handleCancellation();
    }

    // Release ML Kit and camera resources.
    // NOTE(review): _faceDetector.close() may already have been called in
    // _captureImage after a successful capture — confirm close() is
    // idempotent, or guard the double call.
    _faceDetector.close();
    if (_cameraController != null) _cameraController!.dispose();
    WidgetsBinding.instance.removeObserver(this);
    _faceMeshDetector.close();
    super.onClose();
  }
|
|
|
|
  /// Generates a [FaceModel] from the captured image, or
  /// [FaceModel.empty] when no photo has been captured yet.
  ///
  /// NOTE(review): confidence (0.95/0.92), boundingBox, and the liveness
  /// message are hardcoded placeholders, not measured values — confirm
  /// downstream consumers do not treat them as real metrics.
  FaceModel generateFaceModel() {
    if (_capturedImage.value == null) {
      return FaceModel.empty();
    }

    final uuid = Uuid();

    return FaceModel(
      imagePath: _capturedImage.value!.path,
      faceId: uuid.v4(), // fresh random identifier per call
      confidence: 0.95,
      boundingBox: {'x': 0.1, 'y': 0.1, 'width': 0.8, 'height': 0.8},
    ).withLiveness(
      isLive: true,
      confidence: 0.92,
      message: 'Liveness check passed successfully',
    );
  }
|
|
|
|
  // Manual step advancement for testing/debugging.
  /// Forces the current liveness step to pass and advances the flow to
  /// the next stage, mirroring exactly what the per-frame detection
  /// helpers would do (flags, step labels, status, instruction).
  void forceAdvanceToNextStep() {
    switch (status.value) {
      case LivenessStatus.detectingFace:
        _isFaceInFrame.value = true;
        _addSuccessfulStep('Face in frame');
        status.value = LivenessStatus.checkLeftRotation;
        currentInstruction.value = 'Great! Now rotate your face to the left';
        print("Forced: Face in frame step completed");
        break;
      case LivenessStatus.checkLeftRotation:
        _isFaceLeft.value = true;
        _addSuccessfulStep('Face rotated left');
        status.value = LivenessStatus.checkRightRotation;
        currentInstruction.value = 'Good! Now rotate your face to the right';
        print("Forced: Face left rotation step completed");
        break;
      case LivenessStatus.checkRightRotation:
        _isFaceRight.value = true;
        _addSuccessfulStep('Face rotated right');
        status.value = LivenessStatus.checkSmile;
        currentInstruction.value = 'Great! Now smile for the camera';
        print("Forced: Face right rotation step completed");
        break;
      case LivenessStatus.checkSmile:
        _isSmiled.value = true;
        _addSuccessfulStep('Smiling');
        status.value = LivenessStatus.checkEyesOpen;
        currentInstruction.value = 'Excellent! Now open your eyes wide';
        print("Forced: Smile step completed");
        break;
      case LivenessStatus.checkEyesOpen:
        // Eyes-open and ready-for-photo are advanced together, matching
        // how _updateEyeOpenStatus/_updateFaceInFrameForPhotoStatus
        // both target readyForPhoto.
        _isEyeOpen.value = true;
        _isFaceReadyForPhoto.value = true;
        _addSuccessfulStep('Eyes Open');
        _addSuccessfulStep('Face Ready For Photo');
        status.value = LivenessStatus.readyForPhoto;
        currentInstruction.value = 'Perfect! Hold still for photo capture';
        print("Forced: Eyes open step completed");
        break;
      case LivenessStatus.readyForPhoto:
        forceCaptureImage();
        break;
      default:
        // preparing / photoTaken / completed / failed: nothing to force.
        print("Forced: No action for current state: ${status.value}");
        break;
    }
  }
|
|
|
|
// Debug aid: bypass the gesture checks and capture immediately.
/// Takes a photo right away, skipping the remaining liveness gestures.
///
/// Logs and returns without side effects when the camera is not ready or
/// a capture is already in flight. On success the flow transitions to
/// [LivenessStatus.completed]; on error to [LivenessStatus.failed].
Future<void> forceCaptureImage() async {
  final camera = _cameraController;
  if (camera == null || !camera.value.isInitialized) {
    print("Cannot force capture: camera not initialized");
    return;
  }

  if (camera.value.isTakingPicture) {
    print("Cannot force capture: camera already taking picture");
    return;
  }

  try {
    print("Forcing manual image capture...");
    status.value = LivenessStatus.photoTaken;
    currentInstruction.value = 'Capturing photo...';

    final XFile shot = await camera.takePicture();
    _isCaptured.value = true;
    _capturedImage.value = shot;

    status.value = LivenessStatus.completed;
    currentInstruction.value = 'Liveness check successful! (Manual capture)';
    print("Manual image capture successful");
  } catch (e) {
    print('Error during manual image capture: $e');
    status.value = LivenessStatus.failed;
    currentInstruction.value = 'Failed to capture image manually: $e';
  }
}
|
|
|
|
// Reset Process
/// Returns the controller to its initial state so the liveness flow can
/// be run again from the very beginning.
void resetProcess() {
  // Clear every per-run detection flag in one pass.
  for (final flag in [
    _isFaceInFrame,
    _isFaceLeft,
    _isFaceRight,
    _isEyeOpen,
    _isNoFace,
    _isMultiFace,
    _isCaptured,
    _isSmiled,
    _isFaceReadyForPhoto,
    _isDifferentPerson,
  ]) {
    flag.value = false;
  }
  _processingImage = false;
  _throttled = false;

  status.value = LivenessStatus.preparing;
  currentInstruction.value = 'Initializing camera...';
  _successfulSteps.clear();

  final camera = _cameraController;
  if (camera != null && camera.value.isInitialized) {
    // Reset zoom to minimum for better face detection; zoom failures are
    // non-fatal and only logged.
    camera
        .getMinZoomLevel()
        .then((minZoom) {
          camera.setZoomLevel(minZoom);
        })
        .catchError((e) {
          print("Failed to reset zoom level: $e");
        });
  } else {
    // Camera was lost (e.g. disposed on a lifecycle change) — rebuild it.
    _initializeCamera();
  }
}
|
|
|
|
// Lock orientation to portrait.
/// Restricts the device to portrait-up for the duration of the liveness
/// flow; failures are logged and otherwise ignored.
Future<void> _lockDeviceOrientation() async {
  try {
    print("🔒 Locking device orientation to portrait");
    await SystemChrome.setPreferredOrientations(
      const [DeviceOrientation.portraitUp],
    );
    print("✅ Device orientation locked to portrait");
  } catch (e) {
    print("❌ Failed to lock orientation: $e");
  }
}
|
|
|
|
// Restore the allowed device orientations.
/// Re-enables all four orientations after [_lockDeviceOrientation] has
/// pinned the screen to portrait; failures are logged and ignored.
Future<void> _resetDeviceOrientation() async {
  try {
    print("🔓 Resetting device orientation");
    await SystemChrome.setPreferredOrientations(
      const [
        DeviceOrientation.portraitUp,
        DeviceOrientation.portraitDown,
        DeviceOrientation.landscapeLeft,
        DeviceOrientation.landscapeRight,
      ],
    );
  } catch (e) {
    print("❌ Failed to reset orientation: $e");
  }
}
|
|
|
|
/// Human-readable name of the device's current orientation.
///
/// Reads the ambient [MediaQuery] via [Get.context]. Returns "Unknown"
/// instead of crashing when no context is attached yet (the original
/// used `Get.context!`, which throws before the first frame or after
/// the route is torn down).
String _getCurrentDeviceOrientation() {
  final context = Get.context;
  if (context == null) return "Unknown";
  final orientation = MediaQuery.of(context).orientation;
  return orientation == Orientation.portrait ? "Portrait" : "Landscape";
}
|
|
}
|
|
|
|
|
|
import 'package:camera/camera.dart';
|
|
import 'package:flutter/material.dart';
|
|
|
|
/// Stand-alone selfie-capture screen.
///
/// Hosts a live camera preview with face validation; all behavior lives
/// in [_CameraScreenState].
class CameraScreen extends StatefulWidget {
  const CameraScreen({super.key});

  @override
  State<CameraScreen> createState() => _CameraScreenState();
}
|
|
|
|
class _CameraScreenState extends State<CameraScreen>
    with WidgetsBindingObserver, TickerProviderStateMixin {
  CameraController? controller;

  // User-facing status / error message shown under the preview.
  String error = '';

  // Last successfully captured selfie; cleared whenever validation fails.
  // BUG FIX: this field was referenced by clearCaptured() and the capture
  // button but was never declared.
  XFile? imageFile;

  // Face detector tuned for selfie validation: classification supplies the
  // eye-open probabilities used in [checkImage]; fast mode on Android keeps
  // the stream responsive, accurate mode on iOS.
  FaceDetector faceDetector = FaceDetector(
    options: FaceDetectorOptions(
      enableClassification: true,
      enableContours: true,
      enableTracking: true,
      enableLandmarks: true,
      performanceMode: Platform.isAndroid
          ? FaceDetectorMode.fast
          : FaceDetectorMode.accurate,
    ),
  );

  // Rotation compensation (degrees) per device orientation, used when
  // converting camera frames into ML Kit [InputImage]s.
  final orientations = {
    DeviceOrientation.portraitUp: 0,
    DeviceOrientation.landscapeLeft: 90,
    DeviceOrientation.portraitDown: 180,
    DeviceOrientation.landscapeRight: 270,
  };

  /// Creates the camera controller and kicks off initialization.
  Future<void> initialize() async {
    controller = CameraController(
      cameras[1], // assumes index 1 is the front camera — TODO confirm
      ResolutionPreset.medium,
      imageFormatGroup: Platform.isAndroid
          ? ImageFormatGroup.nv21
          : ImageFormatGroup.bgra8888,
    );
    if (mounted) setState(() {});
    // BUG FIX: the original passed `controller!.description` to a method
    // declared with zero parameters (compile error). The method operates on
    // the `controller` field directly, so no argument is needed.
    await initializeCameraController();
    if (mounted) setState(() {});
  }

  @override
  void initState() {
    super.initState();
    WidgetsBinding.instance.addObserver(this);
    initialize();
  }

  @override
  void dispose() {
    faceDetector.close();
    controller?.dispose();
    WidgetsBinding.instance.removeObserver(this);
    super.dispose();
  }

  @override
  void didChangeAppLifecycleState(AppLifecycleState state) {
    final CameraController? cameraController = controller;
    if (cameraController == null || !cameraController.value.isInitialized) {
      return;
    }

    if (state == AppLifecycleState.inactive) {
      // Release the camera while backgrounded; it is rebuilt on resume.
      cameraController.dispose();
    } else if (state == AppLifecycleState.resumed) {
      initialize();
    }
  }

  /// Initializes [controller], disables flash, and starts the ML Kit
  /// image stream.
  ///
  /// On permission errors the screen is popped and a settings dialog is
  /// shown; other camera errors surface via EasyLoading.
  Future<void> initializeCameraController() async {
    try {
      await controller!.initialize();
      controller!.setFlashMode(FlashMode.off);
      // If the controller is updated then update the UI.
      controller!.addListener(() {
        if (mounted) setState(() {});
      });
      // BUG FIX: the original called startImageStream inside the listener
      // above, re-invoking it on every controller notification (which
      // throws once the stream is already running). Start it exactly once,
      // after initialization succeeds.
      await controller!.startImageStream(_processCameraImage);
      if (mounted) setState(() {});
    } on CameraException catch (e) {
      switch (e.code) {
        // All three permission-related codes get the same treatment.
        case 'CameraAccessDenied':
        case 'CameraAccessDeniedWithoutPrompt':
        case 'CameraAccessRestricted':
          Get.back();
          cameraDialog();
          break;
        default:
          Get.back();
          EasyLoading.showError(e.description!);
          break;
      }
    }
    if (mounted) setState(() {});
  }

  /// Explains that camera permission is missing and offers a jump to the
  /// system app settings.
  void cameraDialog() {
    Get.defaultDialog(
      title: 'Camera Access',
      middleText:
          'Looks like you have not provided permission to camera completely. Enable it in the settings.',
      onConfirm: () async {
        Get.back();
        await openAppSettings();
      },
    );
  }

  /// Drops any captured image and surfaces [message] to the user.
  void clearCaptured(String message) {
    debugPrint(message);
    imageFile = null;
    error = message;
    if (mounted) setState(() {});
  }

  /// Converts each streamed frame to an [InputImage] and forwards it to
  /// face validation. Frames that cannot be converted are skipped.
  void _processCameraImage(CameraImage image) {
    final inputImage = inputImageFromCameraImage(image);
    if (inputImage == null) return;
    checkImage(inputImage);
  }

  /// Validates that exactly one face with both eyes open is visible and
  /// updates [error] accordingly.
  Future<void> checkImage(InputImage inputImage) async {
    List<Face> faces = await faceDetector.processImage(inputImage);
    debugPrint('faces: $faces');
    if (faces.isEmpty) {
      clearCaptured('No faces detected in the image.');
      // BUG FIX: return here — the original fell through to the face loop.
      return;
    }
    if (faces.length > 1) {
      clearCaptured(
        'Multiple faces detected. Please try capturing a single face.',
      );
      // BUG FIX: return here — the original went on to validate eyes on
      // each of the multiple faces it had just rejected.
      return;
    }
    // Handle the detected face (exactly one at this point).
    for (Face face in faces) {
      if (face.leftEyeOpenProbability != null &&
          face.rightEyeOpenProbability != null) {
        final leftEyeOpen = face.leftEyeOpenProbability! > 0.5;
        final rightEyeOpen = face.rightEyeOpenProbability! > 0.5;
        if (leftEyeOpen && rightEyeOpen) {
          error = 'Click capture to save this image';
          if (mounted) setState(() {});
          return;
        } else if (!leftEyeOpen && !rightEyeOpen) {
          clearCaptured('Both eyes are closed!');
        } else if (leftEyeOpen && !rightEyeOpen) {
          clearCaptured('Left eye is open, right eye is closed.');
        } else if (!leftEyeOpen && rightEyeOpen) {
          clearCaptured('Right eye is open, left eye is closed.');
        }
      } else {
        clearCaptured(
          'Looks like either one or both eyes have not been captured properly. Please try again.',
        );
      }
    }
  }

  /// Builds an ML Kit [InputImage] from a raw [CameraImage], compensating
  /// for sensor and device rotation.
  ///
  /// Returns null when the frame cannot be represented: unknown device
  /// orientation, unresolvable rotation, a pixel format other than
  /// nv21 (Android) / bgra8888 (iOS), or multi-plane data.
  InputImage? inputImageFromCameraImage(CameraImage image) {
    final camera = cameras[1];
    final sensorOrientation = camera.sensorOrientation;
    InputImageRotation? rotation;
    if (Platform.isIOS) {
      rotation = InputImageRotationValue.fromRawValue(sensorOrientation);
    } else if (Platform.isAndroid) {
      var rotationCompensation =
          orientations[controller!.value.deviceOrientation];
      if (rotationCompensation == null) return null;
      if (camera.lensDirection == CameraLensDirection.front) {
        // front-facing
        rotationCompensation = (sensorOrientation + rotationCompensation) % 360;
      } else {
        // back-facing
        rotationCompensation =
            (sensorOrientation - rotationCompensation + 360) % 360;
      }
      rotation = InputImageRotationValue.fromRawValue(rotationCompensation);
    }
    if (rotation == null) return null;

    // get image format
    final format = InputImageFormatValue.fromRawValue(image.format.raw);

    if (format == null ||
        (Platform.isAndroid && format != InputImageFormat.nv21) ||
        (Platform.isIOS && format != InputImageFormat.bgra8888)) return null;

    // since format is constrained to nv21 or bgra8888, both only have one plane
    if (image.planes.length != 1) return null;
    final plane = image.planes.first;

    // compose InputImage using bytes
    return InputImage.fromBytes(
      bytes: plane.bytes,
      metadata: InputImageMetadata(
        size: Size(image.width.toDouble(), image.height.toDouble()),
        rotation: rotation, // used only in Android
        format: format, // used only in iOS
        bytesPerRow: plane.bytesPerRow, // used only in iOS
      ),
    );
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        centerTitle: true,
        title: Text('Capture Your Selfie'),
      ),
      bottomNavigationBar: controller == null
          ? null
          : Column(
              mainAxisSize: MainAxisSize.min,
              children: [
                Text(error),
                SizedBox(height: 30),
                ElevatedButton(
                  child: Text('Capture'),
                  onPressed: () async {
                    // BUG FIX: keep the captured file instead of awaiting
                    // takePicture() and discarding its result.
                    imageFile = await controller!.takePicture();
                    if (mounted) setState(() {});
                  },
                ),
                SizedBox(height: 20),
              ],
            ),
      body: controller == null
          ? Center(child: Text('Need to access camera to capture selfie'))
          : CameraPreview(controller!),
    );
  }
}