{{-- TIF_E41210598/resources/views/deteksi/realtime.blade.php --}}
@extends('layouts.app')
@section('title', 'Deteksi Real-Time')
@section('contents')
<section class="about section" id="about">
<h2 class="section-title text-center">Deteksi Pose Jurus 1 Tangan Kosong IPSI</h2>
<div class="about__container" style="display: flex; flex-wrap: wrap; justify-content: center; gap: 20px;">
<div class="video-container" style="position: relative;">
<video id="webcam" width="640" height="480" autoplay playsinline muted></video>
<canvas id="output-canvas" width="640" height="480"
style="position: absolute; top: 0; left: 0;"></canvas>
</div>
<div id="prediction-result" class="prediction-box">
<p>Prediksi: <span id="pose-label">-</span></p>
<p>Probability: <span id="pose-accuracy">-</span></p>
</div>
</div>
</section>
@endsection
@section('script')
<script src="https://code.jquery.com/jquery-3.6.0.min.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils"></script>
<script>
// Class labels, index-aligned with the classifier's softmax output:
// seven single-person empty-hand jurus movements (A1–A7), each with a
// correct ("benar") and incorrect ("salah") variant — 14 classes total.
const poseClasses = [
'A1_benar', 'A1_salah','A2_benar', 'A2_salah', 'A3_benar', 'A3_salah',
'A4_benar', 'A4_salah', 'A5_benar', 'A5_salah', 'A6_benar', 'A6_salah',
'A7_benar', 'A7_salah'
];
// DOM handles: webcam <video>, overlay <canvas> (+ its 2D context), and the
// two <span>s that display the predicted label and its probability.
const videoElement = document.getElementById('webcam');
const canvasElement = document.getElementById('output-canvas');
const ctx = canvasElement.getContext('2d');
const poseLabelElement = document.getElementById('pose-label');
const poseAccuracyElement = document.getElementById('pose-accuracy');
// Populated during init(): TF.js model, MediaPipe Pose detector, camera loop.
let model = null;
let poseDetector = null;
let camera = null;
/**
 * Fetch the TensorFlow.js classifier from the public models directory and
 * store it in the module-level `model`. On failure the user is alerted and
 * `model` stays null, so predictPose() silently skips every frame.
 */
async function loadModel() {
    return tf.loadLayersModel('/models/model.json')
        .then((loaded) => {
            model = loaded;
            console.log('✅ Model loaded');
        })
        .catch((err) => {
            console.error('❌ Gagal load model:', err);
            alert('Model gagal dimuat');
        });
}
/**
 * Create and configure the MediaPipe Pose detector (assets pulled from the
 * jsDelivr CDN) and wire its results callback to onPoseResults.
 */
function setupPose() {
    const cdnBase = 'https://cdn.jsdelivr.net/npm/@mediapipe/pose';
    poseDetector = new Pose({
        locateFile: (asset) => `${cdnBase}/${asset}`
    });

    const detectorOptions = {
        modelComplexity: 1,          // 0..2; 1 balances speed vs accuracy
        smoothLandmarks: true,
        enableSegmentation: false,   // only landmarks are needed here
        minDetectionConfidence: 0.5,
        minTrackingConfidence: 0.5
    };
    poseDetector.setOptions(detectorOptions);
    poseDetector.onResults(onPoseResults);
}
/**
 * MediaPipe results callback: redraws the current video frame with the
 * skeleton overlay, then flattens the landmarks into the feature vector
 * expected by the classifier and triggers a prediction.
 *
 * @param results MediaPipe Pose results ({ image, poseLandmarks }).
 */
function onPoseResults(results) {
    // Skip frames where MediaPipe found no person.
    if (!results.poseLandmarks) return;

    ctx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    ctx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);
    drawLandmarks(results.poseLandmarks);

    // Flatten the landmarks into [x1, y1, z1, x2, ...] pixel coordinates.
    // Fix: x was previously scaled by the canvas HEIGHT, which distorts the
    // horizontal axis relative to the drawn skeleton (drawLandmarks scales
    // x by width). MediaPipe z shares x's scale, hence width for z as well.
    // NOTE(review): if the classifier was trained on the old (height-scaled)
    // x values, retrain or match the training pipeline before shipping this.
    const landmarks = results.poseLandmarks.map(landmark => [
        landmark.x * canvasElement.width,
        landmark.y * canvasElement.height,
        landmark.z * canvasElement.width
    ]).flat();
    predictPose(landmarks);
}
/**
 * Paint one red dot per detected landmark, after stroking the green
 * skeleton lines so the dots sit on top. Landmark coordinates arrive
 * normalised to [0, 1] and are mapped to canvas pixels here.
 */
function drawLandmarks(landmarks) {
    drawConnections(landmarks);
    ctx.fillStyle = '#FF0000';
    for (const point of landmarks) {
        const px = point.x * canvasElement.width;
        const py = point.y * canvasElement.height;
        ctx.beginPath();
        ctx.arc(px, py, 4, 0, 2 * Math.PI);
        ctx.fill();
    }
}
/**
 * Stroke the green skeleton: each pair below is an edge between two of the
 * 33 MediaPipe pose landmark indices. Edges whose endpoints are missing
 * from the current frame are skipped.
 */
function drawConnections(landmarks) {
    const POSE_EDGES = [
        // head / face
        [0, 1], [1, 2], [2, 3], [3, 7], [0, 4], [4, 5], [5, 6], [6, 8],
        // left arm and hand
        [11, 13], [13, 15], [15, 17], [15, 19], [15, 21],
        // right arm and hand
        [12, 14], [14, 16], [16, 18], [16, 20], [16, 22],
        // torso
        [11, 12], [11, 23], [12, 24], [23, 24],
        // left leg and foot
        [23, 25], [25, 27], [27, 29], [27, 31],
        // right leg and foot
        [24, 26], [26, 28], [28, 30], [28, 32]
    ];

    ctx.strokeStyle = '#00FF00';
    ctx.lineWidth = 2;
    for (const [from, to] of POSE_EDGES) {
        const a = landmarks[from];
        const b = landmarks[to];
        if (!a || !b) continue;
        ctx.beginPath();
        ctx.moveTo(a.x * canvasElement.width, a.y * canvasElement.height);
        ctx.lineTo(b.x * canvasElement.width, b.y * canvasElement.height);
        ctx.stroke();
    }
}
/**
 * Run the classifier on one flattened landmark vector and update the two
 * result <span>s with the top class label and its probability.
 *
 * Fix: the original leaked the tensors returned by `prediction.argMax(1)`
 * and `prediction.max(1)` on every video frame (only the input and the
 * prediction were disposed). tf.tidy() frees every intermediate tensor
 * created inside its callback, eliminating the per-frame leak.
 *
 * @param landmarks flat number[] of landmark coordinates (one sample).
 */
async function predictPose(landmarks) {
    if (!model) return; // model failed to load or is still loading

    const [classIdx, confidence] = tf.tidy(() => {
        const input = tf.tensor2d([landmarks]);
        const prediction = model.predict(input);
        // dataSync is acceptable here: the tensors are tiny (1 x 14).
        const idx = prediction.argMax(1).dataSync()[0];
        const conf = prediction.max(1).dataSync()[0];
        return [idx, conf];
    });

    poseLabelElement.textContent = poseClasses[classIdx] || 'Unknown';
    poseAccuracyElement.textContent = `${confidence.toFixed(4)}`;
}
/**
 * Start the MediaPipe camera helper at 640x480: every captured webcam
 * frame is forwarded to the pose detector, which invokes onPoseResults.
 */
function startCamera() {
    const forwardFrame = async () => {
        await poseDetector.send({ image: videoElement });
    };
    camera = new Camera(videoElement, {
        onFrame: forwardFrame,
        width: 640,
        height: 480
    });
    camera.start();
}
/**
 * Boot sequence, run once the DOM is ready: load the classifier first
 * (awaited), then configure MediaPipe Pose, then start streaming frames.
 */
const init = async () => {
    await loadModel();
    setupPose();
    startCamera();
};
document.addEventListener('DOMContentLoaded', init);
</script>
<style>
.section-title {
text-align: center;
font-size: 1.8rem;
margin-bottom: 20px;
color: #001f3f; /* navy blue ("biru dongker") */
}
.about__container {
display: flex;
flex-wrap: wrap;
justify-content: center;
gap: 20px;
}
.video-container {
position: relative;
width: 640px;
height: 480px;
border: 2px solid #ccc;
border-radius: 10px;
overflow: hidden;
}
.prediction-box {
flex-shrink: 0;
width: 280px;
height: fit-content;
padding: 20px;
background: #f4f4f4;
text-align: center;
border-radius: 10px;
font-size: 1.2rem;
box-shadow: 0 2px 6px rgba(0,0,0,0.1);
}
#pose-label {
font-weight: bold;
color: #2c3e50;
}
#pose-accuracy {
font-weight: bold;
color: #27ae60;
}
</style>
@endsection