TIF_E41210598/resources/views/deteksi/upload.blade.php

@extends('layouts.app')
@section('title', 'Deteksi Video')
@section('contents')
<section class="about section" id="about">
<h2 class="section-title text-center">Deteksi Pose Jurus 1 Tangan Kosong IPSI</h2>
<div class="about__container">
<div style="display: flex; gap: 20px; flex-wrap: wrap; justify-content: center;">
<div class="video-container">
<video id="video-upload" controls></video>
<canvas id="output-canvas"></canvas>
</div>
<div>
<div class="text-center mt-4 flex gap-3 justify-center">
<label class="cursor-pointer inline-block px-6 py-3 bg-blue-600 text-white rounded-xl shadow hover:bg-blue-700 transition">
Upload Video
<input type="file" accept="video/*" onchange="handleVideoUpload(event)" class="hidden" id="video-input">
</label>
<button onclick="resetVideo()" class="inline-block px-6 py-3 bg-red-600 text-white rounded-xl shadow hover:bg-red-700 transition">
Reset
</button>
</div>
<div id="prediction-result" class="prediction-box mt-4">
<p>Prediksi: <span id="pose-label">-</span></p>
<p>Probability: <span id="pose-accuracy">-</span></p>
<ul id="all-probabilities" class="text-left mt-2"></ul>
</div>
</div>
</div>
</div>
</section>
@endsection
@section('script')
<script src="https://code.jquery.com/jquery-3.6.0.min.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose"></script>
<script>
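// Client-side pipeline: read the uploaded video, run MediaPipe Pose on each frame,
// flatten the detected landmarks, classify them with a TensorFlow.js model, and
// render the skeleton overlay plus per-class probabilities.
// 14 output classes: movements A1-A7 of the jurus, each with a "benar" (correct)
// and "salah" (incorrect) variant.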
const poseClasses = [
'A1_benar', 'A1_salah','A2_benar', 'A2_salah', 'A3_benar', 'A3_salah',
'A4_benar', 'A4_salah', 'A5_benar', 'A5_salah', 'A6_benar', 'A6_salah',
'A7_benar', 'A7_salah'
];
const videoElement = document.getElementById('video-upload');
const canvasElement = document.getElementById('output-canvas');
const ctx = canvasElement.getContext('2d');
const poseLabelElement = document.getElementById('pose-label');
const poseAccuracyElement = document.getElementById('pose-accuracy');
const fileInput = document.getElementById('video-input');
const probList = document.getElementById('all-probabilities');
let model = null;
let poseDetector = null;
let animationFrameId = null;
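// Load the TF.js LayersModel served from the app's public /models directory.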
async function loadTFModel() {
try {
model = await tf.loadLayersModel('/models/model.json');
console.log('✅ Model loaded');
} catch (error) {
console.error('Gagal memuat model:', error);
alert('Model gagal dimuat');
}
}
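// Configure MediaPipe Pose: its WASM/model assets are fetched from the jsDelivr CDN
// and every processed frame is routed to onPoseResults.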
function setupPoseDetection() {
poseDetector = new Pose({
locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/pose/${file}`
});
poseDetector.setOptions({
modelComplexity: 1,
smoothLandmarks: true,
enableSegmentation: false,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
poseDetector.onResults(onPoseResults);
}
function onPoseResults(results) {
if (!results.poseLandmarks) return;
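// Redraw the current video frame and overlay the detected skeleton on the canvas.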
ctx.clearRect(0, 0, canvasElement.width, canvasElement.height);
ctx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);
drawLandmarks(results.poseLandmarks);
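// Flatten the 33 landmarks into a single [x, y, z, ...] feature vector scaled by
// the canvas dimensions; this scaling must match the preprocessing used when the
// classifier was trained.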
const landmarks = results.poseLandmarks.map(landmark => [
landmark.x * canvasElement.height,
landmark.y * canvasElement.height,
landmark.z * canvasElement.width
]).flat();
predictPose(landmarks);
}
function drawLandmarks(landmarks) {
drawConnections(landmarks);
ctx.fillStyle = '#FF0000';
landmarks.forEach(landmark => {
ctx.beginPath();
ctx.arc(landmark.x * canvasElement.width, landmark.y * canvasElement.height, 5, 0, 2 * Math.PI);
ctx.fill();
});
}
function drawConnections(landmarks) {
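// Landmark index pairs defining the skeleton edges (face, arms, torso, legs),
// following MediaPipe's pose landmark numbering.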
const connections = [
[0, 1], [1, 2], [2, 3], [3, 7], [0, 4], [4, 5], [5, 6], [6, 8],
[11, 13], [13, 15], [15, 17], [15, 19], [15, 21],
[12, 14], [14, 16], [16, 18], [16, 20], [16, 22],
[11, 12], [11, 23], [12, 24], [23, 24],
[23, 25], [25, 27], [27, 29], [27, 31],
[24, 26], [26, 28], [28, 30], [28, 32]
];
ctx.strokeStyle = '#00FF00';
ctx.lineWidth = 2;
connections.forEach(([i, j]) => {
if (landmarks[i] && landmarks[j]) {
ctx.beginPath();
ctx.moveTo(landmarks[i].x * canvasElement.width, landmarks[i].y * canvasElement.height);
ctx.lineTo(landmarks[j].x * canvasElement.width, landmarks[j].y * canvasElement.height);
ctx.stroke();
}
});
}
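// Run the classifier on one flattened landmark vector and update the result panel.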
async function predictPose(landmarks) {
if (!model) return;
const inputTensor = tf.tensor2d([landmarks]);
const prediction = model.predict(inputTensor);
const predictionData = await prediction.data();
// Pick the most probable class directly from the probability array (avoids leaving
// an undisposed argMax tensor behind).
const predictedClass = predictionData.indexOf(Math.max(...predictionData));
const confidence = predictionData[predictedClass];
poseLabelElement.textContent = poseClasses[predictedClass] || 'Unknown';
poseAccuracyElement.textContent = `${confidence.toFixed(4)}`;
// Display the probability of every class
probList.innerHTML = '';
predictionData.forEach((prob, idx) => {
const li = document.createElement('li');
li.textContent = `${poseClasses[idx] || 'Label-' + idx}: ${prob.toFixed(4)}`;
probList.appendChild(li);
});
inputTensor.dispose();
prediction.dispose();
}
function handleVideoUpload(event) {
const file = event.target.files[0];
if (!file) return;
// Release the previous blob URL (if any) before creating a new one.
if (videoElement.src) URL.revokeObjectURL(videoElement.src);
const url = URL.createObjectURL(file);
videoElement.src = url;
videoElement.load();
videoElement.onloadedmetadata = () => {
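// Match the overlay canvas to the video's intrinsic resolution.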
canvasElement.width = videoElement.videoWidth;
canvasElement.height = videoElement.videoHeight;
};
}
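// Feed frames to MediaPipe in a requestAnimationFrame loop while the video plays;
// each processed frame triggers onPoseResults.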
function analyzeVideoFrame() {
if (!poseDetector || !videoElement) return;
const process = async () => {
if (videoElement.paused || videoElement.ended) {
cancelAnimationFrame(animationFrameId);
return;
}
await poseDetector.send({ image: videoElement });
animationFrameId = requestAnimationFrame(process);
};
animationFrameId = requestAnimationFrame(process);
}
function resetVideo() {
videoElement.pause();
videoElement.currentTime = 0;
if (videoElement.src) URL.revokeObjectURL(videoElement.src);
videoElement.removeAttribute('src');
videoElement.load();
ctx.clearRect(0, 0, canvasElement.width, canvasElement.height);
poseLabelElement.textContent = '-';
poseAccuracyElement.textContent = '-';
probList.innerHTML = '';
cancelAnimationFrame(animationFrameId);
fileInput.value = '';
}
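// Keep the analysis loop in sync with playback: start on play, stop on pause/end.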
videoElement.addEventListener('play', () => {
analyzeVideoFrame();
});
videoElement.addEventListener('pause', () => {
cancelAnimationFrame(animationFrameId);
});
videoElement.addEventListener('ended', () => {
cancelAnimationFrame(animationFrameId);
});
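// Load the classifier and configure MediaPipe once the page is ready.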
async function initializeApp() {
await loadTFModel();
setupPoseDetection();
}
document.addEventListener('DOMContentLoaded', initializeApp);
</script>
<style>
.section-title {
text-align: center;
margin-bottom: 10px;
font-weight: bold;
font-size: 1.8rem;
}
.video-container {
position: relative;
width: 640px;
height: 480px;
border: 2px solid #ddd;
border-radius: 8px;
overflow: hidden;
background-color: black;
}
video, canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
object-fit: contain;
}
.prediction-box {
margin-bottom: 20px;
padding: 15px;
background: #f8f9fa;
border-radius: 8px;
text-align: center;
font-size: 1.2rem;
box-shadow: 0 2px 5px rgba(0,0,0,0.1);
min-width: 300px;
}
#pose-label {
font-weight: bold;
color: #2c3e50;
}
#pose-accuracy {
font-weight: bold;
color: #27ae60;
}
#all-probabilities {
max-height: 200px;
overflow-y: auto;
padding-left: 0;
list-style: none;
margin-top: 10px;
font-size: 0.95rem;
text-align: left;
}
#all-probabilities li {
margin-bottom: 4px;
}
canvas {
pointer-events: none;
}
</style>
@endsection