Upload files to "/"
This commit is contained in: commit 57d6da7480
@ -0,0 +1,280 @@
# import cv2
# from keras.models import Sequential
# from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
# from keras.optimizers import Adam
# from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
# from keras.preprocessing.image import ImageDataGenerator
# import matplotlib.pyplot as plt
# import numpy as np
# from keras.regularizers import l2

# train_data_gen = ImageDataGenerator(rescale=1./255)
# validation_data_gen = ImageDataGenerator(rescale=1./255)

# datagen = ImageDataGenerator(
#     rotation_range=20,
#     width_shift_range=0.2,
#     height_shift_range=0.2,
#     horizontal_flip=True,
#     shear_range=0.2,
#     rescale=1./255,
#     fill_mode='nearest')

# train_generator = train_data_gen.flow_from_directory(
#     'data/training',
#     target_size=(48, 48),
#     batch_size=32,
#     color_mode="grayscale",
#     class_mode='categorical',
#     shuffle=True
# )

# validation_generator = validation_data_gen.flow_from_directory(
#     'data/test',
#     target_size=(48, 48),
#     batch_size=32,
#     color_mode="grayscale",
#     class_mode='categorical',
#     shuffle=False
# )

# emotion_model = Sequential()

# # layer 1
# emotion_model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(48, 48, 1)))
# emotion_model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Dropout(0.5))

# # layer 2
# emotion_model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Dropout(0.5))

# # layer 3
# emotion_model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Dropout(0.5))

# emotion_model.add(Flatten())
# emotion_model.add(Dense(1024, activation='relu'))
# emotion_model.add(Dropout(0.5))
# emotion_model.add(Dense(2, activation='softmax'))

# cv2.ocl.setUseOpenCL(False)

# emotion_model.compile(
#     loss='categorical_crossentropy',
#     optimizer=Adam(learning_rate=0.0001, decay=1e-6),
#     metrics=['accuracy'])

# checkpoint = ModelCheckpoint('model_weights.h5', monitor='accuracy', verbose=1, save_best_only=True)

# early_stopping = EarlyStopping(monitor='accuracy',
#                                min_delta=0,
#                                patience=3,
#                                verbose=1,
#                                restore_best_weights=True)

# reduce_learningrate = ReduceLROnPlateau(monitor='accuracy',
#                                         factor=0.2,
#                                         patience=3,
#                                         verbose=1,
#                                         min_delta=0.0001)
# callbacks_list = [checkpoint]

# emotion_model_info = emotion_model.fit(
#     train_generator,
#     steps_per_epoch=480 // 32,
#     epochs=60,
#     validation_data=validation_generator,
#     validation_steps=120 // 32,
#     callbacks=callbacks_list)

# model_json = emotion_model.to_json()
# with open("emotion_model.json", "w") as json_file:
#     json_file.write(model_json)

# emotion_model.save_weights('emotion_model.h5')

# plt.figure(figsize=(20, 10))
# plt.subplot(1, 2, 1)
# plt.suptitle('Optimizer : Adam', fontsize=10)
# plt.ylabel('loss', fontsize=16)
# plt.plot(emotion_model_info.history['loss'], label='Training Loss')
# plt.plot(emotion_model_info.history['val_loss'], label='Validation Loss')
# plt.legend(loc='upper right')

# plt.subplot(1, 2, 2)
# plt.ylabel('Accuracy', fontsize=16)
# plt.plot(emotion_model_info.history['accuracy'], label='Training Accuracy')
# plt.plot(emotion_model_info.history['val_accuracy'], label='Validation Accuracy')
# plt.legend(loc='lower right')
# plt.savefig('Pakai6.png')
# plt.show()

############## Confusion Matrix ###############
import cv2
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
from keras.regularizers import l2
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, f1_score, roc_curve, auc
import seaborn as sns
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

train_data_gen = ImageDataGenerator(rescale=1./255)
validation_data_gen = ImageDataGenerator(rescale=1./255)

datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    shear_range=0.2,
    rescale=1./255,
    fill_mode='nearest')

train_generator = train_data_gen.flow_from_directory(
    'data/training',
    target_size=(48, 48),
    batch_size=32,
    color_mode="grayscale",
    class_mode='categorical',
    shuffle=True
)

validation_generator = validation_data_gen.flow_from_directory(
    'data/test',
    target_size=(48, 48),
    batch_size=32,
    color_mode="grayscale",
    class_mode='categorical',
    shuffle=False  # keep the order fixed so predictions line up with validation_generator.classes
)
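
# Note: flow_from_directory expects one sub-folder per class under each directory above
# (an assumed layout such as data/training/<class_0>/ and data/training/<class_1>/, not
# confirmed by this upload); the folder names map to the label indices reported by
# train_generator.class_indices.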

emotion_model = Sequential()

# layer 1
emotion_model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(48, 48, 1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.2))

# layer 2
emotion_model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.2))

# layer 3
emotion_model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.2))

emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.2))
emotion_model.add(Dense(2, activation='softmax'))

cv2.ocl.setUseOpenCL(False)

emotion_model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(learning_rate=0.0001, decay=1e-6),
    metrics=['accuracy'])

checkpoint = ModelCheckpoint('model_weights.h5', monitor='accuracy', verbose=1, save_best_only=True)

early_stopping = EarlyStopping(monitor='accuracy',
                               min_delta=0,
                               patience=3,
                               verbose=1,
                               restore_best_weights=True)

reduce_learningrate = ReduceLROnPlateau(monitor='accuracy',
                                        factor=0.2,
                                        patience=3,
                                        verbose=1,
                                        min_delta=0.0001)
callbacks_list = [checkpoint]
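# Note: only the checkpoint callback is passed to fit(); early_stopping and
# reduce_learningrate are defined above but unused. A sketch of enabling them all:
# callbacks_list = [checkpoint, early_stopping, reduce_learningrate]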

emotion_model_info = emotion_model.fit(
    train_generator,
    steps_per_epoch=800 // 32,
    epochs=60,
    validation_data=validation_generator,
    validation_steps=200 // 32,
    callbacks=callbacks_list)

model_json = emotion_model.to_json()
with open("emotion_model.json", "w") as json_file:
    json_file.write(model_json)

emotion_model.save_weights('emotion_model.h5')

# Evaluate on the validation set
validation_steps = len(validation_generator)
validation_result = emotion_model.evaluate(validation_generator, steps=validation_steps)

# Get true labels and predicted labels
validation_generator.reset()
y_true = validation_generator.classes
y_pred = emotion_model.predict(validation_generator, steps=validation_steps, verbose=1)
y_pred_labels = np.argmax(y_pred, axis=1)

# Calculate and plot the confusion matrix
conf_matrix = confusion_matrix(y_true, y_pred_labels)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=['Class 0', 'Class 1'], yticklabels=['Class 0', 'Class 1'])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.savefig('Confusion Matrix 4.png')
plt.show()

# Calculate and print the classification report
class_report = classification_report(y_true, y_pred_labels, target_names=['Class 0', 'Class 1'])
print(class_report)

# Calculate and print AUC
roc_auc = roc_auc_score(y_true, y_pred_labels)
print(f'AUC: {roc_auc}')

# Calculate and print the F1 score
f1 = f1_score(y_true, y_pred_labels)
print(f'F1 Score: {f1}')

# Plot the ROC curve
fpr, tpr, _ = roc_curve(y_true, y_pred_labels)
roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = {:.2f})'.format(roc_auc))
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic Curve')
plt.legend(loc='lower right')
plt.savefig('ROC 4.png')
plt.show()

# Plot training/validation loss and accuracy
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.suptitle('Optimizer : Adam', fontsize=10)
plt.ylabel('loss', fontsize=16)
plt.plot(emotion_model_info.history['loss'], label='Training Loss')
plt.plot(emotion_model_info.history['val_loss'], label='Validation Loss')
plt.legend(loc='upper right')

plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(emotion_model_info.history['accuracy'], label='Training Accuracy')
plt.plot(emotion_model_info.history['val_accuracy'], label='Validation Accuracy')
plt.legend(loc='lower right')
plt.savefig('Accuracy 4.png')
plt.show()
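
# Optional cross-check (not part of the original script): the ROC/AUC above are computed
# from the argmax labels; scoring with the class-1 softmax probability is usually more
# informative. Assumes y_true and y_pred (shape (N, 2)) are still in scope from above.
y_score = y_pred[:, 1]
print(f'AUC from probabilities: {roc_auc_score(y_true, y_score):.4f}')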
@ -0,0 +1,578 @@
# from flask import Flask, render_template, Response, jsonify, request
# from flask_socketio import SocketIO, emit
# import cv2
# import time
# import numpy as np
# from keras.models import model_from_json
# # from keras.models import load_model
# from collections import Counter
# import json
# import os
# from keras import backend as K

# # import eventlet

# app = Flask(__name__)
# socketio = SocketIO(app)

# json_file = open('emotion_model.json', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# emotion_model = model_from_json(loaded_model_json)

# # Load the facial-expression classification weights (replace the path as needed)
# emotion_model.load_weights("emotion_model.h5")

# # Load the face detector (here, OpenCV Haar cascades)
# face_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')

# duration = 120
# image_interval = 2

# expression_results = []
# puas_points = 0
# tidak_puas_points = 0

# # Directory for storing captured images
# image_dir = "expression_images"

# # Create the folder if it does not exist yet
# def create_image_folder():
#     if not os.path.exists(image_dir):
#         os.makedirs(image_dir)

# # Call the function to create the folder
# create_image_folder()

# # Read the stored expression points from file, if present
# try:
#     with open('points.json', 'r') as file:
#         data = json.load(file)
#         puas_points = data.get('puas_points', 0)
#         tidak_puas_points = data.get('tidak_puas_points', 0)
# except FileNotFoundError:
#     pass

# video_running = False

# def save_expression_images(most_common_expression, face_images):
#     # Build a folder name for the most frequent expression
#     expression_dir = os.path.join(image_dir, most_common_expression)

#     # Find a unique folder name if it already exists
#     unique_expression_dir = expression_dir
#     index = 1
#     while os.path.exists(unique_expression_dir):
#         unique_expression_dir = f"{expression_dir}_{index}"
#         index += 1

#     # Create the folder for the captured expressions
#     os.makedirs(unique_expression_dir)

#     # Build a unique name for each image
#     timestamp = int(time.time() * 1000)

#     for i, face_image in enumerate(face_images):
#         image_path = os.path.join(unique_expression_dir, f"{timestamp}_{i}.jpg")
#         cv2.imwrite(image_path, face_image)

# # Capture video from the camera
# def gen_frames():
#     global video_running
#     cap = cv2.VideoCapture(1)  # index 0 would be the default (usually built-in) camera

#     start_time = time.time()
#     current_time = time.time()
#     image_count = 0  # counts the frames captured in this run
#     face_images = []  # stores the detected face crops

#     while (current_time - start_time) < duration:
#         success, frame = cap.read()  # read a frame from the camera
#         if not success:
#             break
#         else:
#             current_time = time.time()
#             image_count += 1

#             # Convert the frame to grayscale
#             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

#             # Convert the grayscale image back to RGB (not needed)
#             # face_roi_rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)

#             # Detect faces
#             faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))

#             # For each detected face
#             for (x, y, w, h) in faces:
#                 # Crop the face region from the image
#                 face_roi = gray[y:y+h, x:x+w]
#                 # not needed:
#                 # face_roi = face_roi_rgb[y:y+h, x:x+w]
#                 face_roi = cv2.resize(face_roi, (48, 48))  # resize to the input size expected by the classifier

#                 # Normalize the pixels
#                 face_roi = face_roi / 255.0

#                 # Predict the expression
#                 expression_probs = emotion_model.predict(np.expand_dims(face_roi, axis=0))
#                 predicted_expression_label = np.argmax(expression_probs)

#                 # Determine the matching expression label
#                 expression_labels = ["Puas", "TidakPuas"]
#                 predicted_expression = expression_labels[predicted_expression_label]
#                 expression_results.append(predicted_expression)  # store the expression result

#                 # Store the detected face image
#                 face_images.append(frame[y:y+h, x:x+w])

#                 # Compute the confidence
#                 accuracy = expression_probs[0][predicted_expression_label] * 100

#                 # Show the prediction on screen
#                 text = f'Ekspresi: {predicted_expression} ({accuracy:.2f}%)'
#                 cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
#                 cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

#             ret, buffer = cv2.imencode('.jpg', frame)
#             frame = buffer.tobytes()

#             yield (b'--frame\r\n'
#                    b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # send the frame as an HTTP response

#     # After capturing finishes, save the images into a folder
#     if face_images:
#         most_common_expression = Counter(expression_results).most_common(1)[0][0]
#         save_expression_images(most_common_expression, face_images)
#     # After capturing finishes, count the most frequent expression
#     expression_count = Counter(expression_results)
#     most_common_expression = expression_count.most_common(1)
#     print(f'Ekspresi Terbanyak: {most_common_expression[0][0]}')

#     global puas_points, tidak_puas_points
#     if most_common_expression:
#         most_common_expression = most_common_expression[0][0]
#         if most_common_expression == "Puas":
#             puas_points += 1
#         elif most_common_expression == "TidakPuas":
#             tidak_puas_points += 1
#         # Save the points to file whenever they change
#         save_points_to_file()

#     # Send the most frequent expression and the points to the client
#     socketio.emit('update_expression', {'most_common_expression': predicted_expression,
#                                         'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

# def save_points_to_file():
#     data = {'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points}
#     with open('points.json', 'w') as file:
#         json.dump(data, file)

# @app.route('/')
# def index():
#     return render_template('tidak_puas.html')
#     # return render_template('tentang.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

# @app.route('/tentang')
# def tentang():
#     return render_template('tentang.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

# # @app.route('/start_system', methods=['POST'])
# # def start_system():
# #     print('System started manually.')
# #     # Add the logic to start the system here
# #     return jsonify({'status': 'System started'})

# # # Route to stop the system
# # @app.route('/stop_system', methods=['POST'])
# # def stop_system():
# #     print('System stopped manually.')
# #     # Add the logic to stop the system here
# #     return jsonify({'status': 'System stopped'})

# @socketio.on('get_expression')
# def get_expression():
#     expression_count = Counter(expression_results)
#     most_common_expression = expression_count.most_common(1)

#     if most_common_expression:
#         most_common_expression = most_common_expression[0][0]
#     else:
#         most_common_expression = "Tidak ada hasil ekspresi yang terdeteksi"
#     print(f"Most Common Expression: {most_common_expression}")
#     socketio.emit('update_expression', {'most_common_expression': most_common_expression, 'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

# @app.route('/get_points')
# def get_points():
#     return jsonify({'puas': puas_points, 'tidak_puas': tidak_puas_points})


# @app.route('/video_feed')
# def video_feed():
#     return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
#     # return Response(mimetype='multipart/x-mixed-replace; boundary=frame')

# # if __name__ == '__main__':
# #     app.run(debug=True)

# if __name__ == '__main__':
#     socketio.run(app, debug=True)


# ================= Sidang =========================
# from flask import Flask, render_template, Response, jsonify, request
# from flask_socketio import SocketIO, emit
# import cv2
# import time
# import numpy as np
# from keras.models import model_from_json
# from collections import Counter
# import json
# import os
# from keras import backend as K

# app = Flask(__name__)
# socketio = SocketIO(app)

# json_file = open('emotion_model.json', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# emotion_model = model_from_json(loaded_model_json)

# emotion_model.load_weights("emotion_model.h5")

# face_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')

# duration = 30
# image_interval = 1

# expression_results = []
# puas_points = 0
# tidak_puas_points = 0

# image_dir = "expression_images"

# def create_image_folder():
#     if not os.path.exists(image_dir):
#         os.makedirs(image_dir)

# create_image_folder()

# try:
#     with open('points.json', 'r') as file:
#         data = json.load(file)
#         puas_points = data.get('puas_points', 0)
#         tidak_puas_points = data.get('tidak_puas_points', 0)
# except FileNotFoundError:
#     pass

# video_running = False

# def save_expression_images(most_common_expression, face_images):
#     expression_dir = os.path.join(image_dir, most_common_expression)
#     unique_expression_dir = expression_dir
#     index = 1
#     while os.path.exists(unique_expression_dir):
#         unique_expression_dir = f"{expression_dir}_{index}"
#         index += 1

#     os.makedirs(unique_expression_dir, exist_ok=True)

#     timestamp = int(time.time() * 1000)

#     for i, face_image in enumerate(face_images):
#         predicted_expression = expression_results[i]
#         image_path = os.path.join(unique_expression_dir, f"{predicted_expression}_{timestamp}_{i}.jpg")
#         cv2.imwrite(image_path, face_image)

#     print(f"Predicted Expression: {predicted_expression}, Most Common Expression: {most_common_expression}")

# def gen_frames():
#     global video_running
#     global expression_results
#     cap = cv2.VideoCapture(1)
#     expression_results = []
#     start_time = time.time()
#     current_time = time.time()
#     image_count = 0
#     face_images = []

#     while (current_time - start_time) < duration:
#         success, frame = cap.read()
#         if not success:
#             break
#         else:
#             current_time = time.time()
#             if (current_time - start_time) >= (image_count * image_interval):
#                 image_count += 1

#             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

#             faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(24, 24))

#             for (x, y, w, h) in faces:
#                 face_roi = gray[y:y+h, x:x+w]
#                 face_roi = cv2.resize(face_roi, (48, 48))

#                 face_images.append(face_roi)

#                 face_roi = face_roi / 255.0

#                 # Predict the expression
#                 expression_probs = emotion_model.predict(np.expand_dims(face_roi, axis=0))
#                 predicted_expression_label = np.argmax(expression_probs)

#                 expression_labels = ["Puas", "TidakPuas"]
#                 predicted_expression = expression_labels[predicted_expression_label]
#                 expression_results.append(predicted_expression)

#                 accuracy = expression_probs[0][predicted_expression_label] * 100

#                 text = f'Ekspresi: {predicted_expression} ({accuracy:.2f}%)'
#                 cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
#                 cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#                 print(f'Expression Results: {expression_results}')

#             ret, buffer = cv2.imencode('.jpg', frame)
#             frame = buffer.tobytes()

#             yield (b'--frame\r\n'
#                    b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

#     if face_images:
#         most_common_expression = Counter(expression_results).most_common(1)[0][0]
#         save_expression_images(most_common_expression, face_images)
#     expression_count = Counter(expression_results)
#     most_common_expression = expression_count.most_common(1)
#     print(f'Ekspresi Terbanyak: {most_common_expression[0][0]}')

#     global puas_points, tidak_puas_points
#     if most_common_expression:
#         most_common_expression = most_common_expression[0][0]
#         if most_common_expression == "Puas":
#             puas_points += 1
#         elif most_common_expression == "TidakPuas":
#             tidak_puas_points += 1
#         save_points_to_file()

#     socketio.emit('update_expression', {'most_common_expression': most_common_expression,
#                                         'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

# def save_points_to_file():
#     data = {'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points}
#     with open('points.json', 'w') as file:
#         json.dump(data, file)

# @app.route('/')
# def index():
#     return render_template('utama.html')
#     # return render_template('tentang.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

# @app.route('/tentang')
# def tentang():
#     return render_template('deteksi.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

# @socketio.on('get_expression')
# def get_expression():
#     expression_count = Counter(expression_results)
#     most_common_expression = expression_count.most_common(1)

#     if most_common_expression:
#         most_common_expression = most_common_expression[0][0]
#     else:
#         most_common_expression = "Tidak ada hasil ekspresi yang terdeteksi"
#     print(f"Most Common Expression: {most_common_expression}")
#     socketio.emit('update_expression', {'most_common_expression': most_common_expression, 'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

# @app.route('/get_points')
# def get_points():
#     return jsonify({'puas': puas_points, 'tidak_puas': tidak_puas_points})


# @app.route('/video_feed')
# def video_feed():
#     return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

# if __name__ == '__main__':
#     socketio.run(app, debug=True)


from flask import Flask, render_template, Response, jsonify, request
from flask_socketio import SocketIO, emit
import cv2
import time
import numpy as np
from keras.models import model_from_json
from collections import Counter
import json
import os
from keras import backend as K

app = Flask(__name__)
socketio = SocketIO(app)

# Rebuild the expression model from its saved architecture and trained weights
json_file = open('emotion_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
emotion_model = model_from_json(loaded_model_json)

emotion_model.load_weights("emotion_model.h5")

# Haar-cascade face detector
face_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')
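# The path above assumes a local 'Haarcascades/' folder. If that file is missing, the
# cascade bundled with opencv-python could be used instead; a sketch:
# face_cascade = cv2.CascadeClassifier(
#     os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))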

duration = 30
image_interval = 1

expression_results = []
puas_points = 0
tidak_puas_points = 0

image_dir = "expression_images"

def create_image_folder():
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

create_image_folder()

# Restore previously saved points, if any
try:
    with open('points.json', 'r') as file:
        data = json.load(file)
        puas_points = data.get('puas_points', 0)
        tidak_puas_points = data.get('tidak_puas_points', 0)
except FileNotFoundError:
    pass

video_running = False

def save_expression_images(most_common_expression, face_images):
    # Store the captured face crops in a uniquely named folder for this run
    expression_dir = os.path.join(image_dir, most_common_expression)
    unique_expression_dir = expression_dir
    index = 1
    while os.path.exists(unique_expression_dir):
        unique_expression_dir = f"{expression_dir}_{index}"
        index += 1

    os.makedirs(unique_expression_dir, exist_ok=True)

    timestamp = int(time.time() * 1000)

    for i, face_image in enumerate(face_images):
        predicted_expression = expression_results[i]
        image_path = os.path.join(unique_expression_dir, f"{predicted_expression}_{timestamp}_{i}.jpg")
        cv2.imwrite(image_path, face_image)

    print(f"Predicted Expression: {predicted_expression}, Most Common Expression: {most_common_expression}")

def gen_frames():
    global video_running
    global expression_results
    cap = cv2.VideoCapture(0)
    expression_results = []
    start_time = time.time()
    current_time = time.time()
    last_face_detected_time = time.time()
    image_count = 0
    face_images = []

    while (current_time - start_time) < duration:
        success, frame = cap.read()
        if not success:
            break
        else:
            current_time = time.time()

            # Run detection once at least 5 seconds have passed since the last detected
            # face; stop streaming if no face is found at that point
            if (current_time - last_face_detected_time) >= 5:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(24, 24))

                if len(faces) == 0:
                    break
                else:
                    last_face_detected_time = current_time

                for (x, y, w, h) in faces:
                    face_roi = gray[y:y+h, x:x+w]
                    face_roi = cv2.resize(face_roi, (48, 48))

                    face_images.append(face_roi)

                    face_roi = face_roi / 255.0

                    # Predict the expression
                    expression_probs = emotion_model.predict(np.expand_dims(face_roi, axis=0))
                    predicted_expression_label = np.argmax(expression_probs)

                    expression_labels = ["Puas", "TidakPuas"]
                    predicted_expression = expression_labels[predicted_expression_label]
                    expression_results.append(predicted_expression)

                    accuracy = expression_probs[0][predicted_expression_label] * 100

                    text = f'Ekspresi: {predicted_expression} ({accuracy:.2f}%)'
                    cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    print(f'Expression Results: {expression_results}')

            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()

            # Stream the annotated frame as one part of the multipart (MJPEG) response
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    cap.release()  # release the camera once capture ends

    if face_images:
        most_common_expression = Counter(expression_results).most_common(1)[0][0]
        save_expression_images(most_common_expression, face_images)
    expression_count = Counter(expression_results)
    most_common_expression = expression_count.most_common(1)
    print(f'Ekspresi Terbanyak: {most_common_expression[0][0]}')

    global puas_points, tidak_puas_points
    if most_common_expression:
        most_common_expression = most_common_expression[0][0]
        if most_common_expression == "Puas":
            puas_points += 1
        elif most_common_expression == "TidakPuas":
            tidak_puas_points += 1
        save_points_to_file()

    socketio.emit('update_expression', {'most_common_expression': most_common_expression,
                                        'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

def save_points_to_file():
    data = {'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points}
    with open('points.json', 'w') as file:
        json.dump(data, file)

@app.route('/')
def index():
    return render_template('utama.html')
    # return render_template('tentang.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

@app.route('/tentang')
def tentang():
    return render_template('deteksi.html', video_running=video_running, puas_points=puas_points, tidak_puas_points=tidak_puas_points)

@socketio.on('get_expression')
def get_expression():
    expression_count = Counter(expression_results)
    most_common_expression = expression_count.most_common(1)

    if most_common_expression:
        most_common_expression = most_common_expression[0][0]
    else:
        most_common_expression = "Tidak ada hasil ekspresi yang terdeteksi"
    print(f"Most Common Expression: {most_common_expression}")
    socketio.emit('update_expression', {'most_common_expression': most_common_expression, 'puas_points': puas_points, 'tidak_puas_points': tidak_puas_points})

@app.route('/get_points')
def get_points():
    return jsonify({'puas': puas_points, 'tidak_puas': tidak_puas_points})


@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    socketio.run(app, debug=True)
@ -0,0 +1,249 @@
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0, shrink-to-fit=no" />
    <title>{% if judul %}{{ judul }} - {% endif %}Kepuasan Pelanggan</title>
    <link rel="stylesheet" href="{{ url_for('static', filename='bootstrap/css/bootstrap.min.css') }}" />
    <link rel="stylesheet" type="text/css" href="styles.css" />
    <script src="https://kit.fontawesome.com/f2151fe6dc.js" crossorigin="anonymous"></script>
    <script src="https://code.jquery.com/jquery-3.6.4.min.js"></script>
    <style>
      .judul {
        font-family: "Montserrat", sans-serif;
        font-weight: 700;
      }
      .navbar-container {
        height: 85px; /* Set the height of the navbar container */
        background: #3591ae;
      }
      .btn-back:hover {
        background-color: red; /* Change the background color on hover */
      }
      .badge {
        transform: translate(-50%, -50%);
      }

      .fa-thumbs-up,
      .fa-thumbs-down {
        position: relative;
        padding-right: 7px;
      }

      .top-0 {
        top: 0;
      }

      .start-100 {
        left: 100%;
      }
    </style>
  </head>
  <body>
    <div class="navbar-container">
      <nav class="navbar navbar-expand-lg" style="background: #3591ae; height: 100%">
        <div class="container-fluid d-flex justify-content-center h-100">
          <p class="h2 text-center judul" style="color: white; margin: 0">
            Kepuasan Pelanggan
          </p>
        </div>
        <div class="navbar-collapse collapse">
          <ul class="navbar-nav ml-auto">
            <li class="nav-item">
              <button type="button" class="btn btn-primary btn-back" onclick="backSystem()">
                Kembali
              </button>
            </li>
          </ul>
        </div>
      </nav>
    </div>
    <div class="container-fluid">
      <div class="row">
        <div class="col-sm-6">
          <div class="card border-info border rounded-5 shadow" style="margin-bottom: 2%; margin-top: 3%; background: #3591ae">
            <div class="card-body d-flex justify-content-between align-items-center">
              <h4 class="text-white">Kamera</h4>
              <button type="button" class="btn btn-primary" id="reloadButton" onclick="reloadSystem()">
                Deteksi
              </button>
            </div>
          </div>
          <div class="card border-info border rounded-5 shadow" style="margin-bottom: 2%">
            <div class="card-body text-center">
              <div id="video_container">
                <img id="video_feed" src="{{ url_for('video_feed') }}" alt="Video Feed" style="max-width: 100%; max-height: 100%" />
              </div>
            </div>
          </div>
        </div>
        <div class="col-sm-6">
          <div class="card border-info border rounded-5 shadow" style="margin-bottom: 2%; margin-top: 3%; background: #3591ae">
            <div class="card-body d-flex justify-content-between align-items-center">
              <h4 class="text-white">Prediksi</h4>
              <div style="font-size: 0">
                <div style="position: relative; display: inline-block; font-size: 16px; margin-right: 30px">
                  <i class="fa-solid fa-thumbs-up text-white" style="font-size: 25px"></i>
                  <span class="top-0 start-100 translate-middle badge rounded-pill bg-success text-white" id="puasPoints" style="position: absolute">{{ puas_points }}</span>
                </div>
                <div style="position: relative; display: inline-block; font-size: 16px; margin-right: 10px">
                  <i class="fa-solid fa-thumbs-down text-white" style="font-size: 25px"></i>
                  <span class="top-0 start-100 translate-middle badge rounded-pill bg-success text-white" id="tidakPuasPoints" style="position: absolute">{{ tidak_puas_points }}</span>
                </div>
              </div>
            </div>
          </div>
          <div class="card border-info border rounded-5 shadow">
            <div class="card-body d-flex justify-content-center align-items-center">
              <div id="initialView">
                <p>Studi ekspresi wajah sedang berlangsung...</p>
              </div>
              <div id="resultView" style="display: none; align-items: center; justify-content: center; flex-direction: column; text-align: center">
                <i id="expressionIcon" class="fa-10x fa-regular fa-face-meh text-center" style="color: #3591ae; font-size: 28vw; margin-bottom: 20px"></i>
                <p id="result" class="text-center mb-0" style="font-size: 1.3em">
                  Tidak ada hasil ekspresi yang terdeteksi
                </p>
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.0.1/socket.io.js"></script>
    <script>
      var socket = io.connect("http://" + document.domain + ":" + location.port);

      socket.on("connect", function () {
        socket.emit("get_expression");
      });

      socket.on("update_expression", function (data) {
        console.log("Data dari Server:", data);

        if (data.most_common_expression === "Puas") {
          console.log("Ekspresi Puas");
          $("#expressionIcon")
            .removeClass()
            .addClass("fa-10x fa-regular fa-face-smile");
        } else if (data.most_common_expression === "TidakPuas") {
          console.log("Ekspresi Tidak Puas");
          $("#expressionIcon")
            .removeClass()
            .addClass("fa-10x fa-regular fa-face-frown");
        } else {
          console.log("Ekspresi Tidak Terdeteksi");
          $("#expressionIcon")
            .removeClass()
            .addClass("fa-10x fa-regular fa-face-meh");
        }
        $("#result").text(data.most_common_expression);
        $("#initialView").hide();
        $("#resultView").show();

        // Update the points shown in the HTML
        $("#puasPoints").text(data.puas_points);
        $("#tidakPuasPoints").text(data.tidak_puas_points);
      });

      function reloadSystem() {
        // Reload the page to start the expression-detection run again
        window.location.reload();
      }

      function backSystem() {
        // Return to the main page when the button is pressed
        window.location.href = "/";
      }
    </script>
  </body>
</html>
File diff suppressed because it is too large
@ -0,0 +1,9 @@
<!DOCTYPE html>
<html>
  <body>
    <h1>Live streaming</h1>
    <div>
      <img src="{{ url_for('video_feed') }}" width="10%" />
    </div>
  </body>
</html>