feat: add model
parent 93edec8a85
commit 5b5b614897
Binary file not shown.
Binary file not shown.
@@ -685,5 +685,140 @@
                 "answer": "False"
             }
         ]
-    }
+    },
+    {
+        "context": "VOC (Vereenigde Oostindische Compagnie) adalah perusahaan dagang Belanda yang didirikan pada tahun 1602 dan merupakan salah satu perusahaan multinasional pertama di dunia. VOC memainkan peran penting dalam perdagangan rempah-rempah di Nusantara dan berkontribusi besar terhadap pembentukan sejarah kolonial di Indonesia.",
+        "qa_pairs": [
+            {
+                "type": "multiple_choice",
+                "question": "Apa kepanjangan dari VOC?",
+                "options": [
+                    "Vereenigde Oostindische Compagnie",
+                    "Volatile Organic Compounds",
+                    "Vocal Organ Company",
+                    "Vereenigde Oostelijke Commercie"
+                ],
+                "answer": "Vereenigde Oostindische Compagnie"
+            },
+            {
+                "type": "true_false",
+                "question": "VOC didirikan pada tahun 1602.",
+                "answer": "True"
+            },
+            {
+                "type": "fill_in_the_blank",
+                "question": "VOC memiliki hak monopoli dalam perdagangan _______.",
+                "answer": "rempah-rempah"
+            }
+        ]
+    },
+    {
+        "context": "VOC memiliki hak istimewa dari pemerintah Belanda, termasuk hak untuk mendirikan benteng, mengadakan perjanjian dengan penguasa setempat, dan memiliki angkatan perang sendiri.",
+        "qa_pairs": [
+            {
+                "type": "multiple_choice",
+                "question": "Apa salah satu hak istimewa VOC?",
+                "options": [
+                    "Mencetak uang",
+                    "Memiliki angkatan perang sendiri",
+                    "Mengadakan pemilu",
+                    "Menghapus pajak"
+                ],
+                "answer": "Memiliki angkatan perang sendiri"
+            },
+            {
+                "type": "true_false",
+                "question": "VOC tidak memiliki hak untuk membuat perjanjian dengan penguasa setempat.",
+                "answer": "False"
+            },
+            {
+                "type": "fill_in_the_blank",
+                "question": "VOC berwenang mendirikan ______ di wilayah koloninya.",
+                "answer": "benteng"
+            }
+        ]
+    },
+    {
+        "context": "VOC mengalami kebangkrutan pada akhir abad ke-18 akibat korupsi, biaya perang yang tinggi, dan persaingan dengan negara lain dalam perdagangan internasional.",
+        "qa_pairs": [
+            {
+                "type": "multiple_choice",
+                "question": "Apa salah satu penyebab kebangkrutan VOC?",
+                "options": [
+                    "Kekurangan sumber daya alam",
+                    "Korupsi dalam administrasi",
+                    "Perdagangan terlalu menguntungkan",
+                    "Kurangnya pegawai"
+                ],
+                "answer": "Korupsi dalam administrasi"
+            },
+            {
+                "type": "true_false",
+                "question": "VOC tetap bertahan hingga abad ke-20.",
+                "answer": "False"
+            },
+            {
+                "type": "fill_in_the_blank",
+                "question": "VOC dibubarkan secara resmi pada tahun _______.",
+                "answer": "1799"
+            }
+        ]
+    },
+    {
+        "context": "Pada abad ke-17, VOC menguasai perdagangan rempah-rempah di kepulauan Nusantara dan menerapkan sistem monopoli yang ketat terhadap produk seperti cengkeh, pala, dan lada.",
+        "qa_pairs": [
+            {
+                "type": "multiple_choice",
+                "question": "Produk apa yang dimonopoli oleh VOC?",
+                "options": [
+                    "Padi, jagung, dan kedelai",
+                    "Cengkeh, pala, dan lada",
+                    "Kopi, teh, dan gula",
+                    "Batu bara, besi, dan emas"
+                ],
+                "answer": "Cengkeh, pala, dan lada"
+            },
+            {
+                "type": "true_false",
+                "question": "VOC menerapkan sistem perdagangan bebas di Nusantara.",
+                "answer": "False"
+            }
+        ]
+    },
+    {
+        "context": "VOC memiliki kebijakan yang dikenal sebagai 'Pelayaran Hongi', di mana armada kapal perang mereka digunakan untuk menghancurkan kebun rempah-rempah yang tidak berada di bawah kendali mereka guna mempertahankan harga tetap tinggi.",
+        "qa_pairs": [
+            {
+                "type": "fill_in_the_blank",
+                "question": "Kebijakan VOC yang bertujuan untuk mempertahankan harga rempah-rempah disebut _______.",
+                "answer": "Pelayaran Hongi"
+            }
+        ]
+    },
+    {
+        "context": "Pada tahun 1619, Jan Pieterszoon Coen menaklukkan Jayakarta dan menggantinya dengan nama Batavia, yang menjadi pusat kekuasaan VOC di Nusantara.",
+        "qa_pairs": [
+            {
+                "type": "true_false",
+                "question": "Batavia didirikan oleh VOC pada tahun 1619 setelah menaklukkan Jayakarta.",
+                "answer": "True"
+            }
+        ]
+    },
+    {
+        "context": "Selain berdagang, VOC juga memiliki peran dalam politik di Nusantara, dengan sering kali campur tangan dalam urusan kerajaan lokal untuk memastikan kepentingan ekonomi mereka tetap terjaga.",
+        "qa_pairs": [
+            {
+                "type": "multiple_choice",
+                "question": "Bagaimana VOC mempertahankan kepentingan ekonominya di Nusantara?",
+                "options": [
+                    "Menghindari campur tangan dalam politik lokal",
+                    "Bekerjasama dengan kerajaan lokal tanpa syarat",
+                    "Menjalin aliansi dan intervensi politik",
+                    "Membatasi aktivitas perdagangan lokal"
+                ],
+                "answer": "Menjalin aliansi dan intervensi politik"
+            }
+        ]
+    }
 ]
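For orientation, a minimal sketch of how this nested dataset could be loaded and flattened into training pairs. The filename `dataset.json` is an assumption, not something named in this commit; the counts in the comments correspond to the notebook output further down (49 contexts, 95 QA pairs).

```python
import json

# Hypothetical filename; the commit does not show where the JSON above is stored.
with open("dataset.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# Flatten the nested structure into (context, question, answer, type) tuples.
samples = [
    (entry["context"], qa["question"], qa["answer"], qa["type"])
    for entry in data
    for qa in entry["qa_pairs"]
]

print("Total Context:", len(data))      # 49 after this commit, per the notebook output
print("Total QA Pairs:", len(samples))  # 95 after this commit
```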
Binary file not shown.
Binary file not shown.
Binary file not shown.
125 testing.py
@@ -1,77 +1,70 @@
-from tensorflow.keras.models import load_model
 import numpy as np
+import tensorflow as tf
 import pickle
-import numpy as np
 from tensorflow.keras.preprocessing.sequence import pad_sequences

-# Load Trained Model
-model = load_model("lstm_multi_output_model.keras")
-
-# Load Tokenizer
-with open("tokenizer.pkl", "rb") as handle:
-    tokenizer = pickle.load(handle)
-
-# Define max sequence length (should match training phase)
-MAX_LENGTH = 100
-
-# Mapping for question type labels
-question_type_dict = {0: "Fill in the Blank", 1: "True/False", 2: "Multiple Choice"}
-
-
-def predict_question_answer_type(context_text):
-    """
-    Given a context (paragraph), predict a question, answer, and question type.
-    """
-    # === Tokenize and Pad the Context === #
-    context_seq = tokenizer.texts_to_sequences([context_text])
-    context_padded = pad_sequences(
-        context_seq, maxlen=MAX_LENGTH, padding="post", truncating="post"
-    )
-
-    # === Create a Dummy Input for the Question Decoder === #
-    # Since our model is seq2seq, we initialize an empty decoder input
-    decoder_input_seq = np.zeros((1, MAX_LENGTH))
-
-    # === Predict Outputs === #
-    predicted_question_seq, predicted_answer_seq, predicted_type = model.predict(
-        [context_padded, decoder_input_seq]
-    )
-
-    # === Convert Predicted Sequences to Text === #
-    index_to_word = {v: k for k, v in tokenizer.word_index.items()}
-
-    # Convert predicted question
-    predicted_question = " ".join(
-        [
-            index_to_word[idx]
-            for idx in np.argmax(predicted_question_seq, axis=2)[0]
-            if idx in index_to_word
-        ]
-    )
-
-    # Convert predicted answer
-    predicted_answer = " ".join(
-        [
-            index_to_word[idx]
-            for idx in np.argmax(predicted_answer_seq, axis=2)[0]
-            if idx in index_to_word
-        ]
-    )
-
-    # Convert predicted question type (numerical label → text)
-    predicted_question_type = question_type_dict[np.argmax(predicted_type)]
-
-    return predicted_question, predicted_answer, predicted_question_type
-
-
-# Sample Test Context
-context_example = "Ki Hajar Dewantara adalah pelopor pendidikan di Indonesia dan pendiri Taman Siswa. Ia dikenal dengan semboyannya 'Ing Ngarsa Sung Tuladha, Ing Madya Mangun Karsa, Tut Wuri Handayani', yang menekankan peran guru dalam pendidikan."
-
-# Run the prediction
-predicted_question, predicted_answer, predicted_question_type = (
-    predict_question_answer_type(context_example)
-)
-
-# Print the Results
-print(f"🔹 Predicted Question: {predicted_question}")
-print(f"🔹 Predicted Answer: {predicted_answer}")
-print(f"🔹 Predicted Question Type: {predicted_question_type}")
+class QuizGeneratorService:
+    def __init__(
+        self,
+        model_path="lstm_multi_output_model.keras",
+        tokenizer_path="tokenizer.pkl",
+        max_length=100,
+    ):
+        # Load the tokenizer
+        with open(tokenizer_path, "rb") as handle:
+            self.tokenizer = pickle.load(handle)
+        # Load the trained model
+        self.model = tf.keras.models.load_model(model_path)
+        self.max_length = max_length
+
+    def sequence_to_text(self, sequence):
+        """
+        Convert a sequence of indices to text using the tokenizer's index_word mapping.
+        Skips any padding token (0).
+        """
+        return [
+            self.tokenizer.index_word.get(idx, "<OOV>") for idx in sequence if idx != 0
+        ]
+
+    def generate_quiz(self, context):
+        """
+        Given a raw context string, this method tokenizes the input, performs inference
+        using the loaded model, and returns the generated question, answer, and question type.
+
+        Parameters:
+            context (str): The raw context text.
+
+        Returns:
+            dict: A dictionary containing the generated question, answer, and question type.
+        """
+        # Tokenize the context directly (assuming no extra preprocessing is needed)
+        sequence = self.tokenizer.texts_to_sequences([context])
+        print(sequence)
+        padded_sequence = pad_sequences(
+            sequence, maxlen=self.max_length, padding="post", truncating="post"
+        )
+
+        # Use the same padded sequence for both the context and the question decoder input.
+        pred_question, pred_answer, pred_qtype = self.model.predict(
+            [padded_sequence, padded_sequence]
+        )
+
+        # Convert predicted sequences to text (using argmax for each timestep)
+        question_tokens = self.sequence_to_text(np.argmax(pred_question[0], axis=-1))
+        answer_tokens = self.sequence_to_text(np.argmax(pred_answer[0], axis=-1))
+        qtype = int(np.argmax(pred_qtype[0]))
+
+        return {
+            "generated_question": " ".join(question_tokens),
+            "generated_answer": " ".join(answer_tokens),
+            "question_type": qtype,  # You can map this integer to a descriptive label if needed
+        }
+
+
+# Example usage:
+if __name__ == "__main__":
+    quiz_service = QuizGeneratorService()
+    context_input = "Pada tahun 1619, Jan Pieterszoon Coen menaklukkan Jayakarta dan menggantinya dengan nama Batavia, yang menjadi pusat kekuasaan VOC di Nusantara."
+    result = quiz_service.generate_quiz(context_input)
+    print(result)
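As a usage note, a small hedged sketch of mapping the integer `question_type` that `generate_quiz` returns back to a readable label. The label mapping is taken from the `question_type_dict` in the removed script; the import path is hypothetical, and the service expects `lstm_multi_output_model.keras` and `tokenizer.pkl` to be present in the working directory.

```python
from testing import QuizGeneratorService  # hypothetical import path for the class above

# Label mapping reused from the removed script's question_type_dict.
QUESTION_TYPE_LABELS = {0: "Fill in the Blank", 1: "True/False", 2: "Multiple Choice"}

service = QuizGeneratorService()
result = service.generate_quiz("VOC didirikan pada tahun 1602.")
result["question_type_label"] = QUESTION_TYPE_LABELS[result["question_type"]]
print(result)
```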
BIN tokenizer.pkl
Binary file not shown.
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-    "execution_count": 38,
+    "execution_count": 22,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -41,7 +41,7 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 39,
+    "execution_count": 23,
    "metadata": {},
    "outputs": [
     {
@@ -64,7 +64,7 @@
       "True"
      ]
     },
-    "execution_count": 39,
+    "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -79,7 +79,7 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 40,
+    "execution_count": 24,
    "metadata": {},
    "outputs": [
     {
@@ -100,8 +100,8 @@
      "3  [{'type': 'fill_in_the_blank', 'question': 'Hu...  \n",
      "4  [{'type': 'fill_in_the_blank', 'question': 'Bu...  \n",
      "\n",
-     "Total Context: 25\n",
-     "Total QA Pairs: 57\n"
+     "Total Context: 49\n",
+     "Total QA Pairs: 95\n"
     ]
    }
   ],
@@ -126,7 +126,7 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 41,
+    "execution_count": 25,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -234,7 +234,7 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 42,
+    "execution_count": 26,
    "metadata": {},
    "outputs": [
     {
@@ -242,7 +242,7 @@
      "output_type": "stream",
      "text": [
       "✅ Data processing complete!\n",
-      "Samples: 57\n"
+      "Samples: 95\n"
      ]
     }
    ],
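The "Samples: 95" line suggests the preprocessing cell flattens and tokenizes the QA pairs. Below is a hedged sketch of that kind of step, assuming a Keras `Tokenizer` and post-padding to length 100 (matching the `max_length` default in testing.py); the notebook's actual source is not shown in this diff, and the sample tuple is illustrative.

```python
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

MAX_LENGTH = 100  # matches the max_length default in testing.py

# Illustrative stand-in for the flattened (context, question, answer, type) samples.
samples = [
    ("VOC didirikan pada tahun 1602.", "Kapan VOC didirikan?", "1602", "fill_in_the_blank"),
]

# Fit one tokenizer across contexts, questions, and answers.
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts([field for s in samples for field in s[:3]])

# Pad contexts to the fixed length expected by the model.
context_padded = pad_sequences(
    tokenizer.texts_to_sequences([s[0] for s in samples]),
    maxlen=MAX_LENGTH,
    padding="post",
    truncating="post",
)
print("✅ Data processing complete!")
print("Samples:", len(context_padded))
```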
@@ -297,15 +297,15 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 43,
+    "execution_count": 27,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Training samples: 45\n",
-      "Testing samples: 12\n"
+      "Training samples: 76\n",
+      "Testing samples: 19\n"
      ]
     }
    ],
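76 + 19 = 95, i.e. a standard 80/20 split of the processed samples. A minimal sketch reproducing those counts, assuming scikit-learn's `train_test_split` (the notebook's split code is not visible in this hunk); the variable names are illustrative.

```python
from sklearn.model_selection import train_test_split

samples = list(range(95))  # stand-in for the 95 processed QA samples
train_samples, test_samples = train_test_split(samples, test_size=0.2, random_state=42)
print(f"Training samples: {len(train_samples)}")  # 76
print(f"Testing samples: {len(test_samples)}")    # 19
```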
@@ -329,7 +329,7 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 48,
+    "execution_count": 28,
    "metadata": {},
    "outputs": [
     {
@@ -337,25 +337,25 @@
      "output_type": "stream",
      "text": [
       "Epoch 1/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 536ms/step - answer_output_accuracy: 0.0143 - answer_output_loss: 5.8964 - loss: 12.9070 - question_output_accuracy: 3.7037e-04 - question_output_loss: 5.9030 - question_type_output_accuracy: 0.3843 - question_type_output_loss: 1.0984 - val_answer_output_accuracy: 0.2689 - val_answer_output_loss: 5.8541 - val_loss: 12.8370 - val_question_output_accuracy: 0.0100 - val_question_output_loss: 5.8942 - val_question_type_output_accuracy: 0.4444 - val_question_type_output_loss: 1.0888\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 681ms/step - answer_output_accuracy: 0.0297 - answer_output_loss: 6.2965 - loss: 13.6985 - question_output_accuracy: 0.0000e+00 - question_output_loss: 6.3022 - question_type_output_accuracy: 0.2625 - question_type_output_loss: 1.0992 - val_answer_output_accuracy: 0.2044 - val_answer_output_loss: 6.2629 - val_loss: 13.6638 - val_question_output_accuracy: 0.0069 - val_question_output_loss: 6.2961 - val_question_type_output_accuracy: 0.2500 - val_question_type_output_loss: 1.1048\n",
       "Epoch 2/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 171ms/step - answer_output_accuracy: 0.3408 - answer_output_loss: 5.8273 - loss: 12.8054 - question_output_accuracy: 0.0138 - question_output_loss: 5.8815 - question_type_output_accuracy: 0.5868 - question_type_output_loss: 1.0860 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 5.7489 - val_loss: 12.7114 - val_question_output_accuracy: 0.0100 - val_question_output_loss: 5.8837 - val_question_type_output_accuracy: 0.4444 - val_question_type_output_loss: 1.0788\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 305ms/step - answer_output_accuracy: 0.2145 - answer_output_loss: 6.2378 - loss: 13.6041 - question_output_accuracy: 0.0127 - question_output_loss: 6.2865 - question_type_output_accuracy: 0.6076 - question_type_output_loss: 1.0785 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 6.1644 - val_loss: 13.5630 - val_question_output_accuracy: 0.0100 - val_question_output_loss: 6.2887 - val_question_type_output_accuracy: 0.3750 - val_question_type_output_loss: 1.1100\n",
       "Epoch 3/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 172ms/step - answer_output_accuracy: 0.9826 - answer_output_loss: 5.6819 - loss: 12.6314 - question_output_accuracy: 0.0214 - question_output_loss: 5.8659 - question_type_output_accuracy: 0.5579 - question_type_output_loss: 1.0659 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 5.4070 - val_loss: 12.3515 - val_question_output_accuracy: 0.0122 - val_question_output_loss: 5.8714 - val_question_type_output_accuracy: 0.4444 - val_question_type_output_loss: 1.0730\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 297ms/step - answer_output_accuracy: 0.9846 - answer_output_loss: 6.0887 - loss: 13.4154 - question_output_accuracy: 0.0168 - question_output_loss: 6.2692 - question_type_output_accuracy: 0.5542 - question_type_output_loss: 1.0537 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 5.8030 - val_loss: 13.2110 - val_question_output_accuracy: 0.0094 - val_question_output_loss: 6.2794 - val_question_type_output_accuracy: 0.4375 - val_question_type_output_loss: 1.1287\n",
       "Epoch 4/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 170ms/step - answer_output_accuracy: 0.9825 - answer_output_loss: 4.9562 - loss: 12.0453 - question_output_accuracy: 0.0214 - question_output_loss: 5.8386 - question_type_output_accuracy: 0.5972 - question_type_output_loss: 1.0674 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 4.0363 - val_loss: 10.9596 - val_question_output_accuracy: 0.0078 - val_question_output_loss: 5.8545 - val_question_type_output_accuracy: 0.4444 - val_question_type_output_loss: 1.0688\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 300ms/step - answer_output_accuracy: 0.9853 - answer_output_loss: 5.4983 - loss: 12.7772 - question_output_accuracy: 0.0143 - question_output_loss: 6.2471 - question_type_output_accuracy: 0.5111 - question_type_output_loss: 1.0153 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 3.9937 - val_loss: 11.4697 - val_question_output_accuracy: 0.0050 - val_question_output_loss: 6.2620 - val_question_type_output_accuracy: 0.4375 - val_question_type_output_loss: 1.2140\n",
       "Epoch 5/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 180ms/step - answer_output_accuracy: 0.9827 - answer_output_loss: 3.0621 - loss: 10.1366 - question_output_accuracy: 0.0117 - question_output_loss: 5.8038 - question_type_output_accuracy: 0.5868 - question_type_output_loss: 1.0336 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 1.7133 - val_loss: 8.5937 - val_question_output_accuracy: 0.0078 - val_question_output_loss: 5.8148 - val_question_type_output_accuracy: 0.5556 - val_question_type_output_loss: 1.0657\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 308ms/step - answer_output_accuracy: 0.9855 - answer_output_loss: 3.4938 - loss: 10.7212 - question_output_accuracy: 0.0046 - question_output_loss: 6.1959 - question_type_output_accuracy: 0.4903 - question_type_output_loss: 1.0066 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 1.4529 - val_loss: 9.4832 - val_question_output_accuracy: 0.0037 - val_question_output_loss: 6.2063 - val_question_type_output_accuracy: 0.4375 - val_question_type_output_loss: 1.8240\n",
       "Epoch 6/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 167ms/step - answer_output_accuracy: 0.9824 - answer_output_loss: 1.1229 - loss: 8.0884 - question_output_accuracy: 0.0054 - question_output_loss: 5.7361 - question_type_output_accuracy: 0.5394 - question_type_output_loss: 1.1969 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 0.6490 - val_loss: 7.4129 - val_question_output_accuracy: 0.0078 - val_question_output_loss: 5.6864 - val_question_type_output_accuracy: 0.5556 - val_question_type_output_loss: 1.0775\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 296ms/step - answer_output_accuracy: 0.9853 - answer_output_loss: 1.4168 - loss: 8.7444 - question_output_accuracy: 0.0029 - question_output_loss: 6.0149 - question_type_output_accuracy: 0.5007 - question_type_output_loss: 1.3046 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 0.7223 - val_loss: 8.8975 - val_question_output_accuracy: 0.0019 - val_question_output_loss: 6.1260 - val_question_type_output_accuracy: 0.2500 - val_question_type_output_loss: 2.0492\n",
       "Epoch 7/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 171ms/step - answer_output_accuracy: 0.9824 - answer_output_loss: 0.5961 - loss: 7.2279 - question_output_accuracy: 0.0045 - question_output_loss: 5.4419 - question_type_output_accuracy: 0.4340 - question_type_output_loss: 1.1878 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 0.5096 - val_loss: 7.2387 - val_question_output_accuracy: 0.0078 - val_question_output_loss: 5.5038 - val_question_type_output_accuracy: 0.2222 - val_question_type_output_loss: 1.2253\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 316ms/step - answer_output_accuracy: 0.9855 - answer_output_loss: 0.7297 - loss: 7.7458 - question_output_accuracy: 0.0020 - question_output_loss: 5.7405 - question_type_output_accuracy: 0.5653 - question_type_output_loss: 1.2641 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 0.6319 - val_loss: 8.0634 - val_question_output_accuracy: 0.0019 - val_question_output_loss: 6.0919 - val_question_type_output_accuracy: 0.3125 - val_question_type_output_loss: 1.3396\n",
       "Epoch 8/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 183ms/step - answer_output_accuracy: 0.9824 - answer_output_loss: 0.6082 - loss: 6.8731 - question_output_accuracy: 0.0045 - question_output_loss: 5.1820 - question_type_output_accuracy: 0.4132 - question_type_output_loss: 1.0811 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 0.5227 - val_loss: 7.0044 - val_question_output_accuracy: 0.0067 - val_question_output_loss: 5.4302 - val_question_type_output_accuracy: 0.5556 - val_question_type_output_loss: 1.0514\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 300ms/step - answer_output_accuracy: 0.9852 - answer_output_loss: 0.6516 - loss: 7.1078 - question_output_accuracy: 0.0025 - question_output_loss: 5.5006 - question_type_output_accuracy: 0.5424 - question_type_output_loss: 0.9515 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 0.6445 - val_loss: 7.8441 - val_question_output_accuracy: 0.0031 - val_question_output_loss: 6.1414 - val_question_type_output_accuracy: 0.3125 - val_question_type_output_loss: 1.0582\n",
       "Epoch 9/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 164ms/step - answer_output_accuracy: 0.9824 - answer_output_loss: 0.6137 - loss: 6.7673 - question_output_accuracy: 0.0045 - question_output_loss: 4.9309 - question_type_output_accuracy: 0.4815 - question_type_output_loss: 1.3263 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 0.5185 - val_loss: 6.9885 - val_question_output_accuracy: 0.0044 - val_question_output_loss: 5.4844 - val_question_type_output_accuracy: 0.6667 - val_question_type_output_loss: 0.9857\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 301ms/step - answer_output_accuracy: 0.9855 - answer_output_loss: 0.5892 - loss: 6.9181 - question_output_accuracy: 0.0027 - question_output_loss: 5.2758 - question_type_output_accuracy: 0.4993 - question_type_output_loss: 1.0541 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 0.6490 - val_loss: 7.9863 - val_question_output_accuracy: 0.0031 - val_question_output_loss: 6.2535 - val_question_type_output_accuracy: 0.4375 - val_question_type_output_loss: 1.0839\n",
       "Epoch 10/10\n",
-      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 176ms/step - answer_output_accuracy: 0.9824 - answer_output_loss: 0.6061 - loss: 6.6094 - question_output_accuracy: 0.0045 - question_output_loss: 4.8379 - question_type_output_accuracy: 0.4132 - question_type_output_loss: 1.1360 - val_answer_output_accuracy: 0.9856 - val_answer_output_loss: 0.4949 - val_loss: 7.1082 - val_question_output_accuracy: 0.0044 - val_question_output_loss: 5.5818 - val_question_type_output_accuracy: 0.5556 - val_question_type_output_loss: 1.0315\n",
+      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 296ms/step - answer_output_accuracy: 0.9854 - answer_output_loss: 0.5727 - loss: 6.6346 - question_output_accuracy: 0.0027 - question_output_loss: 5.0879 - question_type_output_accuracy: 0.5319 - question_type_output_loss: 0.9754 - val_answer_output_accuracy: 0.9844 - val_answer_output_loss: 0.6354 - val_loss: 8.3545 - val_question_output_accuracy: 0.0037 - val_question_output_loss: 6.4062 - val_question_type_output_accuracy: 0.2500 - val_question_type_output_loss: 1.3129\n",
       "✅ Model training complete and saved!\n"
      ]
     }
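The log names three output heads (question_output, answer_output, question_type_output), each reporting its own loss and accuracy, with the total loss roughly their sum; the initial question_type_output_loss of about 1.0986 equals ln(3), consistent with a 3-class crossentropy at random initialization. Below is a hedged sketch of a compile step that would produce logs of this shape. It is not the notebook's actual code: the layer sizes, optimizer, and exact loss variants are assumptions, and the stand-in model only mirrors the two inputs and three named outputs.

```python
import tensorflow as tf

VOCAB_SIZE, MAX_LENGTH, NUM_TYPES = 500, 100, 3  # illustrative sizes

# Two inputs, matching model.predict([context, decoder_input]) in testing.py.
context_in = tf.keras.Input(shape=(MAX_LENGTH,), name="context_input")
decoder_in = tf.keras.Input(shape=(MAX_LENGTH,), name="decoder_input")

embedding = tf.keras.layers.Embedding(VOCAB_SIZE, 64)
encoded = tf.keras.layers.LSTM(64, return_sequences=True)(embedding(context_in))
decoded = tf.keras.layers.LSTM(64, return_sequences=True)(embedding(decoder_in))

# Three named heads, matching the metric names in the training log.
question_out = tf.keras.layers.Dense(VOCAB_SIZE, activation="softmax", name="question_output")(decoded)
answer_out = tf.keras.layers.Dense(VOCAB_SIZE, activation="softmax", name="answer_output")(decoded)
pooled = tf.keras.layers.GlobalAveragePooling1D()(encoded)
type_out = tf.keras.layers.Dense(NUM_TYPES, activation="softmax", name="question_type_output")(pooled)

model = tf.keras.Model([context_in, decoder_in], [question_out, answer_out, type_out])
model.compile(
    optimizer="adam",
    loss={
        "question_output": "sparse_categorical_crossentropy",
        "answer_output": "sparse_categorical_crossentropy",
        "question_type_output": "sparse_categorical_crossentropy",
    },
    metrics={
        "question_output": ["accuracy"],
        "answer_output": ["accuracy"],
        "question_type_output": ["accuracy"],
    },
)
```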
@@ -432,29 +432,29 @@
   },
   {
    "cell_type": "code",
-    "execution_count": 49,
+    "execution_count": 29,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 255ms/step\n",
+      "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 264ms/step\n",
       "=== Evaluation on Test Data ===\n",
       "Classification Report for Question Type:\n",
       "              precision    recall  f1-score   support\n",
       "\n",
-      "           0       0.33      0.40      0.36         5\n",
-      "           1       0.33      0.50      0.40         4\n",
-      "           2       0.00      0.00      0.00         3\n",
+      "           0       0.00      0.00      0.00         8\n",
+      "           1       0.50      0.83      0.62         6\n",
+      "           2       0.44      0.80      0.57         5\n",
       "\n",
-      "    accuracy                           0.33        12\n",
-      "   macro avg       0.22      0.30      0.25        12\n",
-      "weighted avg       0.25      0.33      0.28        12\n",
+      "    accuracy                           0.47        19\n",
+      "   macro avg       0.31      0.54      0.40        19\n",
+      "weighted avg       0.27      0.47      0.35        19\n",
       "\n",
-      "Accuracy: 0.3333333333333333\n",
-      "Precision: 0.25\n",
-      "Recall: 0.3333333333333333\n",
+      "Accuracy: 0.47368421052631576\n",
+      "Precision: 0.27485380116959063\n",
+      "Recall: 0.47368421052631576\n",
       "BLEU score for first test sample (question generation): 0\n"
      ]
     },
@@ -485,8 +485,7 @@
     "print(\"Precision:\", precision_score(qtype_test, pred_qtype_labels, average='weighted'))\n",
     "print(\"Recall:\", recall_score(qtype_test, pred_qtype_labels, average='weighted'))\n",
     "\n",
     "# Optional: Evaluate sequence generation using BLEU score for the first sample\n",
-    "import nltk\n",
     "\n",
     "def sequence_to_text(sequence, tokenizer):\n",
     "    return [tokenizer.index_word.get(idx, \"<OOV>\") for idx in sequence if idx != 0]\n",
     "\n",
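A note on the "BLEU score ... 0" result above: with NLTK's default 4-gram weights and no smoothing, `sentence_bleu` returns 0 whenever the hypothesis shares no higher-order n-grams with the reference, which is expected for a barely trained generator. A hedged sketch of a smoothed variant that yields a more informative score; the token lists are illustrative, not taken from the notebook.

```python
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu

reference = ["apa", "kepanjangan", "dari", "voc"]   # illustrative reference tokens
hypothesis = ["apa", "yang", "dari", "voc"]         # illustrative generated tokens

# Without smoothing this pair scores 0 (no matching 4-grams); smoothing avoids that.
smoothie = SmoothingFunction().method1
score = sentence_bleu([reference], hypothesis, smoothing_function=smoothie)
print(f"Smoothed BLEU: {score:.4f}")
```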