{ "cells": [ { "cell_type": "code", "execution_count": 51, "id": "9bf2159a", "metadata": {}, "outputs": [], "source": [ "import json\n", "import numpy as np\n", "from pathlib import Path\n", "from sklearn.model_selection import train_test_split\n", "from tensorflow.keras.preprocessing.text import Tokenizer\n", "from tensorflow.keras.preprocessing.sequence import pad_sequences\n", "from tensorflow.keras.utils import to_categorical\n", "\n", "from tensorflow.keras.models import Model\n", "from tensorflow.keras.layers import (\n", " Input,\n", " Embedding,\n", " LSTM,\n", " Concatenate,\n", " Dense,\n", " TimeDistributed,\n", ")\n", "from tensorflow.keras.callbacks import EarlyStopping\n", "from sklearn.metrics import classification_report\n", "from collections import Counter" ] }, { "cell_type": "code", "execution_count": 52, "id": "50118278", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", " Jumlah data valid: 70 / 70\n", " Jumlah data tidak valid: 0\n", "Counter({'tof': 30, 'isian': 30, 'opsi': 10})\n" ] } ], "source": [ "# Load raw data\n", "with open(\"qg_dataset.json\", encoding=\"utf-8\") as f:\n", " raw_data = json.load(f)\n", "\n", "# Validasi lengkap\n", "required_keys = {\"tokens\", \"ner\", \"srl\", \"question\", \"answer\", \"type\"}\n", "valid_data = []\n", "invalid_data = []\n", "\n", "for idx, item in enumerate(raw_data):\n", " error_messages = []\n", "\n", " if not isinstance(item, dict):\n", " error_messages.append(\"bukan dictionary\")\n", "\n", " missing_keys = required_keys - item.keys()\n", " if missing_keys:\n", " error_messages.append(f\"missing keys: {missing_keys}\")\n", "\n", " if not error_messages:\n", " # Cek tipe data dan None\n", " if (not isinstance(item[\"tokens\"], list) or\n", " not isinstance(item[\"ner\"], list) or\n", " not isinstance(item[\"srl\"], list) or\n", " not isinstance(item[\"question\"], list) or\n", " not isinstance(item[\"answer\"], list) or\n", " not isinstance(item[\"type\"], str)):\n", " error_messages.append(\"field type tidak sesuai\")\n", " \n", " if error_messages:\n", " print(f\"\\n Index {idx} | Masalah: {', '.join(error_messages)}\")\n", " print(json.dumps(item, indent=2, ensure_ascii=False))\n", " invalid_data.append(item)\n", " continue\n", "\n", " valid_data.append(item)\n", "\n", "# Statistik\n", "print(f\"\\n Jumlah data valid: {len(valid_data)} / {len(raw_data)}\")\n", "print(f\" Jumlah data tidak valid: {len(invalid_data)}\")\n", "\n", "# Proses data valid\n", "tokens = [[t.lower().strip() for t in item[\"tokens\"]] for item in valid_data]\n", "ner_tags = [item[\"ner\"] for item in valid_data]\n", "srl_tags = [item[\"srl\"] for item in valid_data]\n", "questions = [[token.lower().strip() for token in item[\"question\"]] for item in valid_data]\n", "answers = [[token.lower().strip() for token in item[\"answer\"]] for item in valid_data]\n", "types = [item[\"type\"] for item in valid_data]\n", "\n", "type_counts = Counter(types)\n", "\n", "print(type_counts)\n" ] }, { "cell_type": "code", "execution_count": 53, "id": "4e3a0088", "metadata": {}, "outputs": [], "source": [ "# tokenize\n", "token_tok = Tokenizer(lower=False, oov_token=\"UNK\")\n", "token_ner = Tokenizer(lower=False)\n", "token_srl = Tokenizer(lower=False)\n", "token_q = Tokenizer(lower=False)\n", "token_a = Tokenizer(lower=False)\n", "token_type = Tokenizer(lower=False)\n", "\n", "token_tok.fit_on_texts(tokens)\n", "token_ner.fit_on_texts(ner_tags)\n", "token_srl.fit_on_texts(srl_tags)\n", 
"token_q.fit_on_texts(questions)\n", "token_a.fit_on_texts(answers)\n", "token_type.fit_on_texts(types)\n", "\n", "\n", "maxlen = 20" ] }, { "cell_type": "code", "execution_count": 54, "id": "555f9e22", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{'isian', 'tof', 'opsi'}\n" ] } ], "source": [ "\n", "X_tok = pad_sequences(\n", " token_tok.texts_to_sequences(tokens), padding=\"post\", maxlen=maxlen\n", ")\n", "X_ner = pad_sequences(\n", " token_ner.texts_to_sequences(ner_tags), padding=\"post\", maxlen=maxlen\n", ")\n", "X_srl = pad_sequences(\n", " token_srl.texts_to_sequences(srl_tags), padding=\"post\", maxlen=maxlen\n", ")\n", "y_q = pad_sequences(token_q.texts_to_sequences(questions), padding=\"post\", maxlen=maxlen)\n", "y_a = pad_sequences(token_a.texts_to_sequences(answers), padding=\"post\", maxlen=maxlen)\n", "\n", "print(set(types))\n", "\n", "y_type = [seq[0] for seq in token_type.texts_to_sequences(types)] # list of int\n", "y_type = to_categorical(np.array(y_type) - 1, num_classes=len(token_type.word_index))\n", "\n" ] }, { "cell_type": "code", "execution_count": 55, "id": "f530cfe7", "metadata": {}, "outputs": [], "source": [ "X_tok_train, X_tok_test, X_ner_train, X_ner_test, X_srl_train, X_srl_test, \\\n", "y_q_train, y_q_test, y_a_train, y_a_test, y_type_train, y_type_test = train_test_split(\n", " X_tok, X_ner, X_srl, y_q, y_a, y_type, test_size=0.2, random_state=42\n", ")\n", "\n", "X_train = [X_tok_train, X_ner_train, X_srl_train]\n", "X_test = [X_tok_test, X_ner_test, X_srl_test]" ] }, { "cell_type": "code", "execution_count": 56, "id": "255e2a9a", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Model: \"functional_5\"\n",
       "
\n" ], "text/plain": [ "\u001b[1mModel: \"functional_5\"\u001b[0m\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n",
       "┃ Layer (type)         Output Shape          Param #  Connected to      ┃\n",
       "┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n",
       "│ tok_input           │ (None, None)      │          0 │ -                 │\n",
       "│ (InputLayer)        │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ ner_input           │ (None, None)      │          0 │ -                 │\n",
       "│ (InputLayer)        │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ srl_input           │ (None, None)      │          0 │ -                 │\n",
       "│ (InputLayer)        │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ embedding_15        │ (None, None, 128) │     41,600 │ tok_input[0][0]   │\n",
       "│ (Embedding)         │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ embedding_16        │ (None, None, 16)  │        272 │ ner_input[0][0]   │\n",
       "│ (Embedding)         │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ embedding_17        │ (None, None, 16)  │        272 │ srl_input[0][0]   │\n",
       "│ (Embedding)         │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ concatenate_5       │ (None, None, 160) │          0 │ embedding_15[0][ │\n",
       "│ (Concatenate)       │                   │            │ embedding_16[0][ │\n",
       "│                     │                   │            │ embedding_17[0][ │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ lstm_5 (LSTM)       │ (None, None, 256) │    427,008 │ concatenate_5[0]… │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ get_item_5          │ (None, 256)       │          0 │ lstm_5[0][0]      │\n",
       "│ (GetItem)           │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ question_output     │ (None, None, 272) │     69,904 │ lstm_5[0][0]      │\n",
       "│ (TimeDistributed)   │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ answer_output       │ (None, None, 60)  │     15,420 │ lstm_5[0][0]      │\n",
       "│ (TimeDistributed)   │                   │            │                   │\n",
       "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
       "│ type_output (Dense) │ (None, 3)         │        771 │ get_item_5[0][0]  │\n",
       "└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n",
       "
\n" ], "text/plain": [ "┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n", "┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mConnected to \u001b[0m\u001b[1m \u001b[0m┃\n", "┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n", "│ tok_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n", "│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ ner_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n", "│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ srl_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n", "│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ embedding_15 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m41,600\u001b[0m │ tok_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ embedding_16 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m16\u001b[0m) │ \u001b[38;5;34m272\u001b[0m │ ner_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ embedding_17 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m16\u001b[0m) │ \u001b[38;5;34m272\u001b[0m │ srl_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ concatenate_5 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m160\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ embedding_15[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m…\u001b[0m │\n", "│ (\u001b[38;5;33mConcatenate\u001b[0m) │ │ │ embedding_16[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m…\u001b[0m │\n", "│ │ │ │ embedding_17[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m…\u001b[0m │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ lstm_5 (\u001b[38;5;33mLSTM\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m427,008\u001b[0m │ concatenate_5[\u001b[38;5;34m0\u001b[0m]… │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ get_item_5 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ lstm_5[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ (\u001b[38;5;33mGetItem\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ question_output │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m272\u001b[0m) │ \u001b[38;5;34m69,904\u001b[0m │ lstm_5[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ 
(\u001b[38;5;33mTimeDistributed\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ answer_output │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m60\u001b[0m) │ \u001b[38;5;34m15,420\u001b[0m │ lstm_5[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "│ (\u001b[38;5;33mTimeDistributed\u001b[0m) │ │ │ │\n", "├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n", "│ type_output (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m771\u001b[0m │ get_item_5[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n", "└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
 Total params: 555,247 (2.12 MB)\n",
       "
\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m555,247\u001b[0m (2.12 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
 Trainable params: 555,247 (2.12 MB)\n",
       "
\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m555,247\u001b[0m (2.12 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
 Non-trainable params: 0 (0.00 B)\n",
       "
\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 3s/step - answer_output_accuracy: 0.0030 - answer_output_loss: 4.1163 - loss: 10.8193 - question_output_accuracy: 0.0030 - question_output_loss: 5.6031 - type_output_accuracy: 0.2000 - type_output_loss: 1.0999 - val_answer_output_accuracy: 0.8833 - val_answer_output_loss: 4.0123 - val_loss: 10.6706 - val_question_output_accuracy: 0.6000 - val_question_output_loss: 5.5595 - val_type_output_accuracy: 0.1667 - val_type_output_loss: 1.0987\n", "Epoch 2/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.8800 - answer_output_loss: 4.0174 - loss: 10.6778 - question_output_accuracy: 0.5640 - question_output_loss: 5.5631 - type_output_accuracy: 0.4200 - type_output_loss: 1.0973 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 3.8939 - val_loss: 10.4860 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 5.4945 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0976\n", "Epoch 3/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 91ms/step - answer_output_accuracy: 0.9370 - answer_output_loss: 3.9075 - loss: 10.5064 - question_output_accuracy: 0.5870 - question_output_loss: 5.5043 - type_output_accuracy: 0.6200 - type_output_loss: 1.0946 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 3.7157 - val_loss: 10.1938 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 5.3815 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0965\n", "Epoch 4/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 90ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 3.7435 - loss: 10.2381 - question_output_accuracy: 0.5890 - question_output_loss: 5.4027 - type_output_accuracy: 0.6200 - type_output_loss: 1.0919 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 3.4257 - val_loss: 9.7085 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 5.1873 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0955\n", "Epoch 5/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 90ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 3.4788 - loss: 9.7970 - question_output_accuracy: 0.5850 - question_output_loss: 5.2288 - type_output_accuracy: 0.6600 - type_output_loss: 1.0894 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 2.9617 - val_loss: 8.9146 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 4.8585 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0944\n", "Epoch 6/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 3.0565 - loss: 9.0790 - question_output_accuracy: 0.5850 - question_output_loss: 4.9355 - type_output_accuracy: 0.6600 - type_output_loss: 1.0869 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 2.3649 - val_loss: 7.8024 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 4.3441 - val_type_output_accuracy: 
0.3333 - val_type_output_loss: 1.0933\n", "Epoch 7/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 2.5004 - loss: 8.0585 - question_output_accuracy: 0.5850 - question_output_loss: 4.4735 - type_output_accuracy: 0.6600 - type_output_loss: 1.0845 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 1.8898 - val_loss: 6.6823 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 3.7005 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0920\n", "Epoch 8/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 88ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 2.0239 - loss: 6.9823 - question_output_accuracy: 0.5850 - question_output_loss: 3.8764 - type_output_accuracy: 0.6600 - type_output_loss: 1.0821 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 1.5873 - val_loss: 5.7713 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 3.0934 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0906\n", "Epoch 9/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 93ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 1.6939 - loss: 6.0594 - question_output_accuracy: 0.5850 - question_output_loss: 3.2857 - type_output_accuracy: 0.6600 - type_output_loss: 1.0798 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 1.3585 - val_loss: 5.0778 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.6303 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0890\n", "Epoch 10/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 97ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 1.4268 - loss: 5.3244 - question_output_accuracy: 0.5850 - question_output_loss: 2.8203 - type_output_accuracy: 0.6600 - type_output_loss: 1.0774 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 1.1559 - val_loss: 4.5630 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.3200 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0871\n", "Epoch 11/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 93ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 1.1880 - loss: 4.7795 - question_output_accuracy: 0.5850 - question_output_loss: 2.5167 - type_output_accuracy: 0.6600 - type_output_loss: 1.0748 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.9716 - val_loss: 4.2001 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.1437 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0848\n", "Epoch 12/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.9857 - loss: 4.4356 - question_output_accuracy: 0.5850 - question_output_loss: 2.3778 - type_output_accuracy: 0.6600 - type_output_loss: 1.0721 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.8171 - val_loss: 3.9799 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.0807 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0822\n", "Epoch 13/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 92ms/step - 
answer_output_accuracy: 0.9380 - answer_output_loss: 0.8280 - loss: 4.2715 - question_output_accuracy: 0.5850 - question_output_loss: 2.3745 - type_output_accuracy: 0.6600 - type_output_loss: 1.0690 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.6995 - val_loss: 3.8760 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.0974 - val_type_output_accuracy: 0.3333 - val_type_output_loss: 1.0790\n", "Epoch 14/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 91ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.7110 - loss: 4.2264 - question_output_accuracy: 0.5850 - question_output_loss: 2.4498 - type_output_accuracy: 0.6400 - type_output_loss: 1.0656 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.6143 - val_loss: 3.8415 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.1518 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0754\n", "Epoch 15/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.6254 - loss: 4.2353 - question_output_accuracy: 0.5850 - question_output_loss: 2.5482 - type_output_accuracy: 0.6000 - type_output_loss: 1.0617 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.5529 - val_loss: 3.8335 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.2091 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0714\n", "Epoch 16/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 94ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.5623 - loss: 4.2530 - question_output_accuracy: 0.5850 - question_output_loss: 2.6334 - type_output_accuracy: 0.6000 - type_output_loss: 1.0573 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.5083 - val_loss: 3.8255 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.2502 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0670\n", "Epoch 17/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 96ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.5150 - loss: 4.2561 - question_output_accuracy: 0.5850 - question_output_loss: 2.6886 - type_output_accuracy: 0.6000 - type_output_loss: 1.0525 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.4752 - val_loss: 3.8053 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.2678 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0623\n", "Epoch 18/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 104ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.4790 - loss: 4.2357 - question_output_accuracy: 0.5850 - question_output_loss: 2.7094 - type_output_accuracy: 0.6000 - type_output_loss: 1.0473 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.4503 - val_loss: 3.7689 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.2612 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0573\n", "Epoch 19/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 94ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.4511 - loss: 4.1904 - question_output_accuracy: 0.5850 - question_output_loss: 2.6974 - type_output_accuracy: 
0.5600 - type_output_loss: 1.0419 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.4313 - val_loss: 3.7162 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.2327 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0523\n", "Epoch 20/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 94ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.4293 - loss: 4.1218 - question_output_accuracy: 0.5850 - question_output_loss: 2.6564 - type_output_accuracy: 0.5600 - type_output_loss: 1.0361 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.4164 - val_loss: 3.6494 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.1859 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0471\n", "Epoch 21/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 93ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.4118 - loss: 4.0332 - question_output_accuracy: 0.5850 - question_output_loss: 2.5912 - type_output_accuracy: 0.5600 - type_output_loss: 1.0302 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.4046 - val_loss: 3.5722 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.1256 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0420\n", "Epoch 22/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3975 - loss: 3.9297 - question_output_accuracy: 0.5850 - question_output_loss: 2.5080 - type_output_accuracy: 0.5600 - type_output_loss: 1.0242 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3951 - val_loss: 3.4909 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.0587 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0370\n", "Epoch 23/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 94ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3858 - loss: 3.8184 - question_output_accuracy: 0.5850 - question_output_loss: 2.4147 - type_output_accuracy: 0.5600 - type_output_loss: 1.0180 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3875 - val_loss: 3.4143 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 1.9948 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0321\n", "Epoch 24/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 91ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3759 - loss: 3.7097 - question_output_accuracy: 0.5850 - question_output_loss: 2.3222 - type_output_accuracy: 0.5600 - type_output_loss: 1.0116 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3812 - val_loss: 3.3557 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 1.9473 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0273\n", "Epoch 25/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 95ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3674 - loss: 3.6180 - question_output_accuracy: 0.5850 - question_output_loss: 2.2455 - type_output_accuracy: 0.5600 - type_output_loss: 1.0051 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3759 - val_loss: 3.3316 - val_question_output_accuracy: 0.6250 - 
val_question_output_loss: 1.9330 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0227\n", "Epoch 26/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 96ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3600 - loss: 3.5615 - question_output_accuracy: 0.5850 - question_output_loss: 2.2030 - type_output_accuracy: 0.5400 - type_output_loss: 0.9985 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3715 - val_loss: 3.3519 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 1.9622 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0183\n", "Epoch 27/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 90ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3534 - loss: 3.5516 - question_output_accuracy: 0.5850 - question_output_loss: 2.2064 - type_output_accuracy: 0.5400 - type_output_loss: 0.9917 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3677 - val_loss: 3.4014 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.0195 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0141\n", "Epoch 28/30\n", "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 96ms/step - answer_output_accuracy: 0.9380 - answer_output_loss: 0.3474 - loss: 3.5737 - question_output_accuracy: 0.5850 - question_output_loss: 2.2414 - type_output_accuracy: 0.5400 - type_output_loss: 0.9848 - val_answer_output_accuracy: 0.9250 - val_answer_output_loss: 0.3645 - val_loss: 3.4429 - val_question_output_accuracy: 0.6250 - val_question_output_loss: 2.0682 - val_type_output_accuracy: 0.5000 - val_type_output_loss: 1.0102\n" ] } ], "source": [ "\n", "inp_tok = Input(shape=(None,), name=\"tok_input\")\n", "inp_ner = Input(shape=(None,), name=\"ner_input\")\n", "inp_srl = Input(shape=(None,), name=\"srl_input\")\n", "\n", "emb_tok = Embedding(input_dim=len(token_tok.word_index) + 1, output_dim=128)(inp_tok)\n", "emb_ner = Embedding(input_dim=len(token_ner.word_index) + 1, output_dim=16)(inp_ner)\n", "emb_srl = Embedding(input_dim=len(token_srl.word_index) + 1, output_dim=16)(inp_srl)\n", "\n", "# emb_tok = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_tok)\n", "# emb_ner = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_ner)\n", "# emb_srl = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_srl)\n", "\n", "merged = Concatenate()([emb_tok, emb_ner, emb_srl])\n", "\n", "x = LSTM(256, return_sequences=True)(merged)\n", "\n", "out_question = TimeDistributed(Dense(len(token_q.word_index) + 1, activation=\"softmax\"), name=\"question_output\")(x)\n", "out_answer = TimeDistributed(Dense(len(token_a.word_index) + 1, activation=\"softmax\"), name=\"answer_output\")(x)\n", "out_type = Dense(len(token_type.word_index), activation=\"softmax\", name=\"type_output\")(\n", " x[:, 0, :]\n", ") # gunakan step pertama\n", "\n", "model = Model(\n", " inputs=[inp_tok, inp_ner, inp_srl], outputs=[out_question, out_answer, out_type]\n", ")\n", "model.compile(\n", " optimizer=\"adam\",\n", " loss={\n", " \"question_output\": \"sparse_categorical_crossentropy\",\n", " \"answer_output\": \"sparse_categorical_crossentropy\",\n", " \"type_output\": \"categorical_crossentropy\",\n", " },\n", " metrics={\n", " \"question_output\": \"accuracy\",\n", " \"answer_output\": \"accuracy\",\n", " \"type_output\": \"accuracy\",\n", " },\n", ")\n", 
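    "\n",
    "# Note on the losses above: the two sequence heads use sparse_categorical_crossentropy\n",
    "# because their targets stay integer id sequences (expanded to (batch, maxlen, 1) in\n",
    "# model.fit below), while type_output receives one-hot vectors and therefore uses\n",
    "# categorical_crossentropy.\n",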
"\n", "model.summary()\n", "\n", "# ----------------------------------------------------------------------------\n", "# 5. TRAINING\n", "# ----------------------------------------------------------------------------\n", "model.fit(\n", " X_train,\n", " {\n", " \"question_output\": np.expand_dims(y_q_train, -1),\n", " \"answer_output\": np.expand_dims(y_a_train, -1),\n", " \"type_output\": y_type_train,\n", " },\n", " batch_size=64,\n", " epochs=30,\n", " validation_split=0.1,\n", " callbacks=[EarlyStopping(patience=3, restore_best_weights=True)],\n", ")\n", "\n", "import pickle\n", "\n", "\n", "model.save(\"new_model_lstm_qg.keras\")\n", "with open(\"tokenizers.pkl\", \"wb\") as f:\n", " pickle.dump({\n", " \"token\": token_tok,\n", " \"ner\": token_ner,\n", " \"srl\": token_srl,\n", " \"question\": token_q,\n", " \"answer\": token_a,\n", " \"type\": token_type\n", " }, f)\n", "\n" ] }, { "cell_type": "code", "execution_count": 57, "id": "06fd86c7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 239ms/step\n", "\n", "=== Akurasi Detail ===\n", "Question Accuracy (Token-level): 0.0000\n", "Answer Accuracy (Token-level) : 0.0000\n", "Type Accuracy (Class-level) : 0.29\n" ] } ], "source": [ "\n", "def token_level_accuracy(y_true, y_pred):\n", " correct = 0\n", " total = 0\n", " for true_seq, pred_seq in zip(y_true, y_pred):\n", " for t, p in zip(true_seq, pred_seq):\n", " if t != 0: # ignore padding\n", " total += 1\n", " if t == p:\n", " correct += 1\n", " return correct / total if total > 0 else 0\n", "\n", "\n", "# Predict on test set\n", "y_pred_q, y_pred_a, y_pred_type = model.predict(X_test)\n", "\n", "# Decode predictions to class indices\n", "y_pred_q = np.argmax(y_pred_q, axis=-1)\n", "y_pred_a = np.argmax(y_pred_a, axis=-1)\n", "y_pred_type = np.argmax(y_pred_type, axis=-1)\n", "y_true_type = np.argmax(y_type_test, axis=-1)\n", "\n", "# Calculate token-level accuracy\n", "acc_q = token_level_accuracy(y_q_test, y_pred_q)\n", "acc_a = token_level_accuracy(y_a_test, y_pred_a)\n", "\n", "# Type classification report\n", "report_type = classification_report(y_true_type, y_pred_type, zero_division=0)\n", "\n", "# Print Results\n", "print(\"\\n=== Akurasi Detail ===\")\n", "print(f\"Question Accuracy (Token-level): {acc_q:.4f}\")\n", "print(f\"Answer Accuracy (Token-level) : {acc_a:.4f}\")\n", "print(f\"Type Accuracy (Class-level) : {np.mean(y_true_type == y_pred_type):.2f}\")" ] }, { "cell_type": "code", "execution_count": 58, "id": "b17b6470", "metadata": {}, "outputs": [], "source": [ "# import sacrebleu\n", "# from sacrebleu.metrics import BLEU # optional kalau mau smoothing/effective_order\n", "\n", "# idx2tok = {v:k for k,v in word2idx.items()}\n", "# PAD_ID = word2idx[\"PAD\"]\n", "# SOS_ID = word2idx.get(\"SOS\", None)\n", "# EOS_ID = word2idx.get(\"EOS\", None)\n", "\n", "# def seq2str(seq):\n", "# \"\"\"Konversi list index -> kalimat string, sambil buang token spesial.\"\"\"\n", "# toks = [idx2tok[i] for i in seq\n", "# if i not in {PAD_ID, SOS_ID, EOS_ID}]\n", "# return \" \".join(toks).strip().lower()\n", "\n", "# bleu_metric = BLEU(effective_order=True) # lebih stabil utk kalimat pendek\n", "\n", "# def bleu_corpus(pred_seqs, true_seqs):\n", "# preds = [seq2str(p) for p in pred_seqs]\n", "# refs = [[seq2str(t)] for t in true_seqs] # list‑of‑list, satu ref/kalimat\n", "# return bleu_metric.corpus_score(preds, refs).score\n" ] }, { 
"cell_type": "code", "execution_count": 59, "id": "d5ed106c", "metadata": {}, "outputs": [], "source": [ "\n", "# flat_true_a, flat_pred_a = flatten_valid(y_a_test, y_pred_a_class)\n", "# print(\"\\n=== Classification Report: ANSWER ===\")\n", "# print(classification_report(flat_true_a, flat_pred_a))\n" ] }, { "cell_type": "code", "execution_count": 60, "id": "aa3860de", "metadata": {}, "outputs": [], "source": [ "\n", "# print(\"\\n=== Classification Report: TYPE ===\")\n", "# print(classification_report(y_true_type_class, y_pred_type_class))" ] } ], "metadata": { "kernelspec": { "display_name": "myenv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }