{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "9bf2159a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2025-04-29 19:13:19.968628: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2025-04-29 19:13:19.975821: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
"2025-04-29 19:13:20.061422: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
"2025-04-29 19:13:20.109564: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
"E0000 00:00:1745928800.155971 272184 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"E0000 00:00:1745928800.168166 272184 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"W0000 00:00:1745928800.263286 272184 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1745928800.263312 272184 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1745928800.263313 272184 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1745928800.263314 272184 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"2025-04-29 19:13:20.274608: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
}
],
"source": [
"import json\n",
"import numpy as np\n",
"from pathlib import Path\n",
"from sklearn.model_selection import train_test_split\n",
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"from tensorflow.keras.utils import to_categorical\n",
"\n",
"from tensorflow.keras.models import Model\n",
"from tensorflow.keras.layers import (\n",
"    Input,\n",
"    Embedding,\n",
"    LSTM,\n",
"    Concatenate,\n",
"    Dense,\n",
"    TimeDistributed,\n",
")\n",
"from tensorflow.keras.callbacks import EarlyStopping\n",
"from sklearn.metrics import classification_report\n",
"from collections import Counter"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "50118278",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
" Valid records: 287 / 287\n",
" Invalid records: 0\n",
"Counter({'ftb': 202, 'tof': 45, 'none': 40})\n"
]
}
],
"source": [
"# Load raw data\n",
"with open(\"normalize_dataset.json\", encoding=\"utf-8\") as f:\n",
"    raw_data = json.load(f)\n",
"\n",
"# Full validation\n",
"required_keys = {\"tokens\", \"ner\", \"srl\", \"question\", \"answer\", \"type\"}\n",
"valid_data = []\n",
"invalid_data = []\n",
"\n",
"for idx, item in enumerate(raw_data):\n",
"    error_messages = []\n",
"\n",
"    if not isinstance(item, dict):\n",
"        error_messages.append(\"not a dictionary\")\n",
"    else:\n",
"        missing_keys = required_keys - item.keys()\n",
"        if missing_keys:\n",
"            error_messages.append(f\"missing keys: {missing_keys}\")\n",
"\n",
"    if not error_messages:\n",
"        # Check field types and None values\n",
"        if (not isinstance(item[\"tokens\"], list) or\n",
"            not isinstance(item[\"ner\"], list) or\n",
"            not isinstance(item[\"srl\"], list) or\n",
"            not isinstance(item[\"question\"], list) or\n",
"            not isinstance(item[\"answer\"], list) or\n",
"            not isinstance(item[\"type\"], str)):\n",
"            error_messages.append(\"field type mismatch\")\n",
"\n",
"    if error_messages:\n",
"        print(f\"\\n Index {idx} | Problem: {', '.join(error_messages)}\")\n",
"        print(json.dumps(item, indent=2, ensure_ascii=False))\n",
"        invalid_data.append(item)\n",
"        continue\n",
"\n",
"    valid_data.append(item)\n",
"\n",
"# Statistics\n",
"print(f\"\\n Valid records: {len(valid_data)} / {len(raw_data)}\")\n",
"print(f\" Invalid records: {len(invalid_data)}\")\n",
"\n",
"# Process the valid data\n",
"tokens = [[t.lower().strip() for t in item[\"tokens\"]] for item in valid_data]\n",
"ner_tags = [item[\"ner\"] for item in valid_data]\n",
"srl_tags = [item[\"srl\"] for item in valid_data]\n",
"questions = [[token.lower().strip() for token in item[\"question\"]] for item in valid_data]\n",
"answers = [[token.lower().strip() for token in item[\"answer\"]] for item in valid_data]\n",
"types = [item[\"type\"] for item in valid_data]\n",
"\n",
"type_counts = Counter(types)\n",
"\n",
"print(type_counts)\n"
]
},
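{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4d",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: inspect one validated record to see the schema the checks\n",
"# above expect (parallel `tokens`/`ner`/`srl` lists plus `question`, `answer`, and a\n",
"# `type` string such as 'ftb', 'tof' or 'none').\n",
"print(json.dumps(valid_data[0], indent=2, ensure_ascii=False))"
]
},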
{
"cell_type": "code",
"execution_count": 3,
"id": "4e3a0088",
"metadata": {},
"outputs": [],
"source": [
"# tokenize\n",
"token_tok = Tokenizer(lower=False, oov_token=\"UNK\")\n",
"token_ner = Tokenizer(lower=False)\n",
"token_srl = Tokenizer(lower=False)\n",
"token_q = Tokenizer(lower=False)\n",
"token_a = Tokenizer(lower=False)\n",
"token_type = Tokenizer(lower=False)\n",
"\n",
"token_tok.fit_on_texts(tokens)\n",
"token_ner.fit_on_texts(ner_tags)\n",
"token_srl.fit_on_texts(srl_tags)\n",
"token_q.fit_on_texts(questions)\n",
"token_a.fit_on_texts(answers)\n",
"token_type.fit_on_texts(types)\n",
"\n",
"\n",
"maxlen = 20"
]
},
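{
"cell_type": "code",
"execution_count": null,
"id": "2b3c4d5e",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: the fitted vocabulary sizes drive the Embedding input_dim\n",
"# and the softmax widths in the model below (word_index is 1-based, so those layers\n",
"# add +1 to reserve index 0 for padding).\n",
"for name, tok in [(\"tokens\", token_tok), (\"ner\", token_ner), (\"srl\", token_srl),\n",
"                  (\"question\", token_q), (\"answer\", token_a), (\"type\", token_type)]:\n",
"    print(f\"{name}: {len(tok.word_index)} entries\")"
]
},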
{
"cell_type": "code",
"execution_count": 4,
"id": "555f9e22",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'ftb', 'tof', 'none'}\n"
]
}
],
"source": [
"\n",
"X_tok = pad_sequences(\n",
"    token_tok.texts_to_sequences(tokens), padding=\"post\", maxlen=maxlen\n",
")\n",
"X_ner = pad_sequences(\n",
"    token_ner.texts_to_sequences(ner_tags), padding=\"post\", maxlen=maxlen\n",
")\n",
"X_srl = pad_sequences(\n",
"    token_srl.texts_to_sequences(srl_tags), padding=\"post\", maxlen=maxlen\n",
")\n",
"y_q = pad_sequences(token_q.texts_to_sequences(questions), padding=\"post\", maxlen=maxlen)\n",
"y_a = pad_sequences(token_a.texts_to_sequences(answers), padding=\"post\", maxlen=maxlen)\n",
"\n",
"print(set(types))\n",
"\n",
"y_type = [seq[0] for seq in token_type.texts_to_sequences(types)] # list of int\n",
"y_type = to_categorical(np.array(y_type) - 1, num_classes=len(token_type.word_index))\n",
"\n"
]
},
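{
"cell_type": "code",
"execution_count": null,
"id": "3c4d5e6f",
"metadata": {},
"outputs": [],
"source": [
"# Illustration of the label encoding above: Tokenizer indices start at 1, so the\n",
"# `- 1` shift in to_categorical maps each question type to a 0-based class id.\n",
"# The same mapping is needed later to turn an argmax class id back into a type string.\n",
"type_class_of = {t: i - 1 for t, i in token_type.word_index.items()}\n",
"print(type_class_of)  # e.g. {'ftb': 0, 'tof': 1, 'none': 2}, depending on frequency order"
]
},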
{
"cell_type": "code",
"execution_count": 5,
"id": "f530cfe7",
"metadata": {},
"outputs": [],
"source": [
"X_tok_train, X_tok_test, X_ner_train, X_ner_test, X_srl_train, X_srl_test, \\\n",
"y_q_train, y_q_test, y_a_train, y_a_test, y_type_train, y_type_test = train_test_split(\n",
"    X_tok, X_ner, X_srl, y_q, y_a, y_type, test_size=0.2, random_state=42\n",
")\n",
"\n",
"X_train = [X_tok_train, X_ner_train, X_srl_train]\n",
"X_test = [X_tok_test, X_ner_test, X_srl_test]"
]
},
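{
"cell_type": "code",
"execution_count": null,
"id": "4d5e6f70",
"metadata": {},
"outputs": [],
"source": [
"# Optional shape check: three parallel (num_examples, maxlen) input matrices and\n",
"# their aligned targets; the model below consumes X_train as [tokens, ner, srl].\n",
"print(X_tok_train.shape, X_ner_train.shape, X_srl_train.shape)\n",
"print(y_q_train.shape, y_a_train.shape, y_type_train.shape)"
]
},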
{
"cell_type": "code",
"execution_count": 6,
"id": "255e2a9a",
"metadata": {},
"outputs": [
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"2025-04-29 19:13:22.481835: E external/local_xla/xla/stream_executor/cuda/cuda_platform.cc:51] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303)\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">Model: \"functional\"</span>\n",
|
|
"</pre>\n"
|
|
],
|
|
"text/plain": [
|
|
"\u001b[1mModel: \"functional\"\u001b[0m\n"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n",
|
|
"┃<span style=\"font-weight: bold\"> Layer (type) </span>┃<span style=\"font-weight: bold\"> Output Shape </span>┃<span style=\"font-weight: bold\"> Param # </span>┃<span style=\"font-weight: bold\"> Connected to </span>┃\n",
|
|
"┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n",
|
|
"│ tok_input │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ - │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">InputLayer</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ ner_input │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ - │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">InputLayer</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ srl_input │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ - │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">InputLayer</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">128</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">126,080</span> │ tok_input[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Embedding</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding_1 │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">16</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">352</span> │ ner_input[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Embedding</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding_2 │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">16</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">432</span> │ srl_input[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Embedding</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ concatenate │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">160</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ embedding[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>], │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Concatenate</span>) │ │ │ embedding_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>… │\n",
|
|
"│ │ │ │ embedding_2[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ lstm (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">LSTM</span>) │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">256</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">427,008</span> │ concatenate[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ get_item (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">GetItem</span>) │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">256</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ lstm[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ question_output │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">473</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">121,561</span> │ lstm[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">TimeDistributed</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ answer_output │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">383</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">98,431</span> │ lstm[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">TimeDistributed</span>) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ type_output (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Dense</span>) │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">3</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">771</span> │ get_item[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
|
|
"└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n",
|
|
"</pre>\n"
|
|
],
|
|
"text/plain": [
|
|
"┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n",
|
|
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mConnected to \u001b[0m\u001b[1m \u001b[0m┃\n",
|
|
"┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n",
|
|
"│ tok_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n",
|
|
"│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ ner_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n",
|
|
"│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ srl_input │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n",
|
|
"│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m126,080\u001b[0m │ tok_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding_1 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m16\u001b[0m) │ \u001b[38;5;34m352\u001b[0m │ ner_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ embedding_2 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m16\u001b[0m) │ \u001b[38;5;34m432\u001b[0m │ srl_input[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"│ (\u001b[38;5;33mEmbedding\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ concatenate │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m160\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ embedding[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m], │\n",
|
|
"│ (\u001b[38;5;33mConcatenate\u001b[0m) │ │ │ embedding_1[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m… │\n",
|
|
"│ │ │ │ embedding_2[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ lstm (\u001b[38;5;33mLSTM\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m427,008\u001b[0m │ concatenate[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ get_item (\u001b[38;5;33mGetItem\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ lstm[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ question_output │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m473\u001b[0m) │ \u001b[38;5;34m121,561\u001b[0m │ lstm[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"│ (\u001b[38;5;33mTimeDistributed\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ answer_output │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m383\u001b[0m) │ \u001b[38;5;34m98,431\u001b[0m │ lstm[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"│ (\u001b[38;5;33mTimeDistributed\u001b[0m) │ │ │ │\n",
|
|
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
|
|
"│ type_output (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m771\u001b[0m │ get_item[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
|
|
"└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Total params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">774,635</span> (2.95 MB)\n",
|
|
"</pre>\n"
|
|
],
|
|
"text/plain": [
|
|
"\u001b[1m Total params: \u001b[0m\u001b[38;5;34m774,635\u001b[0m (2.95 MB)\n"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Trainable params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">774,635</span> (2.95 MB)\n",
|
|
"</pre>\n"
|
|
],
|
|
"text/plain": [
|
|
"\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m774,635\u001b[0m (2.95 MB)\n"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Non-trainable params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> (0.00 B)\n",
|
|
"</pre>\n"
|
|
],
|
|
"text/plain": [
|
|
"\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Epoch 1/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 101ms/step - answer_output_accuracy: 0.5626 - answer_output_loss: 5.7629 - loss: 12.9112 - question_output_accuracy: 0.3867 - question_output_loss: 6.0185 - type_output_accuracy: 0.5290 - type_output_loss: 1.0943 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 3.9036 - val_loss: 9.5865 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 4.5947 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0883\n",
|
|
"Epoch 2/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 32ms/step - answer_output_accuracy: 0.8791 - answer_output_loss: 2.9526 - loss: 7.7800 - question_output_accuracy: 0.6837 - question_output_loss: 3.7162 - type_output_accuracy: 0.7148 - type_output_loss: 1.0672 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 1.1139 - val_loss: 4.1230 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.9489 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0601\n",
|
|
"Epoch 3/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 34ms/step - answer_output_accuracy: 0.8726 - answer_output_loss: 1.2047 - loss: 4.4213 - question_output_accuracy: 0.6797 - question_output_loss: 2.2016 - type_output_accuracy: 0.7251 - type_output_loss: 1.0092 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.7679 - val_loss: 3.7423 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.9604 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0140\n",
|
|
"Epoch 4/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 32ms/step - answer_output_accuracy: 0.8633 - answer_output_loss: 1.1478 - loss: 4.4374 - question_output_accuracy: 0.6639 - question_output_loss: 2.3671 - type_output_accuracy: 0.7490 - type_output_loss: 0.9088 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.7059 - val_loss: 3.6255 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.9356 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 0.9840\n",
|
|
"Epoch 5/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 33ms/step - answer_output_accuracy: 0.8783 - answer_output_loss: 1.0187 - loss: 4.0230 - question_output_accuracy: 0.6760 - question_output_loss: 2.1959 - type_output_accuracy: 0.7563 - type_output_loss: 0.8131 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.6848 - val_loss: 3.5743 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.9039 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 0.9857\n",
|
|
"Epoch 6/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 33ms/step - answer_output_accuracy: 0.8800 - answer_output_loss: 0.9845 - loss: 3.8171 - question_output_accuracy: 0.6878 - question_output_loss: 2.0357 - type_output_accuracy: 0.7328 - type_output_loss: 0.7942 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.6742 - val_loss: 3.5592 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.8777 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0074\n",
|
|
"Epoch 7/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 32ms/step - answer_output_accuracy: 0.8768 - answer_output_loss: 0.9756 - loss: 3.8569 - question_output_accuracy: 0.6743 - question_output_loss: 2.0795 - type_output_accuracy: 0.7030 - type_output_loss: 0.8039 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.6769 - val_loss: 3.5671 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.8631 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0272\n",
|
|
"Epoch 8/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 32ms/step - answer_output_accuracy: 0.8814 - answer_output_loss: 0.9217 - loss: 3.7726 - question_output_accuracy: 0.6798 - question_output_loss: 2.0253 - type_output_accuracy: 0.6785 - type_output_loss: 0.8194 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.6900 - val_loss: 3.5722 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.8469 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0354\n",
|
|
"Epoch 9/30\n",
|
|
"\u001b[1m7/7\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 31ms/step - answer_output_accuracy: 0.8703 - answer_output_loss: 0.9799 - loss: 3.6985 - question_output_accuracy: 0.6843 - question_output_loss: 1.9755 - type_output_accuracy: 0.7160 - type_output_loss: 0.7474 - val_answer_output_accuracy: 0.9261 - val_answer_output_loss: 0.6958 - val_loss: 3.5849 - val_question_output_accuracy: 0.7500 - val_question_output_loss: 1.8401 - val_type_output_accuracy: 0.5652 - val_type_output_loss: 1.0490\n"
|
|
]
|
|
}
|
|
],
"source": [
"\n",
"inp_tok = Input(shape=(None,), name=\"tok_input\")\n",
"inp_ner = Input(shape=(None,), name=\"ner_input\")\n",
"inp_srl = Input(shape=(None,), name=\"srl_input\")\n",
"\n",
"emb_tok = Embedding(input_dim=len(token_tok.word_index) + 1, output_dim=128)(inp_tok)\n",
"emb_ner = Embedding(input_dim=len(token_ner.word_index) + 1, output_dim=16)(inp_ner)\n",
"emb_srl = Embedding(input_dim=len(token_srl.word_index) + 1, output_dim=16)(inp_srl)\n",
"\n",
"# emb_tok = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_tok)\n",
"# emb_ner = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_ner)\n",
"# emb_srl = Embedding(input_dim=..., output_dim=..., mask_zero=True)(inp_srl)\n",
"\n",
"merged = Concatenate()([emb_tok, emb_ner, emb_srl])\n",
"\n",
"x = LSTM(256, return_sequences=True)(merged)\n",
"\n",
"out_question = TimeDistributed(Dense(len(token_q.word_index) + 1, activation=\"softmax\"), name=\"question_output\")(x)\n",
"out_answer = TimeDistributed(Dense(len(token_a.word_index) + 1, activation=\"softmax\"), name=\"answer_output\")(x)\n",
"out_type = Dense(len(token_type.word_index), activation=\"softmax\", name=\"type_output\")(\n",
"    x[:, 0, :]\n",
") # use the first LSTM timestep for the sentence-level type prediction\n",
"\n",
"model = Model(\n",
"    inputs=[inp_tok, inp_ner, inp_srl], outputs=[out_question, out_answer, out_type]\n",
")\n",
"model.compile(\n",
"    optimizer=\"adam\",\n",
"    loss={\n",
"        \"question_output\": \"sparse_categorical_crossentropy\",\n",
"        \"answer_output\": \"sparse_categorical_crossentropy\",\n",
"        \"type_output\": \"categorical_crossentropy\",\n",
"    },\n",
"    metrics={\n",
"        \"question_output\": \"accuracy\",\n",
"        \"answer_output\": \"accuracy\",\n",
"        \"type_output\": \"accuracy\",\n",
"    },\n",
")\n",
"\n",
"model.summary()\n",
"\n",
"# ----------------------------------------------------------------------------\n",
"# 5. TRAINING\n",
"# ----------------------------------------------------------------------------\n",
"model.fit(\n",
"    X_train,\n",
"    {\n",
"        \"question_output\": np.expand_dims(y_q_train, -1),\n",
"        \"answer_output\": np.expand_dims(y_a_train, -1),\n",
"        \"type_output\": y_type_train,\n",
"    },\n",
"    batch_size=32,\n",
"    epochs=30,\n",
"    validation_split=0.1,\n",
"    callbacks=[EarlyStopping(patience=3, restore_best_weights=True)],\n",
")\n",
"\n",
"import pickle\n",
"\n",
"\n",
"model.save(\"new_model_lstm_qg.keras\")\n",
"with open(\"tokenizers.pkl\", \"wb\") as f:\n",
"    pickle.dump({\n",
"        \"token\": token_tok,\n",
"        \"ner\": token_ner,\n",
"        \"srl\": token_srl,\n",
"        \"question\": token_q,\n",
"        \"answer\": token_a,\n",
"        \"type\": token_type\n",
"    }, f)\n",
"\n"
]
},
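{
"cell_type": "code",
"execution_count": null,
"id": "5e6f7081",
"metadata": {},
"outputs": [],
"source": [
"# Minimal decoding sketch: turn the model's raw outputs for one test example back\n",
"# into words via the tokenizers' index_word maps. This is a greedy per-position\n",
"# argmax read-out (padding index 0 dropped), shown for illustration only.\n",
"q_probs, a_probs, t_probs = model.predict(\n",
"    [X_tok_test[:1], X_ner_test[:1], X_srl_test[:1]], verbose=0\n",
")\n",
"pred_question = [token_q.index_word[i] for i in q_probs[0].argmax(axis=-1) if i != 0]\n",
"pred_answer = [token_a.index_word[i] for i in a_probs[0].argmax(axis=-1) if i != 0]\n",
"pred_type = token_type.index_word[t_probs[0].argmax() + 1]  # +1 undoes the earlier -1 shift\n",
"print(\"question:\", \" \".join(pred_question))\n",
"print(\"answer  :\", \" \".join(pred_answer))\n",
"print(\"type    :\", pred_type)"
]
},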
{
"cell_type": "code",
"execution_count": 7,
"id": "06fd86c7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 236ms/step\n",
"\n",
"=== Detailed Accuracy ===\n",
"Question Accuracy (Token-level): 0.0000\n",
"Answer Accuracy (Token-level) : 0.0000\n",
"Type Accuracy (Class-level) : 0.68\n"
]
}
],
"source": [
"\n",
"def token_level_accuracy(y_true, y_pred):\n",
"    correct = 0\n",
"    total = 0\n",
"    for true_seq, pred_seq in zip(y_true, y_pred):\n",
"        for t, p in zip(true_seq, pred_seq):\n",
"            if t != 0: # ignore padding\n",
"                total += 1\n",
"                if t == p:\n",
"                    correct += 1\n",
"    return correct / total if total > 0 else 0\n",
"\n",
"\n",
"# Predict on test set\n",
"y_pred_q, y_pred_a, y_pred_type = model.predict(X_test)\n",
"\n",
"# Decode predictions to class indices\n",
"y_pred_q = np.argmax(y_pred_q, axis=-1)\n",
"y_pred_a = np.argmax(y_pred_a, axis=-1)\n",
"y_pred_type = np.argmax(y_pred_type, axis=-1)\n",
"y_true_type = np.argmax(y_type_test, axis=-1)\n",
"\n",
"# Calculate token-level accuracy\n",
"acc_q = token_level_accuracy(y_q_test, y_pred_q)\n",
"acc_a = token_level_accuracy(y_a_test, y_pred_a)\n",
"\n",
"# Type classification report\n",
"report_type = classification_report(y_true_type, y_pred_type, zero_division=0)\n",
"\n",
"# Print Results\n",
"print(\"\\n=== Detailed Accuracy ===\")\n",
"print(f\"Question Accuracy (Token-level): {acc_q:.4f}\")\n",
"print(f\"Answer Accuracy (Token-level) : {acc_a:.4f}\")\n",
"print(f\"Type Accuracy (Class-level) : {np.mean(y_true_type == y_pred_type):.2f}\")\n",
"# print(\"\\n=== Classification Report (TYPE) ===\")\n",
"# print(report_type)"
]
},
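{
"cell_type": "code",
"execution_count": null,
"id": "6f708192",
"metadata": {},
"outputs": [],
"source": [
"# The commented-out report in the next cell calls flatten_valid() and a\n",
"# y_pred_a_class variable, neither of which is defined in this notebook. One\n",
"# plausible reading, sketched here: flatten the non-padding positions so\n",
"# classification_report can compare token-level ids, with y_pred_a from the\n",
"# previous cell standing in for the predictions.\n",
"def flatten_valid(y_true, y_pred):\n",
"    \"\"\"Flatten aligned sequences, keeping only positions where the true id is non-zero.\"\"\"\n",
"    flat_true, flat_pred = [], []\n",
"    for true_seq, pred_seq in zip(y_true, y_pred):\n",
"        for t, p in zip(true_seq, pred_seq):\n",
"            if t != 0:  # skip padding\n",
"                flat_true.append(int(t))\n",
"                flat_pred.append(int(p))\n",
"    return flat_true, flat_pred"
]
},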
{
"cell_type": "code",
"execution_count": 8,
"id": "d5ed106c",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# flat_true_a, flat_pred_a = flatten_valid(y_a_test, y_pred_a_class)\n",
"# print(\"\\n=== Classification Report: ANSWER ===\")\n",
"# print(classification_report(flat_true_a, flat_pred_a))\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "aa3860de",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# print(\"\\n=== Classification Report: TYPE ===\")\n",
"# print(classification_report(y_true_type_class, y_pred_type_class))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "myenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}