feat: rename training model
commit ed1a3df0d8
parent 4ca70faf14
@@ -176,19 +176,19 @@
 "qa_pairs": [
 {
 "type": "fill_in_the_blank",
-"question": "Apa Kepanjangan dari BPUPKI?",
-"answer": "Badan Penyelidik Usaha Usaha Persiapan Kemerdekaan Indonesia"
+"question": "Apa kepanjangan dari BPUPKI?",
+"answer": "Badan Penyelidik Usaha-Usaha Persiapan Kemerdekaan Indonesia"
 },
 {
 "type": "multiple_choice",
-"question": "BPUPKI dibentuk pada ",
+"question": "BPUPKI dibentuk pada tanggal?",
 "options": [
 "20 April 1945",
 "29 April 1945",
 "10 April 1945",
 "20 Mei 1945"
 ],
-"answer": "20 Mei 1945"
+"answer": "29 April 1945"
 }
 ]
 }
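The corrected entries above are ordinary JSON, so the notebook can read them with the standard json module. A minimal loading sketch, assuming a file named dataset.json and a top-level qa_pairs list (neither the filename nor the surrounding structure appears in this diff):

import json

# Sketch only: "dataset.json" and the top-level layout are assumptions;
# the diff only shows "qa_pairs" entries with "type", "question",
# "answer", and (for multiple choice) "options".
with open("dataset.json", encoding="utf-8") as f:
    data = json.load(f)

for pair in data["qa_pairs"]:
    if pair["type"] == "multiple_choice":
        print(pair["question"], pair["options"], "->", pair["answer"])
    else:  # fill_in_the_blank
        print(pair["question"], "->", pair["answer"])
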
@@ -2,30 +2,37 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 47,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "# import library\n",
 "\n",
+"# Data manipulation and visualization\n",
 "import pandas as pd\n",
-"import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
-"import json\n",
-"from tensorflow.keras.preprocessing.text import Tokenizer\n",
-"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
+"import matplotlib.pyplot as plt\n",
 "\n",
+"# Natural language processing\n",
 "import re\n",
 "import string\n",
 "import nltk\n",
 "from nltk.corpus import stopwords\n",
 "from nltk.tokenize import word_tokenize\n",
 "from nltk.stem import WordNetLemmatizer\n",
-"import pickle\n",
 "\n",
+"# Deep learning\n",
+"from tensorflow.keras.preprocessing.text import Tokenizer\n",
+"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
 "from tensorflow.keras.models import Model\n",
 "from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, Concatenate\n",
-"from sklearn.metrics import classification_report, precision_score, recall_score, accuracy_score\n"
+"from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n",
+"\n",
+"# Metrics for model evaluation\n",
+"from sklearn.metrics import classification_report, precision_score, recall_score, accuracy_score\n",
+"\n",
+"# Utility for serialization\n",
+"import pickle\n"
 ]
 },
 {
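The regrouped imports above rely on stopwords, word_tokenize, and WordNetLemmatizer, which need NLTK corpora that are downloaded separately; the diff does not show where that happens. A minimal setup sketch, assuming it is done inside the notebook:

import nltk

# One-time downloads needed by the NLP imports (assumption: this runs
# somewhere outside the lines shown in this commit).
nltk.download("stopwords")  # nltk.corpus.stopwords
nltk.download("punkt")      # word_tokenize
nltk.download("wordnet")    # WordNetLemmatizer
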
@@ -346,7 +353,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 52,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -467,6 +474,11 @@
 " },\n",
 ")\n",
 "\n",
+"\n",
+"early_stop = EarlyStopping(monitor='val_loss', patience=3)\n",
+"checkpoint = ModelCheckpoint(\"best_model.h5\", monitor='val_loss', save_best_only=True)\n",
+"\n",
+"\n",
 "# === Training Model === #\n",
 "model.fit(\n",
 " [context_padded, question_padded],\n",
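The hunk above defines the EarlyStopping and ModelCheckpoint callbacks but is cut off before the rest of the model.fit(...) call, so it does not show where they are passed in. A minimal sketch of how such callbacks are typically wired into Keras training; the target array (answer_labels), epochs, batch_size, and validation_split are assumptions, not values from this commit:

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Stop when validation loss stalls and keep the best weights on disk.
early_stop = EarlyStopping(monitor="val_loss", patience=3)
checkpoint = ModelCheckpoint("best_model.h5", monitor="val_loss", save_best_only=True)

# Sketch only: answer_labels and the hyperparameters below are assumed;
# the commit truncates fit() right after the input list.
model.fit(
    [context_padded, question_padded],
    answer_labels,
    validation_split=0.2,
    epochs=20,
    batch_size=32,
    callbacks=[early_stop, checkpoint],
)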