feat: new LSTM model training
parent ba8b5f7f8c
commit 32822f1095

@@ -1 +1 @@
-.venv
+myenv

Binary file not shown.

@@ -0,0 +1 @@
Katak mengalami metamorfosis dari telur, berudu, katak muda, hingga katak dewasa.,multiple_choice,Tahapan apakah yang termasuk dalam metamorfosis katak?,Berudu,Telur|Berudu|Pupa|Imago

Binary file not shown.
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -0,0 +1,195 @@
[
  {
    "context": "Albert Einstein adalah fisikawan teoretis kelahiran Jerman yang mengembangkan teori relativitas, salah satu dari dua pilar fisika modern. Karyanya juga dikenal karena pengaruhnya terhadap filosofi ilmu pengetahuan. Ia menerima Penghargaan Nobel dalam Fisika pada tahun 1921 atas jasanya dalam fisika teoretis.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "_______ mengembangkan teori relativitas.", "answer": "Albert Einstein" },
      { "type": "multiple_choice", "question": "Kapan Albert Einstein menerima Penghargaan Nobel?", "options": ["1905", "1915", "1921", "1930"], "answer": "1921" },
      { "type": "true_false", "question": "Albert Einstein menerima Penghargaan Nobel untuk teori relativitas.", "answer": "False" }
    ]
  },
  {
    "context": "Samudra Pasifik adalah yang terbesar dan terdalam di antara divisi samudra di Bumi. Samudra ini membentang dari Samudra Arktik di utara hingga Samudra Selatan di selatan dan berbatasan dengan Asia dan Australia di barat serta Amerika di timur.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Samudra _______ adalah yang terbesar dan terdalam.", "answer": "Pasifik" },
      { "type": "multiple_choice", "question": "Benua mana yang berbatasan dengan Samudra Pasifik?", "options": ["Afrika", "Eropa", "Asia dan Australia", "Antartika"], "answer": "Asia dan Australia" },
      { "type": "true_false", "question": "Samudra Pasifik lebih kecil daripada Samudra Atlantik.", "answer": "False" }
    ]
  },
  {
    "context": "Proklamasi Kemerdekaan Indonesia dibacakan pada tanggal 17 Agustus 1945 oleh Soekarno dan Mohammad Hatta di Jakarta. Peristiwa ini menandai lahirnya negara Indonesia yang merdeka dari penjajahan.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Proklamasi Kemerdekaan Indonesia terjadi pada tanggal _______.", "answer": "17 Agustus 1945" },
      { "type": "multiple_choice", "question": "Siapa yang membacakan Proklamasi Kemerdekaan Indonesia?", "options": ["Soekarno", "Mohammad Hatta", "Sayuti Melik", "Chairul Basri"], "answer": "Soekarno dan Mohammad Hatta" },
      { "type": "true_false", "question": "Proklamasi Kemerdekaan Indonesia dibacakan di Surabaya.", "answer": "False" }
    ]
  },
  {
    "context": "Hukum Newton adalah tiga hukum fisika yang menjadi dasar mekanika klasik. Hukum pertama menyatakan bahwa suatu benda akan tetap diam atau bergerak lurus beraturan kecuali ada gaya luar yang bekerja padanya. Hukum kedua menyatakan bahwa percepatan suatu benda berbanding lurus dengan gaya yang bekerja padanya dan berbanding terbalik dengan massanya. Hukum ketiga menyatakan bahwa setiap aksi memiliki reaksi yang sama besar tetapi berlawanan arah.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Hukum Newton terdiri dari _______ hukum.", "answer": "tiga" },
      { "type": "multiple_choice", "question": "Apa yang dikatakan oleh Hukum Ketiga Newton?", "options": ["Benda akan tetap diam kecuali ada gaya luar.", "Percepatan berbanding lurus dengan gaya.", "Setiap aksi memiliki reaksi yang sama besar tetapi berlawanan arah.", "Energi tidak dapat diciptakan atau dimusnahkan."], "answer": "Setiap aksi memiliki reaksi yang sama besar tetapi berlawanan arah." },
      { "type": "true_false", "question": "Hukum Kedua Newton menyatakan bahwa percepatan berbanding terbalik dengan gaya yang bekerja pada benda.", "answer": "False" }
    ]
  },
  {
    "context": "Budi Utomo adalah organisasi pemuda yang didirikan pada 20 Mei 1908 oleh dr. Wahidin Sudirohusodo dan para mahasiswa STOVIA. Organisasi ini bertujuan untuk meningkatkan pendidikan dan kesejahteraan rakyat Indonesia serta menjadi tonggak awal kebangkitan nasional.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Budi Utomo didirikan pada tanggal _______.", "answer": "20 Mei 1908" },
      { "type": "multiple_choice", "question": "Siapa tokoh yang mendirikan Budi Utomo?", "options": ["Soekarno", "Mohammad Hatta", "dr. Wahidin Sudirohusodo", "Ki Hajar Dewantara"], "answer": "dr. Wahidin Sudirohusodo" },
      { "type": "true_false", "question": "Budi Utomo bertujuan meningkatkan pendidikan dan kesejahteraan rakyat Indonesia.", "answer": "True" }
    ]
  },
  {
    "context": "Ki Hajar Dewantara adalah pelopor pendidikan di Indonesia dan pendiri Taman Siswa. Ia dikenal dengan semboyannya 'Ing Ngarsa Sung Tuladha, Ing Madya Mangun Karsa, Tut Wuri Handayani', yang menekankan peran guru dalam pendidikan.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Ki Hajar Dewantara mendirikan _______.", "answer": "Taman Siswa" },
      { "type": "multiple_choice", "question": "Apa makna dari semboyan 'Tut Wuri Handayani'?", "options": ["Guru memberi contoh di depan.", "Guru mendukung dari belakang.", "Guru membangun semangat di tengah-tengah murid.", "Guru mengajarkan teori dan praktik."], "answer": "Guru mendukung dari belakang." },
      { "type": "true_false", "question": "Ki Hajar Dewantara adalah pahlawan di bidang pendidikan.", "answer": "True" }
    ]
  },
  {
    "context": "Teori evolusi dikembangkan oleh Charles Darwin dan dijelaskan dalam bukunya 'On the Origin of Species' yang diterbitkan pada tahun 1859. Teori ini menyatakan bahwa spesies berevolusi melalui seleksi alam, di mana individu dengan karakteristik yang lebih baik memiliki peluang lebih tinggi untuk bertahan hidup dan berkembang biak.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Teori evolusi dikembangkan oleh _______.", "answer": "Charles Darwin" },
      { "type": "multiple_choice", "question": "Dalam buku apa Charles Darwin menjelaskan teori evolusinya?", "options": ["Principia Mathematica", "The Selfish Gene", "On the Origin of Species", "The Descent of Man"], "answer": "On the Origin of Species" },
      { "type": "true_false", "question": "Teori evolusi menyatakan bahwa semua spesies tetap sama sepanjang waktu.", "answer": "False" }
    ]
  },
  {
    "context": "BPUPKI (Badan Penyelidik Usaha-Usaha Persiapan Kemerdekaan Indonesia) dibentuk oleh pemerintah Jepang pada 29 April 1945 sebagai bagian dari janji Jepang untuk memberikan kemerdekaan kepada Indonesia. Pembentukan BPUPKI terjadi pada masa Perang Dunia II, ketika Jepang mulai mengalami kekalahan dari Sekutu dan ingin mendapatkan dukungan dari rakyat Indonesia.",
    "qa_pairs": [
      { "type": "fill_in_the_blank", "question": "Apa kepanjangan dari BPUPKI?", "answer": "Badan Penyelidik Usaha Usaha Persiapan Kemerdekaan Indonesia" },
      { "type": "multiple_choice", "question": "BPUPKI dibentuk pada ...", "options": ["20 April 1945", "29 April 1945", "10 April 1945", "20 Mei 1945"], "answer": "29 April 1945" }
    ]
  }
]
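
For orientation, the sketch below shows one way to load this file and sanity-check its structure. The field names (context, qa_pairs, type, options, answer) come from the JSON above; the checks themselves are illustrative, not part of the commit, and simply flag any multiple-choice entry whose answer is not listed among its options.

# Illustrative only: load the committed dataset and verify the fields used later in training.
import json

with open("independent_dataset.json", "r", encoding="utf-8") as f:
    dataset = json.load(f)

for entry in dataset:
    assert "context" in entry and "qa_pairs" in entry
    for qa in entry["qa_pairs"]:
        assert qa["type"] in {"fill_in_the_blank", "true_false", "multiple_choice"}
        if qa["type"] == "multiple_choice" and qa["answer"] not in qa.get("options", []):
            print("Answer not among options:", qa["question"])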
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -9,3 +9,102 @@ Lapisan terluar dari atmosfer Bumi disebut troposfer.,multiple_choice,Apa nama l
Ibu Kota Jepang adalah Tokyo.,true_false,Ibu Kota Jepang adalah Kyoto.,False,True|False
Fotosintesis adalah proses yang dilakukan tumbuhan untuk membuat makanan sendiri.,fill_in_the_blank,Apa nama proses yang dilakukan tumbuhan untuk membuat makanannya sendiri?,Fotosintesis,-
Dataran tertinggi di dunia adalah Dataran Tinggi Tibet.,fill_in_the_blank,Apa nama dataran tertinggi di dunia?,Dataran Tinggi Tibet,-
Indonesia memiliki lima sila dalam Pancasila.,true_false,Indonesia memiliki enam sila dalam Pancasila.,False,True|False
|
||||||
|
Segitiga memiliki tiga sisi.,fill_in_the_blank,Berapa jumlah sisi yang dimiliki oleh segitiga?,Tiga,-
|
||||||
|
Air membeku pada suhu 0 derajat Celsius.,multiple_choice,Pada suhu berapakah air membeku?,0 derajat Celsius,-10 derajat Celsius|0 derajat Celsius|10 derajat Celsius|100 derajat Celsius
|
||||||
|
Sumber energi utama bagi Bumi adalah Matahari.,fill_in_the_blank,Apa sumber energi utama bagi Bumi?,Matahari,-
|
||||||
|
Jantung manusia memiliki empat ruang.,multiple_choice,Berapa jumlah ruang pada jantung manusia?,4,2|3|4|5
|
||||||
|
Kota terbesar di Indonesia adalah Jakarta.,true_false,Kota terbesar di Indonesia adalah Surabaya.,False,True|False
|
||||||
|
Laut yang mengelilingi Indonesia disebut perairan Nusantara.,fill_in_the_blank,Apa nama laut yang mengelilingi Indonesia?,Perairan Nusantara,-
|
||||||
|
Nama planet terdekat dengan Matahari adalah Merkurius.,fill_in_the_blank,Apa nama planet yang paling dekat dengan Matahari?,Merkurius,-
|
||||||
|
Harimau adalah hewan karnivora.,true_false,Harimau adalah hewan herbivora.,False,True|False
|
||||||
|
Salah satu sumber protein nabati adalah kacang-kacangan.,multiple_choice,Makanan manakah yang termasuk sumber protein nabati?,Kacang-kacangan,Daging|Ikan|Kacang-kacangan|Telur
|
||||||
|
Gunung Merapi terletak di Pulau Jawa.,fill_in_the_blank,Di pulau manakah Gunung Merapi berada?,Jawa,-
|
||||||
|
Bulan adalah satelit alami Bumi.,true_false,Bulan adalah planet dalam tata surya.,False,True|False
|
||||||
|
Tumbuhan menyerap air dari tanah melalui akarnya.,fill_in_the_blank,Melalui bagian apakah tumbuhan menyerap air?,Akar,-
|
||||||
|
Peristiwa turunnya air dari awan disebut hujan.,fill_in_the_blank,Apa nama peristiwa turunnya air dari awan?,Hujan,-
|
||||||
|
Benua terbesar di dunia adalah Asia.,multiple_choice,Benua manakah yang terbesar di dunia?,Asia,Australia|Afrika|Asia|Eropa
|
||||||
|
Puncak Jaya adalah gunung tertinggi di Indonesia.,fill_in_the_blank,Apa nama gunung tertinggi di Indonesia?,Puncak Jaya,-
|
||||||
|
Hewan pemakan tumbuhan disebut herbivora.,true_false,Hewan pemakan tumbuhan disebut omnivora.,False,True|False
|
||||||
|
Planet yang dikenal sebagai planet merah adalah Mars.,fill_in_the_blank,Planet manakah yang disebut sebagai planet merah?,Mars,-
|
||||||
|
Gajah adalah mamalia darat terbesar di dunia.,fill_in_the_blank,Hewan apakah yang merupakan mamalia darat terbesar?,Gajah,-
|
||||||
|
Indonesia terdiri dari lebih dari 17.000 pulau.,true_false,Indonesia terdiri dari lebih dari 10.000 pulau.,True,True|False
|
||||||
|
Sungai terpanjang di dunia adalah Sungai Nil.,fill_in_the_blank,Sungai apakah yang terpanjang di dunia?,Sungai Nil,-
|
||||||
|
Kelelawar adalah satu-satunya mamalia yang bisa terbang.,true_false,Kelelawar adalah satu-satunya mamalia yang bisa terbang.,True,True|False
|
||||||
|
Nama mata uang resmi Indonesia adalah Rupiah.,fill_in_the_blank,Apa nama mata uang resmi Indonesia?,Rupiah,-
|
||||||
|
Benua Afrika memiliki Gurun Sahara yang merupakan gurun terbesar di dunia.,fill_in_the_blank,Di benua manakah Gurun Sahara berada?,Afrika,-
|
||||||
|
Hewan yang hanya makan daging disebut karnivora.,multiple_choice,Apa istilah untuk hewan yang hanya makan daging?,Karnivora,Herbivora|Omnivora|Karnivora|Detritivora
|
||||||
|
Badan yang mengelola keuangan negara Indonesia adalah Bank Indonesia.,fill_in_the_blank,Apa nama badan yang mengelola keuangan negara Indonesia?,Bank Indonesia,-
|
||||||
|
Burung hantu berburu mangsanya pada malam hari.,true_false,Burung hantu berburu mangsanya pada siang hari.,False,True|False
|
||||||
|
Tulang manusia dewasa berjumlah sekitar 206 buah.,multiple_choice,Berapa jumlah tulang pada manusia dewasa?,206,180|190|200|206
|
||||||
|
Hewan yang berkembang biak dengan bertelur disebut ovipar.,fill_in_the_blank,Apa istilah untuk hewan yang berkembang biak dengan bertelur?,Ovipar,-
|
||||||
|
Nama presiden pertama Indonesia adalah Soekarno.,fill_in_the_blank,Siapakah presiden pertama Indonesia?,Soekarno,-
|
||||||
|
Benua terkecil di dunia adalah Australia.,true_false,Benua terkecil di dunia adalah Antartika.,False,True|False
|
||||||
|
Kadal adalah hewan yang termasuk dalam kelompok reptil.,multiple_choice,Hewan manakah yang termasuk dalam kelompok reptil?,Kadal,Kucing|Burung|Kadal|Katak
|
||||||
|
Hewan yang dapat berpindah tempat disebut hewan bergerak.,fill_in_the_blank,Bagaimana cara hewan berpindah tempat?,Bergerak,-
|
||||||
|
Nama samudra terbesar di dunia adalah Samudra Pasifik.,fill_in_the_blank,Samudra apakah yang terbesar di dunia?,Samudra Pasifik,-
|
||||||
|
Tumbuhan hijau menghasilkan oksigen saat berfotosintesis.,true_false,Tumbuhan hijau menghasilkan karbon dioksida saat fotosintesis.,False,True|False
|
||||||
|
Ikan bernapas menggunakan insang.,fill_in_the_blank,Bagian tubuh apakah yang digunakan ikan untuk bernapas?,Insang,-
|
||||||
|
Bumi memiliki satu satelit alami bernama Bulan.,multiple_choice,Berapa jumlah satelit alami yang dimiliki Bumi?,1,0|1|2|3
|
||||||
|
Proses perubahan air menjadi uap air disebut evaporasi.,fill_in_the_blank,Apa nama proses perubahan air menjadi uap air?,Evaporasi,-
|
||||||
|
Bintang yang paling dekat dengan Bumi adalah Matahari.,fill_in_the_blank,Apa nama bintang yang paling dekat dengan Bumi?,Matahari,-
|
||||||
|
Salah satu jenis energi terbarukan adalah energi surya.,multiple_choice,Apa contoh energi terbarukan?,Energi surya,Energi batu bara|Energi minyak bumi|Energi surya|Energi nuklir
|
||||||
|
Danau terbesar di Indonesia adalah Danau Toba.,fill_in_the_blank,Apa nama danau terbesar di Indonesia?,Danau Toba,-
|
||||||
|
Perubahan wujud dari cair ke gas disebut menguap.,true_false,Perubahan wujud dari cair ke gas disebut mencair.,False,True|False
|
||||||
|
Tari Saman berasal dari Aceh.,fill_in_the_blank,Tari apakah yang berasal dari Aceh?,Tari Saman,-
|
||||||
|
Raja pertama di Kerajaan Majapahit adalah Raden Wijaya.,multiple_choice,Siapakah raja pertama dari Kerajaan Majapahit?,Raden Wijaya,Hayam Wuruk|Gajah Mada|Raden Wijaya|Ken Arok
|
||||||
|
Hewan pemakan segala disebut omnivora.,fill_in_the_blank,Apa istilah untuk hewan pemakan segala?,Omnivora,-
|
||||||
|
Udara mengandung gas yang paling banyak berupa nitrogen.,multiple_choice,Gas apakah yang paling banyak di udara?,Nitrogen,Oksigen|Karbon dioksida|Nitrogen|Hidrogen
|
||||||
|
Nama ibu kota negara Malaysia adalah Kuala Lumpur.,fill_in_the_blank,Apa ibu kota negara Malaysia?,Kuala Lumpur,-
|
||||||
|
Gunung tertinggi di Pulau Jawa adalah Gunung Semeru.,true_false,Gunung tertinggi di Pulau Jawa adalah Gunung Merapi.,False,True|False
|
||||||
|
Burung cendrawasih dikenal karena bulunya yang indah.,fill_in_the_blank,Burung apakah yang dikenal karena bulunya yang indah?,Cendrawasih,-
|
||||||
|
Komodo hanya ditemukan di Indonesia.,true_false,Komodo hanya ditemukan di Indonesia.,True,True|False
|
||||||
|
Fungsi utama akar pada tumbuhan adalah menyerap air dan mineral.,multiple_choice,Apa fungsi utama akar pada tumbuhan?,Menyerap air dan mineral,Menghasilkan oksigen|Menyerap air dan mineral|Menyediakan makanan|Membantu fotosintesis
|
||||||
|
Listrik dapat dihasilkan dari pembangkit tenaga air.,true_false,Listrik hanya dapat dihasilkan dari minyak bumi.,False,True|False
|
||||||
|
Pakaian adat dari Sumatra Barat disebut Baju Bundo Kanduang.,fill_in_the_blank,Apa nama pakaian adat dari Sumatra Barat?,Baju Bundo Kanduang,-
|
||||||
|
Alat pernapasan pada manusia adalah paru-paru.,fill_in_the_blank,Apa alat pernapasan manusia?,Paru-paru,-
|
||||||
|
Kota terbesar di dunia berdasarkan jumlah penduduk adalah Tokyo.,multiple_choice,Kota manakah yang terbesar di dunia berdasarkan jumlah penduduk?,Tokyo,Jakarta|New York|Tokyo|Beijing
|
||||||
|
Kuda dapat berlari dengan kecepatan tinggi.,true_false,Kuda tidak bisa berlari.,False,True|False
|
||||||
|
Planet terbesar dalam tata surya adalah Jupiter.,fill_in_the_blank,Planet apakah yang terbesar dalam tata surya?,Jupiter,-
|
||||||
|
Lambang negara Indonesia adalah Garuda Pancasila.,fill_in_the_blank,Apa lambang negara Indonesia?,Garuda Pancasila,-
|
||||||
|
Rafflesia arnoldii adalah bunga terbesar di dunia.,multiple_choice,Apa nama bunga terbesar di dunia?,Rafflesia arnoldii,Mawar|Anggrek|Rafflesia arnoldii|Teratai
|
||||||
|
Tumbuhan menghasilkan makanan melalui proses fotosintesis.,true_false,Tumbuhan menghasilkan makanan melalui proses fotosintesis.,True,True|False
|
||||||
|
Tumbuhan hijau membuat makanannya sendiri melalui proses fotosintesis.,fill_in_the_blank,Apa nama proses yang digunakan tumbuhan untuk membuat makanan sendiri?,Fotosintesis,-
|
||||||
|
Kucing adalah hewan mamalia yang memiliki kelenjar susu untuk menyusui anaknya.,true_false,Kucing adalah hewan ovipar.,False,True|False
|
||||||
|
Pohon jati menggugurkan daunnya saat musim kemarau untuk mengurangi penguapan.,multiple_choice,Mengapa pohon jati menggugurkan daunnya saat musim kemarau?,Mengurangi penguapan,Menyerap lebih banyak air|Mengurangi penguapan|Meningkatkan fotosintesis|Melindungi dari serangga
|
||||||
|
Hewan yang berkembang biak dengan cara bertelur disebut ovipar.,fill_in_the_blank,Apa istilah untuk hewan yang berkembang biak dengan bertelur?,Ovipar,-
|
||||||
|
Hewan yang mengalami metamorfosis sempurna memiliki empat tahap dalam siklus hidupnya.,true_false,Hewan yang mengalami metamorfosis sempurna hanya memiliki dua tahap dalam siklus hidupnya.,False,True|False
|
||||||
|
Hewan pemakan daging disebut karnivora.,fill_in_the_blank,Apa istilah untuk hewan pemakan daging?,Karnivora,-
|
||||||
|
Hewan yang makan tumbuhan dan daging disebut omnivora.,true_false,Ayam adalah contoh hewan omnivora.,True,True|False
|
||||||
|
Pohon kelapa memiliki akar serabut.,multiple_choice,Akar apakah yang dimiliki oleh pohon kelapa?,Akar serabut,Akar tunggang|Akar serabut|Akar gantung|Akar napas
|
||||||
|
Alat pernapasan ikan adalah insang.,fill_in_the_blank,Apa nama alat pernapasan ikan?,Insang,-
|
||||||
|
Jantung manusia terdiri dari empat ruang.,true_false,Jantung manusia terdiri dari tiga ruang.,False,True|False
|
||||||
|
Organ yang berfungsi untuk memompa darah ke seluruh tubuh adalah jantung.,fill_in_the_blank,Organ apakah yang berfungsi memompa darah?,Jantung,-
|
||||||
|
Manusia bernapas dengan menggunakan paru-paru.,multiple_choice,Alat pernapasan manusia adalah?,Paru-paru,Insang|Trakea|Paru-paru|Jantung
|
||||||
|
Burung memiliki paruh yang berbeda-beda sesuai dengan jenis makanannya.,true_false,Paruh burung selalu sama bentuknya.,False,True|False
|
||||||
|
Cicak dapat memutuskan ekornya untuk melindungi diri dari pemangsa.,fill_in_the_blank,Apa nama kemampuan cicak dalam memutuskan ekornya?,Autotomi,-
|
||||||
|
Tumbuhan menyerap air melalui akarnya.,true_false,Tumbuhan menyerap air melalui daunnya.,False,True|False
|
||||||
|
Padi berkembang biak dengan biji.,multiple_choice,Bagaimana padi berkembang biak?,Dengan biji,Dengan akar|Dengan biji|Dengan batang|Dengan daun
|
||||||
|
Cacing bernapas melalui permukaan kulitnya.,fill_in_the_blank,Bagaimana cacing bernapas?,Melalui kulit,-
|
||||||
|
Hewan yang aktif di malam hari disebut nokturnal.,true_false,Ayam adalah hewan nokturnal.,False,True|False
|
||||||
|
Gajah memiliki belalai yang berfungsi untuk mengambil makanan dan minum.,multiple_choice,Bagian tubuh apakah yang digunakan gajah untuk mengambil makanan?,Belalai,Kaki|Belalai|Ekor|Telinga
|
||||||
|
Serangga memiliki tiga pasang kaki.,fill_in_the_blank,Berapa jumlah pasang kaki serangga?,Tiga,-
|
||||||
|
Akar bakau memiliki akar napas yang membantu bertahan di lingkungan air.,true_false,Akar bakau adalah akar serabut.,False,True|False
|
||||||
|
Daun berfungsi sebagai tempat fotosintesis pada tumbuhan.,fill_in_the_blank,Bagian tumbuhan apakah yang berfungsi untuk fotosintesis?,Daun,-
|
||||||
|
Hewan berdarah panas dapat menjaga suhu tubuhnya tetap stabil.,multiple_choice,Apa ciri khas hewan berdarah panas?,Menjaga suhu tubuh tetap stabil,Menyesuaikan suhu tubuh dengan lingkungan|Menjaga suhu tubuh tetap stabil|Bisa berubah warna|Tidak bernapas
|
||||||
|
Kura-kura berkembang biak dengan bertelur.,true_false,Kura-kura berkembang biak dengan melahirkan.,False,True|False
|
||||||
|
Hewan yang memiliki tulang belakang disebut vertebrata.,fill_in_the_blank,Hewan apakah yang memiliki tulang belakang?,Vertebrata,-
|
||||||
|
Hewan yang tidak memiliki tulang belakang disebut invertebrata.,true_false,Laba-laba adalah hewan vertebrata.,False,True|False
|
||||||
|
Jaringan xilem pada tumbuhan berfungsi mengangkut air dan mineral dari akar ke daun.,multiple_choice,Apa fungsi jaringan xilem pada tumbuhan?,Mengangkut air dan mineral,Mengangkut oksigen|Menyerap cahaya|Mengangkut air dan mineral|Menyimpan cadangan makanan
|
||||||
|
Lidah manusia memiliki indera perasa untuk merasakan berbagai rasa makanan.,fill_in_the_blank,Organ tubuh apakah yang berfungsi sebagai indera perasa?,Lidah,-
|
||||||
|
Hewan yang hidup di air dan bernapas dengan insang disebut ikan.,true_false,Katak adalah hewan yang bernapas dengan insang sepanjang hidupnya.,False,True|False
|
||||||
|
Rusa jantan memiliki tanduk untuk mempertahankan diri dan menarik perhatian betina.,multiple_choice,Mengapa rusa jantan memiliki tanduk?,Untuk mempertahankan diri dan menarik perhatian betina,Untuk mendengar lebih baik|Untuk bernapas|Untuk mempertahankan diri dan menarik perhatian betina|Untuk mengumpulkan makanan
|
||||||
|
Daun memiliki bagian yang disebut stomata untuk pertukaran gas.,fill_in_the_blank,Bagian apakah pada daun yang berfungsi untuk pertukaran gas?,Stomata,-
|
||||||
|
Burung merpati memiliki kemampuan navigasi yang baik untuk kembali ke sarangnya.,true_false,Burung merpati sulit menemukan jalan pulang ke sarangnya.,False,True|False
|
||||||
|
Tumbuhan berakar tunggang biasanya memiliki batang yang kuat.,multiple_choice,Tumbuhan dengan akar tunggang memiliki batang seperti apa?,Kuat,Lemah|Kuat|Lentur|Berongga
|
||||||
|
Proses pencernaan makanan dimulai dari mulut.,fill_in_the_blank,Di mana proses pencernaan makanan dimulai?,Mulut,-
|
||||||
|
Jamur bukan termasuk tumbuhan karena tidak memiliki klorofil.,true_false,Jamur termasuk tumbuhan karena memiliki klorofil.,False,True|False
|
||||||
|
Kupu-kupu mengalami metamorfosis sempurna yang terdiri dari empat tahap.,multiple_choice,Berapa tahap dalam metamorfosis sempurna kupu-kupu?,4,2|3|4|5
|
||||||
|
Proses pembuahan pada tumbuhan terjadi di dalam bunga.,fill_in_the_blank,Di bagian manakah proses pembuahan pada tumbuhan terjadi?,Bunga,-
|
||||||
|
Tumbuhan yang hidup di air disebut hidrofit.,true_false,Tumbuhan yang hidup di air disebut xerofit.,False,True|False
|
||||||
|
Burung hantu memiliki mata yang besar untuk melihat dengan jelas di malam hari.,multiple_choice,Mengapa burung hantu memiliki mata besar?,Untuk melihat jelas di malam hari,Untuk menarik perhatian betina|Untuk berenang|Untuk melihat jelas di malam hari|Untuk mencari makan di siang hari
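
The rows above have no header; from the data, the columns appear to be context, question type, question, answer, and pipe-separated options ('-' when options do not apply). Because a context can itself contain commas, a plain five-column CSV parse is unreliable; the helper below is a sketch, not part of the commit, and splits each line from the right instead.

# Illustrative only: parse one dataset row, assuming type/question/answer/options contain no commas.
def parse_row(line):
    context, q_type, question, answer, options = line.rstrip("\n").rsplit(",", 4)
    choices = None if options == "-" else options.split("|")
    return {"context": context, "type": q_type, "question": question,
            "answer": answer, "options": choices}

# Example with a row from this file:
# parse_row("Segitiga memiliki tiga sisi.,fill_in_the_blank,Berapa jumlah sisi yang dimiliki oleh segitiga?,Tiga,-")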
@@ -1,5 +1,51 @@
-numpy
-pandas
-matplotlib
-scikit-learn
-tensorflow
+absl-py==2.1.0
+astunparse==1.6.3
+certifi==2025.1.31
+charset-normalizer==3.4.1
+contourpy==1.3.1
+cycler==0.12.1
+flatbuffers==25.2.10
+fonttools==4.56.0
+gast==0.6.0
+google-pasta==0.2.0
+grpcio==1.71.0
+h5py==3.13.0
+idna==3.10
+joblib==1.4.2
+keras==3.9.0
+kiwisolver==1.4.8
+libclang==18.1.1
+Markdown==3.7
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+matplotlib==3.10.1
+mdurl==0.1.2
+ml-dtypes==0.4.1
+namex==0.0.8
+numpy==2.0.2
+opt_einsum==3.4.0
+optree==0.14.1
+packaging==24.2
+pandas==2.2.3
+pillow==11.1.0
+protobuf==5.29.3
+Pygments==2.19.1
+pyparsing==3.2.1
+python-dateutil==2.9.0.post0
+pytz==2025.1
+requests==2.32.3
+rich==13.9.4
+scikit-learn==1.6.1
+scipy==1.15.2
+six==1.17.0
+tensorboard==2.18.0
+tensorboard-data-server==0.7.2
+tensorflow==2.18.0
+tensorflow-io-gcs-filesystem==0.37.1
+termcolor==2.5.0
+threadpoolctl==3.5.0
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==2.3.0
+Werkzeug==3.1.3
+wrapt==1.17.2
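
The pins above appear to target the notebook's reported kernel (Python 3.10.16, environment name myenv); a typical setup, assuming that interpreter, is `python3.10 -m venv myenv` followed by `pip install -r requirements.txt`.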
@@ -0,0 +1,77 @@
from tensorflow.keras.models import load_model
import numpy as np
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load Trained Model
model = load_model("lstm_multi_output_model.keras")

# Load Tokenizer
with open("tokenizer.pkl", "rb") as handle:
    tokenizer = pickle.load(handle)

# Define max sequence length (should match training phase)
MAX_LENGTH = 100

# Mapping for question type labels
question_type_dict = {0: "Fill in the Blank", 1: "True/False", 2: "Multiple Choice"}


def predict_question_answer_type(context_text):
    """
    Given a context (paragraph), predict a question, answer, and question type.
    """
    # === Tokenize and Pad the Context === #
    context_seq = tokenizer.texts_to_sequences([context_text])
    context_padded = pad_sequences(
        context_seq, maxlen=MAX_LENGTH, padding="post", truncating="post"
    )

    # === Create a Dummy Input for the Question Decoder === #
    # Since our model is seq2seq, we initialize an empty decoder input
    decoder_input_seq = np.zeros((1, MAX_LENGTH))

    # === Predict Outputs === #
    predicted_question_seq, predicted_answer_seq, predicted_type = model.predict(
        [context_padded, decoder_input_seq]
    )

    # === Convert Predicted Sequences to Text === #
    index_to_word = {v: k for k, v in tokenizer.word_index.items()}

    # Convert predicted question
    predicted_question = " ".join(
        [
            index_to_word[idx]
            for idx in np.argmax(predicted_question_seq, axis=2)[0]
            if idx in index_to_word
        ]
    )

    # Convert predicted answer
    predicted_answer = " ".join(
        [
            index_to_word[idx]
            for idx in np.argmax(predicted_answer_seq, axis=2)[0]
            if idx in index_to_word
        ]
    )

    # Convert predicted question type (numerical label → text)
    predicted_question_type = question_type_dict[np.argmax(predicted_type)]

    return predicted_question, predicted_answer, predicted_question_type


# Sample Test Context
context_example = "Ki Hajar Dewantara adalah pelopor pendidikan di Indonesia dan pendiri Taman Siswa. Ia dikenal dengan semboyannya 'Ing Ngarsa Sung Tuladha, Ing Madya Mangun Karsa, Tut Wuri Handayani', yang menekankan peran guru dalam pendidikan."

# Run the prediction
predicted_question, predicted_answer, predicted_question_type = (
    predict_question_answer_type(context_example)
)

# Print the Results
print(f"🔹 Predicted Question: {predicted_question}")
print(f"🔹 Predicted Answer: {predicted_answer}")
print(f"🔹 Predicted Question Type: {predicted_question_type}")
BIN tokenizer.pkl
Binary file not shown.
@@ -0,0 +1,603 @@
|
||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 47,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# import library\n",
|
||||||
|
"\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import json\n",
|
||||||
|
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
|
||||||
|
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
|
||||||
|
"\n",
|
||||||
|
"import re\n",
|
||||||
|
"import string\n",
|
||||||
|
"import nltk\n",
|
||||||
|
"from nltk.corpus import stopwords\n",
|
||||||
|
"from nltk.tokenize import word_tokenize\n",
|
||||||
|
"from nltk.stem import WordNetLemmatizer\n",
|
||||||
|
"import pickle\n",
|
||||||
|
"\n",
|
||||||
|
"from tensorflow.keras.models import Model\n",
|
||||||
|
"from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, Concatenate\n",
|
||||||
|
"from sklearn.metrics import classification_report, precision_score, recall_score, accuracy_score\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 48,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"[nltk_data] Downloading package stopwords to /home/akeon/nltk_data...\n",
|
||||||
|
"[nltk_data] Package stopwords is already up-to-date!\n",
|
||||||
|
"[nltk_data] Downloading package punkt to /home/akeon/nltk_data...\n",
|
||||||
|
"[nltk_data] Package punkt is already up-to-date!\n",
|
||||||
|
"[nltk_data] Downloading package punkt_tab to /home/akeon/nltk_data...\n",
|
||||||
|
"[nltk_data] Package punkt_tab is already up-to-date!\n",
|
||||||
|
"[nltk_data] Downloading package wordnet to /home/akeon/nltk_data...\n",
|
||||||
|
"[nltk_data] Package wordnet is already up-to-date!\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"True"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 48,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# download assets\n",
|
||||||
|
"nltk.download(\"stopwords\")\n",
|
||||||
|
"nltk.download(\"punkt\")\n",
|
||||||
|
"nltk.download(\"punkt_tab\")\n",
|
||||||
|
"nltk.download(\"wordnet\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 49,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<div>\n",
|
||||||
|
"<style scoped>\n",
|
||||||
|
" .dataframe tbody tr th:only-of-type {\n",
|
||||||
|
" vertical-align: middle;\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" .dataframe tbody tr th {\n",
|
||||||
|
" vertical-align: top;\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" .dataframe thead th {\n",
|
||||||
|
" text-align: right;\n",
|
||||||
|
" }\n",
|
||||||
|
"</style>\n",
|
||||||
|
"<table border=\"1\" class=\"dataframe\">\n",
|
||||||
|
" <thead>\n",
|
||||||
|
" <tr style=\"text-align: right;\">\n",
|
||||||
|
" <th></th>\n",
|
||||||
|
" <th>context</th>\n",
|
||||||
|
" <th>qa_pairs</th>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" </thead>\n",
|
||||||
|
" <tbody>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>0</th>\n",
|
||||||
|
" <td>Albert Einstein adalah fisikawan teoretis kela...</td>\n",
|
||||||
|
" <td>[{'type': 'fill_in_the_blank', 'question': '__...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1</th>\n",
|
||||||
|
" <td>Samudra Pasifik adalah yang terbesar dan terda...</td>\n",
|
||||||
|
" <td>[{'type': 'fill_in_the_blank', 'question': 'Sa...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>2</th>\n",
|
||||||
|
" <td>Proklamasi Kemerdekaan Indonesia dibacakan pad...</td>\n",
|
||||||
|
" <td>[{'type': 'fill_in_the_blank', 'question': 'Pr...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>3</th>\n",
|
||||||
|
" <td>Hukum Newton adalah tiga hukum fisika yang men...</td>\n",
|
||||||
|
" <td>[{'type': 'fill_in_the_blank', 'question': 'Hu...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>4</th>\n",
|
||||||
|
" <td>Budi Utomo adalah organisasi pemuda yang didir...</td>\n",
|
||||||
|
" <td>[{'type': 'fill_in_the_blank', 'question': 'Bu...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" </tbody>\n",
|
||||||
|
"</table>\n",
|
||||||
|
"</div>"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
" context \\\n",
|
||||||
|
"0 Albert Einstein adalah fisikawan teoretis kela... \n",
|
||||||
|
"1 Samudra Pasifik adalah yang terbesar dan terda... \n",
|
||||||
|
"2 Proklamasi Kemerdekaan Indonesia dibacakan pad... \n",
|
||||||
|
"3 Hukum Newton adalah tiga hukum fisika yang men... \n",
|
||||||
|
"4 Budi Utomo adalah organisasi pemuda yang didir... \n",
|
||||||
|
"\n",
|
||||||
|
" qa_pairs \n",
|
||||||
|
"0 [{'type': 'fill_in_the_blank', 'question': '__... \n",
|
||||||
|
"1 [{'type': 'fill_in_the_blank', 'question': 'Sa... \n",
|
||||||
|
"2 [{'type': 'fill_in_the_blank', 'question': 'Pr... \n",
|
||||||
|
"3 [{'type': 'fill_in_the_blank', 'question': 'Hu... \n",
|
||||||
|
"4 [{'type': 'fill_in_the_blank', 'question': 'Bu... "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 49,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# load dataset\n",
|
||||||
|
"df = pd.read_json(\"independent_dataset.json\")\n",
|
||||||
|
"df.head()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 50,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Text Preprocessing\n",
|
||||||
|
"stop_words = set(stopwords.words(\"indonesian\")) \n",
|
||||||
|
"lemmatizer = WordNetLemmatizer()\n",
|
||||||
|
"\n",
|
||||||
|
"normalization_dict = {\n",
|
||||||
|
" \"yg\": \"yang\",\n",
|
||||||
|
" \"gokil\": \"kocak\",\n",
|
||||||
|
" \"kalo\": \"kalau\",\n",
|
||||||
|
" \"gue\": \"saya\",\n",
|
||||||
|
" \"elo\": \"kamu\",\n",
|
||||||
|
" \"nih\": \"ini\",\n",
|
||||||
|
" \"trs\": \"terus\",\n",
|
||||||
|
" \"tdk\": \"tidak\",\n",
|
||||||
|
" \"gmna\": \"bagaimana\",\n",
|
||||||
|
" \"tp\": \"tapi\",\n",
|
||||||
|
" \"jd\": \"jadi\",\n",
|
||||||
|
" \"aja\": \"saja\",\n",
|
||||||
|
" \"krn\": \"karena\",\n",
|
||||||
|
" \"blm\": \"belum\",\n",
|
||||||
|
" \"dgn\": \"dengan\",\n",
|
||||||
|
" \"skrg\": \"sekarang\",\n",
|
||||||
|
" \"msh\": \"masih\",\n",
|
||||||
|
" \"lg\": \"lagi\",\n",
|
||||||
|
" \"sy\": \"saya\",\n",
|
||||||
|
" \"sm\": \"sama\",\n",
|
||||||
|
" \"bgt\": \"banget\",\n",
|
||||||
|
" \"dr\": \"dari\",\n",
|
||||||
|
" \"kpn\": \"kapan\",\n",
|
||||||
|
" \"hrs\": \"harus\",\n",
|
||||||
|
" \"cm\": \"cuma\",\n",
|
||||||
|
" \"sbnrnya\": \"sebenarnya\",\n",
|
||||||
|
" \"tdr\": \"tidur\",\n",
|
||||||
|
" \"tdk\": \"tidak\",\n",
|
||||||
|
" \"kl\": \"kalau\",\n",
|
||||||
|
" \"org\": \"orang\",\n",
|
||||||
|
" \"pke\": \"pakai\",\n",
|
||||||
|
" \"prnh\": \"pernah\",\n",
|
||||||
|
" \"brgkt\": \"berangkat\",\n",
|
||||||
|
" \"pdhl\": \"padahal\",\n",
|
||||||
|
" \"btw\": \"ngomong-ngomong\",\n",
|
||||||
|
" \"dmn\": \"di mana\",\n",
|
||||||
|
" \"bsk\": \"besok\",\n",
|
||||||
|
" \"td\": \"tadi\",\n",
|
||||||
|
" \"dlm\": \"dalam\",\n",
|
||||||
|
" \"utk\": \"untuk\",\n",
|
||||||
|
" \"spt\": \"seperti\",\n",
|
||||||
|
" \"gpp\": \"tidak apa-apa\",\n",
|
||||||
|
" \"bs\": \"bisa\",\n",
|
||||||
|
" \"jg\": \"juga\",\n",
|
||||||
|
" \"tp\": \"tapi\",\n",
|
||||||
|
" \"dg\": \"dengan\",\n",
|
||||||
|
" \"klw\": \"kalau\",\n",
|
||||||
|
" \"wkwk\": \"haha\",\n",
|
||||||
|
" \"cpt\": \"cepat\",\n",
|
||||||
|
" \"knp\": \"kenapa\",\n",
|
||||||
|
" \"jgk\": \"juga\",\n",
|
||||||
|
" \"plg\": \"pulang\",\n",
|
||||||
|
" \"brp\": \"berapa\",\n",
|
||||||
|
" \"bkn\": \"bukan\",\n",
|
||||||
|
" \"mnt\": \"minta\",\n",
|
||||||
|
" \"udh\": \"sudah\",\n",
|
||||||
|
" \"sdh\": \"sudah\",\n",
|
||||||
|
" \"brkt\": \"berangkat\",\n",
|
||||||
|
" \"btw\": \"by the way\",\n",
|
||||||
|
" \"tdk\": \"tidak\",\n",
|
||||||
|
" \"sprt\": \"seperti\",\n",
|
||||||
|
" \"jgn\": \"jangan\",\n",
|
||||||
|
" \"mlm\": \"malam\",\n",
|
||||||
|
" \"sblm\": \"sebelum\",\n",
|
||||||
|
" \"stlh\": \"setelah\",\n",
|
||||||
|
" \"tdr\": \"tidur\",\n",
|
||||||
|
" \"mlh\": \"malah\",\n",
|
||||||
|
" \"tmn\": \"teman\",\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"def text_preprocessing(text):\n",
|
||||||
|
" #doing lower case \n",
|
||||||
|
" text = text.lower()\n",
|
||||||
|
" \n",
|
||||||
|
" # remove symbol and read mark\n",
|
||||||
|
" text = text.translate(str.maketrans(\"\", \"\", string.punctuation))\n",
|
||||||
|
" \n",
|
||||||
|
" # remove blank space\n",
|
||||||
|
" text = re.sub(r\"\\s+\", \" \", text).strip()\n",
|
||||||
|
" \n",
|
||||||
|
" # word tokenize \n",
|
||||||
|
" tokens = word_tokenize(text)\n",
|
||||||
|
" \n",
|
||||||
|
" # normalassi kata\n",
|
||||||
|
" tokens = [normalization_dict[word] if word in normalization_dict else word for word in tokens] \n",
|
||||||
|
" \n",
|
||||||
|
" \n",
|
||||||
|
" # lemmatization\n",
|
||||||
|
" tokens = [lemmatizer.lemmatize(word) for word in tokens] \n",
|
||||||
|
" \n",
|
||||||
|
" # stopword removal\n",
|
||||||
|
" tokens = [word for word in tokens if word not in stop_words] \n",
|
||||||
|
" \n",
|
||||||
|
" return tokens\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 51,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Vocabulary Size: 182\n",
|
||||||
|
"✅ Sample Tokenized Context: [ 9 10 91 38 92 93 39 5 19 94 95 11 96 97 40 98 99 100\n",
|
||||||
|
" 101 20 21 22 11 41 102 11 38 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0]\n",
|
||||||
|
"✅ Sample Tokenized Question: [39 5 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0]\n",
|
||||||
|
"✅ Sample Tokenized Answer: [ 9 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
|
||||||
|
" 0 0 0 0]\n",
|
||||||
|
"✅ Sample Question Type Label: 0\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"with open(\"independent_dataset.json\", \"r\", encoding=\"utf-8\") as file:\n",
|
||||||
|
" dataset = json.load(file)\n",
|
||||||
|
"\n",
|
||||||
|
"for entry in dataset:\n",
|
||||||
|
" entry[\"context\"] = text_preprocessing(entry[\"context\"])\n",
|
||||||
|
" for qa in entry[\"qa_pairs\"]:\n",
|
||||||
|
" qa[\"question\"] = text_preprocessing(qa[\"question\"])\n",
|
||||||
|
" qa[\"answer\"] = text_preprocessing(qa[\"answer\"])\n",
|
||||||
|
"\n",
|
||||||
|
"# === Extract Contexts, Questions, Answers, and Question Types === #\n",
|
||||||
|
"contexts = [entry[\"context\"] for entry in dataset]\n",
|
||||||
|
"questions = [qa[\"question\"] for entry in dataset for qa in entry[\"qa_pairs\"]]\n",
|
||||||
|
"answers = [qa[\"answer\"] for entry in dataset for qa in entry[\"qa_pairs\"]]\n",
|
||||||
|
"question_types = [qa[\"type\"] for entry in dataset for qa in entry[\"qa_pairs\"]] # Extract Question Types\n",
|
||||||
|
"\n",
|
||||||
|
"# === Initialize Tokenizer === #\n",
|
||||||
|
"tokenizer = Tokenizer(oov_token=\"<OOV>\")\n",
|
||||||
|
"tokenizer.fit_on_texts(contexts + questions + answers)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Convert Text to Sequences === #\n",
|
||||||
|
"context_sequences = tokenizer.texts_to_sequences(contexts)\n",
|
||||||
|
"question_sequences = tokenizer.texts_to_sequences(questions)\n",
|
||||||
|
"answer_sequences = tokenizer.texts_to_sequences(answers)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Define Max Length for Padding === #\n",
|
||||||
|
"MAX_LENGTH = 100 # Adjust based on dataset analysis\n",
|
||||||
|
"context_padded = pad_sequences(context_sequences, maxlen=MAX_LENGTH, padding=\"post\", truncating=\"post\")\n",
|
||||||
|
"question_padded = pad_sequences(question_sequences, maxlen=MAX_LENGTH, padding=\"post\", truncating=\"post\")\n",
|
||||||
|
"answer_padded = pad_sequences(answer_sequences, maxlen=MAX_LENGTH, padding=\"post\", truncating=\"post\")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Encode Question Types (Convert Categorical Labels to Numeric) === #\n",
|
||||||
|
"question_type_dict = {\"fill_in_the_blank\": 0, \"true_false\": 1, \"multiple_choice\": 2}\n",
|
||||||
|
"question_type_labels = np.array([question_type_dict[q_type] for q_type in question_types])\n",
|
||||||
|
"\n",
|
||||||
|
"# === Save Processed Data as .npy Files === #\n",
|
||||||
|
"np.save(\"context_padded.npy\", context_padded)\n",
|
||||||
|
"np.save(\"question_padded.npy\", question_padded)\n",
|
||||||
|
"np.save(\"answer_padded.npy\", answer_padded)\n",
|
||||||
|
"np.save(\"question_type_labels.npy\", question_type_labels)\n",
|
||||||
|
"\n",
|
||||||
|
"# Save Tokenizer for Future Use\n",
|
||||||
|
"with open(\"tokenizer.pkl\", \"wb\") as handle:\n",
|
||||||
|
" pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Check Results === #\n",
|
||||||
|
"print(f\"✅ Vocabulary Size: {len(tokenizer.word_index) + 1}\")\n",
|
||||||
|
"print(f\"✅ Sample Tokenized Context: {context_padded[0]}\")\n",
|
||||||
|
"print(f\"✅ Sample Tokenized Question: {question_padded[0]}\")\n",
|
||||||
|
"print(f\"✅ Sample Tokenized Answer: {answer_padded[0]}\")\n",
|
||||||
|
"print(f\"✅ Sample Question Type Label: {question_type_labels[0]}\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 52,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Epoch 1/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 3s/step - answer_output_accuracy: 0.0000e+00 - answer_output_loss: 5.2070 - loss: 11.5152 - question_output_accuracy: 0.0000e+00 - question_output_loss: 5.2081 - question_type_output_accuracy: 0.3333 - question_type_output_loss: 1.1002 - val_answer_output_accuracy: 0.1250 - val_answer_output_loss: 5.1854 - val_loss: 11.4804 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2043 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0907\n",
|
||||||
|
"Epoch 2/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 184ms/step - answer_output_accuracy: 0.2100 - answer_output_loss: 5.1680 - loss: 11.4156 - question_output_accuracy: 0.0167 - question_output_loss: 5.1820 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 1.0656 - val_answer_output_accuracy: 0.2450 - val_answer_output_loss: 5.1625 - val_loss: 11.4545 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2056 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0864\n",
|
||||||
|
"Epoch 3/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 188ms/step - answer_output_accuracy: 0.2717 - answer_output_loss: 5.1203 - loss: 11.3080 - question_output_accuracy: 0.0250 - question_output_loss: 5.1552 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 1.0326 - val_answer_output_accuracy: 0.3350 - val_answer_output_loss: 5.1270 - val_loss: 11.4122 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2071 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0781\n",
|
||||||
|
"Epoch 4/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 174ms/step - answer_output_accuracy: 0.3417 - answer_output_loss: 5.0458 - loss: 11.1663 - question_output_accuracy: 0.0333 - question_output_loss: 5.1257 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.9948 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 5.0661 - val_loss: 11.3417 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2090 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0665\n",
|
||||||
|
"Epoch 5/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 166ms/step - answer_output_accuracy: 0.9883 - answer_output_loss: 4.9145 - loss: 10.9538 - question_output_accuracy: 0.0333 - question_output_loss: 5.0917 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.9476 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 4.9497 - val_loss: 11.2111 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2115 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0498\n",
|
||||||
|
"Epoch 6/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 176ms/step - answer_output_accuracy: 0.9883 - answer_output_loss: 4.6563 - loss: 10.5922 - question_output_accuracy: 0.0333 - question_output_loss: 5.0504 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.8855 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 4.6939 - val_loss: 10.9339 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2154 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 1.0247\n",
|
||||||
|
"Epoch 7/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 189ms/step - answer_output_accuracy: 0.9867 - answer_output_loss: 4.0798 - loss: 9.8799 - question_output_accuracy: 0.0333 - question_output_loss: 4.9974 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.8027 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 4.0274 - val_loss: 10.2376 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2215 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 0.9888\n",
|
||||||
|
"Epoch 8/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 172ms/step - answer_output_accuracy: 0.9867 - answer_output_loss: 2.9405 - loss: 8.5655 - question_output_accuracy: 0.0333 - question_output_loss: 4.9250 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.7000 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 2.6667 - val_loss: 8.8361 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2325 - val_question_type_output_accuracy: 1.0000 - val_question_type_output_loss: 0.9369\n",
|
||||||
|
"Epoch 9/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 186ms/step - answer_output_accuracy: 0.9867 - answer_output_loss: 1.7502 - loss: 7.1458 - question_output_accuracy: 0.0317 - question_output_loss: 4.8152 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.5805 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 1.5605 - val_loss: 7.6711 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.2582 - val_question_type_output_accuracy: 0.5000 - val_question_type_output_loss: 0.8525\n",
|
||||||
|
"Epoch 10/10\n",
|
||||||
|
"\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 196ms/step - answer_output_accuracy: 0.9867 - answer_output_loss: 0.9620 - loss: 6.0314 - question_output_accuracy: 0.0250 - question_output_loss: 4.6111 - question_type_output_accuracy: 1.0000 - question_type_output_loss: 0.4584 - val_answer_output_accuracy: 0.9700 - val_answer_output_loss: 0.8570 - val_loss: 6.9187 - val_question_output_accuracy: 0.0000e+00 - val_question_output_loss: 5.3395 - val_question_type_output_accuracy: 0.5000 - val_question_type_output_loss: 0.7221\n",
|
||||||
|
"✅ Model LSTM Multi-Output berhasil dilatih dan disimpan!\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# Implementation of lstm with semantic analyz\n",
|
||||||
|
"# === Load Tokenizer === #\n",
|
||||||
|
"with open(\"tokenizer.pkl\", \"rb\") as handle:\n",
|
||||||
|
" tokenizer = pickle.load(handle)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Load Data yang Sudah Diproses === #\n",
|
||||||
|
"MAX_LENGTH = 100\n",
|
||||||
|
"VOCAB_SIZE = len(tokenizer.word_index) + 1\n",
|
||||||
|
"\n",
|
||||||
|
"context_padded = np.load(\"context_padded.npy\")\n",
|
||||||
|
"question_padded = np.load(\"question_padded.npy\")\n",
|
||||||
|
"answer_padded = np.load(\"answer_padded.npy\")\n",
|
||||||
|
"question_type_labels = np.load(\n",
|
||||||
|
" \"question_type_labels.npy\"\n",
|
||||||
|
") # Label tipe soal (0 = Fill, 1 = True/False, 2 = Multiple Choice)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Hyperparameter === #\n",
|
||||||
|
"EMBEDDING_DIM = 300\n",
|
||||||
|
"LSTM_UNITS = 256\n",
|
||||||
|
"BATCH_SIZE = 32\n",
|
||||||
|
"EPOCHS = 10\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"# === Input Encoder (Konteks) === #\n",
|
||||||
|
"context_input = Input(shape=(MAX_LENGTH,), name=\"context_input\")\n",
|
||||||
|
"context_embedding = Embedding(\n",
|
||||||
|
" input_dim=VOCAB_SIZE,\n",
|
||||||
|
" output_dim=EMBEDDING_DIM,\n",
|
||||||
|
" mask_zero=True,\n",
|
||||||
|
" name=\"context_embedding\",\n",
|
||||||
|
")(context_input)\n",
|
||||||
|
"encoder_lstm = LSTM(LSTM_UNITS, return_state=True, name=\"encoder_lstm\")\n",
|
||||||
|
"encoder_output, state_h, state_c = encoder_lstm(context_embedding)\n",
|
||||||
|
"\n",
|
||||||
|
"# === Decoder untuk Pertanyaan === #\n",
|
||||||
|
"question_decoder_input = Input(shape=(MAX_LENGTH,), name=\"question_decoder_input\")\n",
|
||||||
|
"question_embedding = Embedding(\n",
|
||||||
|
" input_dim=VOCAB_SIZE,\n",
|
||||||
|
" output_dim=EMBEDDING_DIM,\n",
|
||||||
|
" mask_zero=True,\n",
|
||||||
|
" name=\"question_embedding\",\n",
|
||||||
|
")(question_decoder_input)\n",
|
||||||
|
"question_lstm = LSTM(\n",
|
||||||
|
" LSTM_UNITS, return_sequences=True, return_state=True, name=\"question_lstm\"\n",
|
||||||
|
")\n",
|
||||||
|
"question_output, _, _ = question_lstm(\n",
|
||||||
|
" question_embedding, initial_state=[state_h, state_c]\n",
|
||||||
|
")\n",
|
||||||
|
"question_dense = Dense(VOCAB_SIZE, activation=\"softmax\", name=\"question_output\")(\n",
|
||||||
|
" question_output\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Decoder untuk Jawaban === #\n",
|
||||||
|
"answer_lstm = LSTM(\n",
|
||||||
|
" LSTM_UNITS, return_sequences=True, return_state=True, name=\"answer_lstm\"\n",
|
||||||
|
")\n",
|
||||||
|
"answer_output, _, _ = answer_lstm(context_embedding, initial_state=[state_h, state_c])\n",
|
||||||
|
"answer_dense = Dense(VOCAB_SIZE, activation=\"softmax\", name=\"answer_output\")(\n",
|
||||||
|
" answer_output\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Prediksi Tipe Soal (Fill, True/False, Multiple Choice) === #\n",
|
||||||
|
"type_dense = Dense(128, activation=\"relu\")(encoder_output)\n",
|
||||||
|
"question_type_output = Dense(3, activation=\"softmax\", name=\"question_type_output\")(\n",
|
||||||
|
" type_dense\n",
|
||||||
|
") # 3 Kategori soal\n",
|
||||||
|
"\n",
|
||||||
|
"# === Membangun Model Multi-Output === #\n",
|
||||||
|
"model = Model(\n",
|
||||||
|
" inputs=[context_input, question_decoder_input],\n",
|
||||||
|
" outputs=[question_dense, answer_dense, question_type_output],\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Compile Model === #\n",
|
||||||
|
"# Compile Model (Fix for multiple outputs)\n",
|
||||||
|
"model.compile(\n",
|
||||||
|
" optimizer=\"adam\",\n",
|
||||||
|
" loss={\n",
|
||||||
|
" \"question_output\": \"sparse_categorical_crossentropy\",\n",
|
||||||
|
" \"answer_output\": \"sparse_categorical_crossentropy\",\n",
|
||||||
|
" \"question_type_output\": \"sparse_categorical_crossentropy\",\n",
|
||||||
|
" },\n",
|
||||||
|
" metrics={\n",
|
||||||
|
" \"question_output\": [\"accuracy\"],\n",
|
||||||
|
" \"answer_output\": [\"accuracy\"],\n",
|
||||||
|
" \"question_type_output\": [\"accuracy\"],\n",
|
||||||
|
" },\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Training Model === #\n",
|
||||||
|
"model.fit(\n",
|
||||||
|
" [context_padded, question_padded],\n",
|
||||||
|
" {\n",
|
||||||
|
" \"question_output\": question_padded,\n",
|
||||||
|
" \"answer_output\": answer_padded,\n",
|
||||||
|
" \"question_type_output\": question_type_labels,\n",
|
||||||
|
" },\n",
|
||||||
|
" batch_size=BATCH_SIZE,\n",
|
||||||
|
" epochs=EPOCHS,\n",
|
||||||
|
" validation_split=0.2,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# === Simpan Model === #\n",
|
||||||
|
"model.save(\"lstm_multi_output_model.keras\")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Model LSTM Multi-Output berhasil dilatih dan disimpan!\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 53,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"ename": "NameError",
|
||||||
|
"evalue": "name 'context_padded_test' is not defined",
|
||||||
|
"output_type": "error",
|
||||||
|
"traceback": [
|
||||||
|
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||||
|
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||||
|
"Cell \u001b[0;32mIn[53], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m predictions \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mpredict([\u001b[43mcontext_padded_test\u001b[49m, question_padded_test])\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# predictions[0] corresponds to question_output (shape: [batch_size, MAX_LENGTH, VOCAB_SIZE])\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# predictions[1] corresponds to answer_output (shape: [batch_size, MAX_LENGTH, VOCAB_SIZE])\u001b[39;00m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# predictions[2] corresponds to question_type_output (shape: [batch_size, 3])\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# Convert probabilities to predicted class indices\u001b[39;00m\n\u001b[1;32m 8\u001b[0m question_output_pred \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39margmax(predictions[\u001b[38;5;241m0\u001b[39m], axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m) \u001b[38;5;66;03m# shape: (batch_size, MAX_LENGTH)\u001b[39;00m\n",
|
||||||
|
"\u001b[0;31mNameError\u001b[0m: name 'context_padded_test' is not defined"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"predictions = model.predict([context_padded_test, question_padded_test])\n",
|
||||||
|
"\n",
|
||||||
|
"# predictions[0] corresponds to question_output (shape: [batch_size, MAX_LENGTH, VOCAB_SIZE])\n",
|
||||||
|
"# predictions[1] corresponds to answer_output (shape: [batch_size, MAX_LENGTH, VOCAB_SIZE])\n",
|
||||||
|
"# predictions[2] corresponds to question_type_output (shape: [batch_size, 3])\n",
|
||||||
|
"\n",
|
||||||
|
"# Convert probabilities to predicted class indices\n",
|
||||||
|
"question_output_pred = np.argmax(predictions[0], axis=-1) # shape: (batch_size, MAX_LENGTH)\n",
|
||||||
|
"answer_output_pred = np.argmax(predictions[1], axis=-1) # shape: (batch_size, MAX_LENGTH)\n",
|
||||||
|
"question_type_pred = np.argmax(predictions[2], axis=-1) # shape: (batch_size,)\n",
|
||||||
|
"\n",
|
||||||
|
"# === 3. Evaluate QUESTION TYPE (single-label classification) === #\n",
|
||||||
|
"print(\"=== Evaluation for Question Type ===\")\n",
|
||||||
|
"print(classification_report(\n",
|
||||||
|
" question_type_test, # True labels\n",
|
||||||
|
" question_type_pred, # Predicted labels\n",
|
||||||
|
" target_names=[\"Fill\", \"True/False\", \"Multiple Choice\"], # Optionally label your classes\n",
|
||||||
|
" zero_division=0 # Avoids warning if a class is absent\n",
|
||||||
|
"))\n",
|
||||||
|
"\n",
|
||||||
|
"# If you just want separate metrics (macro-average for multi-class):\n",
|
||||||
|
"acc_qtype = accuracy_score(question_type_test, question_type_pred)\n",
|
||||||
|
"prec_qtype = precision_score(question_type_test, question_type_pred, average='macro', zero_division=0)\n",
|
||||||
|
"rec_qtype = recall_score(question_type_test, question_type_pred, average='macro', zero_division=0)\n",
|
||||||
|
"\n",
|
||||||
|
"print(f\"Question Type -> Accuracy: {acc_qtype:.4f}, Precision(macro): {prec_qtype:.4f}, Recall(macro): {rec_qtype:.4f}\")\n",
|
||||||
|
"print(\"\")\n",
|
||||||
|
"\n",
|
||||||
|
"# === 4. Evaluate QUESTION OUTPUT & ANSWER OUTPUT (sequence predictions) === #\n",
|
||||||
|
"# We do a token-level comparison. We must exclude padded positions to get a fair score.\n",
|
||||||
|
"\n",
|
||||||
|
"# A helper function to flatten predictions & true labels while ignoring padding (zeros).\n",
|
||||||
|
"def flatten_and_mask(true_seq, pred_seq, pad_token=0):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" true_seq, pred_seq = [batch_size, MAX_LENGTH]\n",
|
||||||
|
" Returns flattened arrays of true & predicted labels, ignoring where true_seq == pad_token.\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" mask = (true_seq != pad_token)\n",
|
||||||
|
" true_flat = true_seq[mask].flatten()\n",
|
||||||
|
" pred_flat = pred_seq[mask].flatten()\n",
|
||||||
|
" return true_flat, pred_flat\n",
|
||||||
|
"\n",
|
||||||
|
"# --- 4a. Question Output ---\n",
|
||||||
|
"q_true_flat, q_pred_flat = flatten_and_mask(question_padded_test, question_output_pred, pad_token=0)\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"=== Evaluation for Question Tokens ===\")\n",
|
||||||
|
"print(classification_report(\n",
|
||||||
|
" q_true_flat, \n",
|
||||||
|
" q_pred_flat,\n",
|
||||||
|
" zero_division=0 # Avoid warnings if a class is absent\n",
|
||||||
|
"))\n",
|
||||||
|
"\n",
|
||||||
|
"acc_q = accuracy_score(q_true_flat, q_pred_flat)\n",
|
||||||
|
"prec_q = precision_score(q_true_flat, q_pred_flat, average='macro', zero_division=0)\n",
|
||||||
|
"rec_q = recall_score(q_true_flat, q_pred_flat, average='macro', zero_division=0)\n",
|
||||||
|
"print(f\"Question Tokens -> Accuracy: {acc_q:.4f}, Precision(macro): {prec_q:.4f}, Recall(macro): {rec_q:.4f}\")\n",
|
||||||
|
"print(\"\")\n",
|
||||||
|
"\n",
|
||||||
|
"# --- 4b. Answer Output ---\n",
|
||||||
|
"a_true_flat, a_pred_flat = flatten_and_mask(answer_padded_test, answer_output_pred, pad_token=0)\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"=== Evaluation for Answer Tokens ===\")\n",
|
||||||
|
"print(classification_report(\n",
|
||||||
|
" a_true_flat,\n",
|
||||||
|
" a_pred_flat,\n",
|
||||||
|
" zero_division=0\n",
|
||||||
|
"))\n",
|
||||||
|
"\n",
|
||||||
|
"acc_a = accuracy_score(a_true_flat, a_pred_flat)\n",
|
||||||
|
"prec_a = precision_score(a_true_flat, a_pred_flat, average='macro', zero_division=0)\n",
|
||||||
|
"rec_a = recall_score(a_true_flat, a_pred_flat, average='macro', zero_division=0)\n",
|
||||||
|
"print(f\"Answer Tokens -> Accuracy: {acc_a:.4f}, Precision(macro): {prec_a:.4f}, Recall(macro): {rec_a:.4f}\")\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "myenv",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.16"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 2
|
||||||
|
}
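
The last notebook cell above fails with NameError because no held-out arrays (context_padded_test, question_padded_test, answer_padded_test, question_type_test) are ever created. Below is a minimal sketch of one way to produce them before that cell runs; the 80/20 split and random seed are assumptions, not part of the commit, and it presumes the padded arrays share the same first dimension, as model.fit above already requires.

# Illustrative sketch: create the held-out arrays the evaluation cell expects.
from sklearn.model_selection import train_test_split

(context_padded_train, context_padded_test,
 question_padded_train, question_padded_test,
 answer_padded_train, answer_padded_test,
 question_type_train, question_type_test) = train_test_split(
    context_padded, question_padded, answer_padded, question_type_labels,
    test_size=0.2, random_state=42,
)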