# Lstm_prediction/model/model.py

# Import Libraries
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import joblib

# Constants
FIELD_DATE = 'Date'
FIELD_TIME = 'waktu'
FIELD_TEMP = 'suhu'          # Temperature column
FIELD_HUMID = 'kelembaban'   # Humidity column
FIELD_ID = 'id_mikro'        # Microcontroller ID column
SAMPLE_TRAINED = 30          # Number of previous samples used for each prediction
TARGET_ID_MIKRO = 14         # Replace with the desired micro ID

# **1. Function to read and preprocess the data**
def getDataFrameFromCSV(file_path, id_mikro_filter):
    # Read the CSV file without parsing dates yet
    df = pd.read_csv(file_path)

    # Make sure the required columns exist in the dataset
    if FIELD_DATE not in df.columns or FIELD_TIME not in df.columns or FIELD_ID not in df.columns:
        raise ValueError("The CSV must contain the columns 'Date', 'waktu', and 'id_mikro'.")

    # Combine the date and time columns into a single DateTime column
    df['DateTime'] = pd.to_datetime(df[FIELD_DATE] + ' ' + df[FIELD_TIME], errors='coerce')

    # Drop rows whose DateTime could not be parsed
    df = df.dropna(subset=['DateTime'])

    # Filter the data by micro ID
    df = df[df[FIELD_ID] == id_mikro_filter]

    # Sort chronologically
    df = df.sort_values(by='DateTime').reset_index(drop=True)

    # Fill missing sensor values with linear interpolation (numeric columns only)
    df[[FIELD_TEMP, FIELD_HUMID]] = df[[FIELD_TEMP, FIELD_HUMID]].interpolate(method='linear')

    return df

# **2. Read the data**
file_path = "Dataset.CSV"  # Replace with the CSV file name
df = getDataFrameFromCSV(file_path, TARGET_ID_MIKRO)

# Show basic info about the processed data
print(f"Data Shape After Filtering: {df.shape}")
print(f"First 5 Rows After Processing:\n{df.head()}")
df.info()

# **3. Preprocess the data**
dataset = df[[FIELD_TEMP, FIELD_HUMID]].values  # Use temperature and humidity

# Determine the training set size (90% of the data)
train_percent = 90
trainingDataLen = math.ceil((len(dataset) * train_percent) / 100)
print(f"Training Data Size: {trainingDataLen}")

# Scale the data to the range 0 - 1
scaler = MinMaxScaler(feature_range=(0, 1))
scaledData = scaler.fit_transform(dataset)

# Save the scaler to a file for later inference
joblib.dump(scaler, 'scaler.pkl')
print("✅ Scaler saved as scaler.pkl")

# **4. Prepare the training data**
trainData = scaledData[:trainingDataLen, :]
xTrain, yTrain = [], []
for i in range(SAMPLE_TRAINED, len(trainData)):
    xTrain.append(trainData[i - SAMPLE_TRAINED:i, :])  # Input window: all features
    yTrain.append(trainData[i, :])                     # Target: the next temperature and humidity

# Convert to numpy arrays
xTrain, yTrain = np.array(xTrain), np.array(yTrain)

# Reshape to match the LSTM input shape (samples, timesteps, features)
xTrain = np.reshape(xTrain, (xTrain.shape[0], xTrain.shape[1], 2))

# **5. Build the LSTM model**
model = Sequential([
    LSTM(50, return_sequences=True, input_shape=(xTrain.shape[1], 2)),
    LSTM(50, return_sequences=False),
    Dense(25),
    Dense(2)  # Output: temperature and humidity
])

# Show the model architecture summary
model.summary()

# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

# **6. Train the model**
model.fit(xTrain, yTrain, batch_size=16, epochs=50)  # Adjust epochs if needed
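
# Optional sketch (an assumption, not part of the original pipeline): instead of
# hand-tuning `epochs`, Keras' EarlyStopping callback can stop training once the
# loss stops improving. Uncomment to use; the monitor/patience values below are
# illustrative choices, not tuned settings.
#
# from keras.callbacks import EarlyStopping
# early_stop = EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)
# model.fit(xTrain, yTrain, batch_size=16, epochs=200, callbacks=[early_stop])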

# **7. Prepare the test data**
testData = scaledData[trainingDataLen - SAMPLE_TRAINED:, :]
xTest, yTest = [], dataset[trainingDataLen:, :]
for i in range(SAMPLE_TRAINED, len(testData)):
    xTest.append(testData[i - SAMPLE_TRAINED:i, :])

# Convert to a numpy array
xTest = np.array(xTest)

# Reshape to match the LSTM input shape
xTest = np.reshape(xTest, (xTest.shape[0], xTest.shape[1], 2))

# **8. Make predictions**
predictions = model.predict(xTest)
predictions = scaler.inverse_transform(predictions)  # Back to the original scale

# **9. Evaluate the model**
rmse_temp = np.sqrt(np.mean((predictions[:, 0] - yTest[:, 0]) ** 2))
rmse_humid = np.sqrt(np.mean((predictions[:, 1] - yTest[:, 1]) ** 2))

# Compute prediction accuracy as a percentage
max_temp = np.max(yTest[:, 0])
min_temp = np.min(yTest[:, 0])
akurasi_temp = (1 - (rmse_temp / (max_temp - min_temp))) * 100
max_humid = np.max(yTest[:, 1])
min_humid = np.min(yTest[:, 1])
akurasi_humid = (1 - (rmse_humid / (max_humid - min_humid))) * 100
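
# Note: "accuracy" here is range-normalized RMSE, i.e. 100 * (1 - RMSE / (max - min)).
# Worked example: an RMSE of 0.5 °C over a 10 °C observed temperature range gives
# (1 - 0.5 / 10) * 100 = 95%. This is a convenience metric, not a standard score.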
print(f"\nRMSE Temperature: {rmse_temp}")
print(f"RMSE Humidity: {rmse_humid}")
print(f"Temperature Prediction Accuracy: {akurasi_temp:.2f}%")
print(f"Humidity Prediction Accuracy: {akurasi_humid:.2f}%")

# **10. Visualize the predictions**
train = df[:trainingDataLen]
valid = df[trainingDataLen:].copy()
valid['Predicted_Temperature'] = predictions[:, 0]
valid['Predicted_Humidity'] = predictions[:, 1]

# Temperature plot
plt.figure(figsize=(14, 6))
plt.title(f"Temperature Prediction with RMSE: {rmse_temp:.4f}")
plt.xlabel("Date & Time", fontsize=14)
plt.ylabel("Temperature (°C)", fontsize=14)
plt.plot(train['DateTime'], train[FIELD_TEMP], label="Train")
plt.plot(valid['DateTime'], valid[FIELD_TEMP], label="Actual")
plt.plot(valid['DateTime'], valid["Predicted_Temperature"], label="Prediction", linestyle='dashed')
plt.xticks(rotation=45)
plt.legend()
plt.show()

# Humidity plot
plt.figure(figsize=(14, 6))
plt.title(f"Humidity Prediction with RMSE: {rmse_humid:.4f}")
plt.xlabel("Date & Time", fontsize=14)
plt.ylabel("Humidity (%)", fontsize=14)
plt.plot(train['DateTime'], train[FIELD_HUMID], label="Train")
plt.plot(valid['DateTime'], valid[FIELD_HUMID], label="Actual")
plt.plot(valid['DateTime'], valid["Predicted_Humidity"], label="Prediction", linestyle='dashed')
plt.xticks(rotation=45)
plt.legend()
plt.show()

# **11. Save the trained model**
model.save('lstm_mode4.h5')
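
# ------------------------------------------------------------------------------
# Example (sketch): reloading the saved artifacts for inference. This is an
# illustrative addition, not part of the original training pipeline; it assumes
# 'lstm_mode4.h5' and 'scaler.pkl' were just written by the steps above and that
# `df` still holds the processed sensor data.
# ------------------------------------------------------------------------------
from keras.models import load_model

loaded_model = load_model('lstm_mode4.h5')
loaded_scaler = joblib.load('scaler.pkl')

# Use the last SAMPLE_TRAINED observations as the input window
last_window = df[[FIELD_TEMP, FIELD_HUMID]].values[-SAMPLE_TRAINED:]
last_window_scaled = loaded_scaler.transform(last_window)
last_window_scaled = last_window_scaled.reshape(1, SAMPLE_TRAINED, 2)

# Predict the next (temperature, humidity) pair and map it back to the original scale
next_step_scaled = loaded_model.predict(last_window_scaled)
next_step = loaded_scaler.inverse_transform(next_step_scaled)
print(f"Next predicted temperature: {next_step[0, 0]:.2f}, humidity: {next_step[0, 1]:.2f}")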