first commit

sitirohilah 2025-05-08 13:13:41 +07:00
commit e9a62efb32
157 changed files with 29611 additions and 0 deletions

.gitignore (new file, 16 lines)

@@ -0,0 +1,16 @@
# Ignore log
backend/scraping.log
# Ignore all node_modules or venv
node_modules/
venv/
# Ignore generated/temporary files
*.log
.env
.DS_Store
*.json
# Exclude frontend directory from the ignore rules
!frontend/

README.md (new binary file, not shown)

backend/.gitignore (new file, 4 lines)

@@ -0,0 +1,4 @@
.env
venv/
*.dll
*.pyd

backend/README.md (new binary file, not shown)

6 more new binary files (file names not shown)

backend/admin.py (new file, 1156 lines)

File diff suppressed because it is too large.

backend/app.py (new file, 556 lines)

@@ -0,0 +1,556 @@
import os
import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from flask_jwt_extended import JWTManager, jwt_required, get_jwt_identity
import joblib
import mysql.connector
from models import db, User
from config import Config
from auth import auth_bp
from admin import admin_bp  # Import the admin blueprint, which already contains all admin routes
from datetime import datetime, timedelta
import logging
import scraping
app = Flask(__name__)
app.config.from_object(Config)
# JWT configuration
app.config['JWT_SECRET_KEY'] = 'rahasia-kunci-yang-sangat-aman'  # Replace with a proper secret key
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(hours=1)
# Initialize extensions
CORS(app)
jwt = JWTManager(app)
db.init_app(app)
# JWT error handlers
@jwt.invalid_token_loader
def invalid_token_callback(error):
logging.error(f"Invalid JWT token: {error}")
return jsonify({"status": "error", "message": "Invalid token"}), 401
@jwt.expired_token_loader
def expired_token_callback(jwt_header, jwt_data):
logging.error(f"Expired JWT token: {jwt_data}")
return jsonify({"status": "error", "message": "Token has expired"}), 401
@jwt.unauthorized_loader
def missing_token_callback(error):
logging.error(f"Missing JWT token: {error}")
return jsonify({"status": "error", "message": "Authorization header is missing"}), 401
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler("app.log"),
logging.StreamHandler()
]
)
logger = logging.getLogger()
# Register blueprints
app.register_blueprint(auth_bp, url_prefix='/api/auth')
app.register_blueprint(admin_bp, url_prefix='/api/admin')
# Create the uploads directory if it does not exist
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
# Initialize the database when the application starts
with app.app_context():
db.create_all()
# Create an admin user if one does not exist
if not User.query.filter_by(username='admin').first():
admin = User(username='admin', is_admin=True)
admin.set_password('admin123')
db.session.add(admin)
# Create a regular user for testing
user = User(username='user', is_admin=False)
user.set_password('user123')
db.session.add(user)
db.session.commit()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SCALER_DIR = os.path.join(BASE_DIR, "scalers")
MODEL_DIR = os.path.join(BASE_DIR, "models")
DATASET_DIR = os.path.join(BASE_DIR, "datasets")
for directory in [SCALER_DIR, MODEL_DIR, DATASET_DIR]:
os.makedirs(directory, exist_ok=True)
# Database connection helper
def connect_db():
try:
return mysql.connector.connect(
host="localhost",
user="root",
password="",
database="harga_komoditas",
autocommit=True
)
except mysql.connector.Error as err:
print(f"❌ Gagal koneksi database: {err}")
return None
# Load the model and scaler
def load_model(komoditas):
# Normalize the commodity name
komoditas_formatted = komoditas.lower().replace(" ", "_").replace("-", "_")
# Build the expected file names
model_filename = f"{komoditas_formatted}_model.h5"
scaler_filename = f"{komoditas_formatted}_scaler.pkl"
model_path = os.path.join(MODEL_DIR, model_filename)
scaler_path = os.path.join(SCALER_DIR, scaler_filename)
logger.info(f"🔍 Mencari model di: {model_path}")
logger.info(f"🔍 Mencari scaler di: {scaler_path}")
if not os.path.exists(model_path) or not os.path.exists(scaler_path):
print(f"❌ Model atau scaler untuk {komoditas} tidak ditemukan.")
return None, None
try:
model = tf.keras.models.load_model(model_path)
scaler = joblib.load(scaler_path)
print("✅ Model dan scaler berhasil dimuat.")
return model, scaler
except Exception as e:
print(f"❌ Gagal memuat model atau scaler: {e}")
return None, None
# Fetch the last 60 prices from the database
def get_last_60_prices(komoditas):
db = connect_db()
if not db:
return None
try:
with db.cursor() as cursor:
query = """
SELECT harga FROM harga_komoditas
WHERE LOWER(REPLACE(REPLACE(komoditas, ' ', '_'), '-', '_')) = LOWER(%s)
ORDER BY tanggal DESC LIMIT 60
"""
cursor.execute(query, (komoditas.lower().replace(" ", "_").replace("-", "_"),))
result = cursor.fetchall()
if not result:
print(f"⚠️ Tidak ada data harga untuk {komoditas}")
return None
prices = [row[0] for row in result]
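# If fewer than 60 prices are available, pad up to 60 with the average price so the model always receives a fixed-length window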
if len(prices) < 60:
avg_price = np.mean(prices)
prices.extend([avg_price] * (60 - len(prices)))
return list(reversed(prices))
except mysql.connector.Error as err:
print(f"❌ Gagal mengambil data harga: {err}")
return None
finally:
db.close()
@app.route("/api/predict-with-filter", methods=["POST"])
def predict_with_filter():
try:
data = request.get_json()
komoditas = data.get("komoditas")
filter_days = data.get("filter_days", 30)  # Default: 30 days
# Validate the input
if not komoditas:
return jsonify({"status": "error", "message": "Parameter 'komoditas' dibutuhkan."}), 400
if filter_days not in [3, 7, 30]:
return jsonify({"status": "error", "message": "Filter hari harus 3, 7, atau 30."}), 400
# Fetch historical data for visualization
db = connect_db()
if not db:
return jsonify({"status": "error", "message": "Gagal terhubung ke database"}), 500
try:
with db.cursor(dictionary=True) as cursor:
query = """
SELECT harga, tanggal FROM harga_komoditas
WHERE LOWER(REPLACE(REPLACE(komoditas, ' ', '_'), '-', '_')) = LOWER(%s)
ORDER BY tanggal DESC LIMIT 60
"""
cursor.execute(query, (komoditas.lower().replace(" ", "_").replace("-", "_"),))
historical_data = cursor.fetchall()
finally:
db.close()
if not historical_data:
return jsonify({"status": "error", "message": f"Data historis tidak ditemukan untuk komoditas '{komoditas}'"}), 404
# Prepare data for prediction
harga = get_last_60_prices(komoditas)
if harga is None:
return jsonify({"status": "error", "message": f"Data tidak ditemukan untuk '{komoditas}'"}), 404
# Load the model and scaler
model, scaler = load_model(komoditas)
if not model or not scaler:
return jsonify({"status": "error", "message": f"Model atau scaler untuk '{komoditas}' tidak tersedia."}), 404
# Run the prediction for the requested number of days
predictions = []
harga_np = np.array(harga, dtype=np.float32).reshape(-1, 1)
# Data that will be used for prediction
current_data = harga_np.copy()
# Get the last date in the historical data as the starting point for predictions
from datetime import datetime, timedelta
if historical_data and len(historical_data) > 0:
last_date = historical_data[0]["tanggal"]  # historical_data is already sorted DESC
# Log for debugging
logger.info(f"Raw last_date dari DB: {last_date}, tipe: {type(last_date)}")
# Make sure last_date is a datetime object
if isinstance(last_date, str):
last_date = datetime.strptime(last_date, "%Y-%m-%d")
# Check the month and correct it if necessary
current_month = datetime.now().month
if last_date.month != current_month:
logger.info(f"Mengoreksi bulan dari {last_date.month} ke {current_month}")
# Build a new date with the correct month (the current month)
last_date = datetime(last_date.year, current_month, last_date.day)
# The prediction start date is one day after the last data point
start_date = last_date + timedelta(days=1)
logger.info(f"Tanggal awal prediksi: {start_date.strftime('%Y-%m-%d')}")
else:
# Fall back to the current date if there is no historical data
start_date = datetime.now()
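# Iterative multi-step forecasting: predict one day ahead, append the prediction to the
# series, then slide the 60-value window forward and repeat for filter_days days.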
for i in range(filter_days):
# Take the 60 most recent data points for prediction
input_data = current_data[-60:].reshape(1, 60, 1)
# Normalize the data
input_scaled = scaler.transform(input_data.reshape(-1, 1)).reshape(1, 60, 1)
# Predict
pred = model.predict(input_scaled)
# Denormalize the result
predicted_price = scaler.inverse_transform(pred).flatten()[0]
# Prediction date
prediction_date = start_date + timedelta(days=i)
# Append to the results
predictions.append({
"tanggal": prediction_date.strftime("%Y-%m-%d"),
"prediksi": float(predicted_price),
"hari_ke": i+1
})
# Update the data for the next prediction
new_value = np.array([[predicted_price]], dtype=np.float32)
current_data = np.vstack((current_data, new_value))
# Format the historical data for the chart
historical_data_formatted = []
for item in reversed(historical_data[:30]):  # Take the 30 most recent days, reversed into chronological order
tanggal_item = item["tanggal"]
# Make sure the date uses the correct month
if hasattr(tanggal_item, "strftime"):
current_month = datetime.now().month
if tanggal_item.month != current_month:
# Correct the month if necessary
tanggal_item = datetime(tanggal_item.year, current_month, tanggal_item.day)
formatted_date = tanggal_item.strftime("%Y-%m-%d")
else:
formatted_date = tanggal_item
historical_data_formatted.append({
"tanggal": formatted_date,
"harga": float(item["harga"])
})
# Save the prediction history to the database
try:
db = connect_db()
if db:
with db.cursor() as cursor:
for pred in predictions:
query = """
INSERT INTO prediksi_history
(komoditas, tanggal_prediksi, tanggal_dibuat, harga_prediksi, filter_days, user_id)
VALUES (%s, %s, NOW(), %s, %s, %s)
"""
# Use the user_id from the JWT, or null if none is present
user_id = None
if request.headers.get('Authorization'):
try:
from flask_jwt_extended import get_jwt_identity
user_id = get_jwt_identity()
except:
pass
cursor.execute(
query,
(komoditas, pred["tanggal"], pred["prediksi"], filter_days, user_id)
)
db.close()
except Exception as e:
logger.error(f"Error menyimpan riwayat prediksi: {e}")
# Continue even if an error occurs; no need to return
# Format last_date as a string with the correct month
if hasattr(last_date, "strftime"):
last_date_str = last_date.strftime("%Y-%m-%d")
else:
last_date_str = str(last_date)
return jsonify({
"status": "success",
"komoditas": komoditas,
"filter_days": filter_days,
"predictions": predictions,
"historical_data": historical_data_formatted,
"last_date": last_date_str
})
except Exception as e:
logger.error(f"❌ Error di API /predict-with-filter: {e}")
return jsonify({"status": "error", "message": f"Gagal mendapatkan prediksi: {str(e)}"}), 500
# API to fetch the prediction history (admin history)
@app.route("/api/admin/prediction-history", methods=["GET"])
@jwt_required()  # Ensure only authenticated users can access this endpoint
def get_prediction_history():
try:
# Check whether the user is an admin
current_user_id = get_jwt_identity()
admin_check = db.session.query(User).filter_by(id=current_user_id, is_admin=True).first()
if not admin_check:
return jsonify({"status": "error", "message": "Unauthorized access"}), 403
# Filter parameters
komoditas = request.args.get('komoditas')
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
limit = request.args.get('limit', 100)
conn = connect_db()
if not conn:
return jsonify({"status": "error", "message": "Gagal terhubung ke database"}), 500
try:
with conn.cursor(dictionary=True) as cursor:
query = """
SELECT ph.id, ph.komoditas, ph.tanggal_prediksi, ph.tanggal_dibuat,
ph.harga_prediksi, ph.filter_days,
u.username as requested_by
FROM prediksi_history ph
LEFT JOIN users u ON ph.user_id = u.id
WHERE 1=1
"""
params = []
# Apply filters if provided
if komoditas:
query += " AND LOWER(REPLACE(REPLACE(ph.komoditas, ' ', '_'), '-', '_')) = LOWER(%s)"
params.append(komoditas.lower().replace(" ", "_").replace("-", "_"))
if start_date:
query += " AND ph.tanggal_dibuat >= %s"
params.append(start_date)
if end_date:
query += " AND ph.tanggal_dibuat <= %s"
params.append(end_date)
# Add sorting and a limit
query += " ORDER BY ph.tanggal_dibuat DESC LIMIT %s"
params.append(int(limit))
cursor.execute(query, params)
history = cursor.fetchall()
# Format dates for the JSON response
for item in history:
if hasattr(item["tanggal_prediksi"], "strftime"):
item["tanggal_prediksi"] = item["tanggal_prediksi"].strftime("%Y-%m-%d")
if hasattr(item["tanggal_dibuat"], "strftime"):
item["tanggal_dibuat"] = item["tanggal_dibuat"].strftime("%Y-%m-%d %H:%M:%S")
return jsonify({
"status": "success",
"history": history
})
finally:
conn.close()
except Exception as e:
logger.error(f"❌ Error di API /admin/prediction-history: {e}")
return jsonify({"status": "error", "message": f"Gagal mendapatkan riwayat prediksi: {str(e)}"}), 500
# Price prediction API
@app.route("/api/predict", methods=["POST"])
def predict():
try:
data = request.get_json()
komoditas = data.get("komoditas")
if not komoditas:
return jsonify({"status": "error", "message": "Parameter 'komoditas' dibutuhkan."}), 400
harga = get_last_60_prices(komoditas)
if harga is None:
return jsonify({"status": "error", "message": f"Data tidak ditemukan untuk '{komoditas}'"}), 404
model, scaler = load_model(komoditas)
if not model or not scaler:
return jsonify({"status": "error", "message": f"Model atau scaler untuk '{komoditas}' tidak tersedia."}), 404
harga_np = np.array(harga, dtype=np.float32).reshape(-1, 1)
if harga_np.shape[0] < 60:
return jsonify({"status": "error", "message": "Data harga kurang dari 60 hari."}), 400
harga_scaled = scaler.transform(harga_np).reshape(1, 60, 1)
pred = model.predict(harga_scaled)
harga_prediksi = scaler.inverse_transform(pred).flatten()[0]
return jsonify({"status": "success", "komoditas": komoditas, "predicted_price": round(float(harga_prediksi), 2)})
except Exception as e:
logger.error(f"❌ Error di API /predict: {e}")
return jsonify({"status": "error", "message": f"Gagal mendapatkan prediksi: {str(e)}"}), 500
# API to fetch the latest prices
@app.route('/api/get_latest_prices', methods=['GET'])
def get_latest_prices():
commodity = request.args.get('commodity')
if not commodity:
return jsonify({'status': 'error', 'message': 'Parameter commodity tidak ditemukan'}), 400
try:
db = connect_db()
if not db:
return jsonify({'status': 'error', 'message': 'Gagal terhubung ke database'}), 500
with db.cursor(dictionary=True) as cursor:
query = """
SELECT harga, tanggal FROM harga_komoditas
WHERE LOWER(REPLACE(REPLACE(komoditas, ' ', '_'), '-', '_')) = LOWER(%s)
ORDER BY tanggal DESC LIMIT 60
"""
cursor.execute(query, (commodity.lower().replace(" ", "_").replace("-", "_"),))
data = cursor.fetchall()
if not data:
return jsonify({'status': 'error', 'message': f'Tidak ada data harga untuk {commodity}'}), 404
return jsonify({'status': 'success', 'latest_prices': data})
except Exception as e:
logger.error(f"❌ Error saat mengambil data: {e}")
return jsonify({'status': 'error', 'message': f'Gagal mengambil data harga: {str(e)}'}), 500
finally:
if db:
db.close()
@app.route('/api/scrape', methods=['POST'])
def scrape_data():
"""
Endpoint for scraping data
"""
try:
data = request.json or {}
days_back = data.get('days_back', 70)
if not isinstance(days_back, int) or days_back <= 0 or days_back > 365:
return jsonify({
'status': 'error',
'message': 'parameter days_back harus berupa angka antara 1-365'
}), 400
logger.info(f"menjalankan scraping untuk {days_back} hari terakhir")
result = scraping.scrape_and_store(days_back)
return jsonify(result)
except Exception as e:
logger.error(f"error pada endpoint /api/scrape: {str(e)}")
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@app.route('/api/check-mapping', methods=['GET'])
def check_mapping():
"""Endpoint untuk memeriksa integritas pemetaan komoditas"""
try:
result = scraping.check_komoditas_mapping_integrity()
return jsonify(result)
except Exception as e:
logger.error(f"Error pada endpoint /api/check-mapping: {str(e)}")
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@app.route('/api/data-status', methods=['GET'])
def data_status():
try:
days = request.args.get('days', 70, type=int)
result = scraping.get_data_status(days)
return jsonify(result)
except Exception as e:
logger.error(f"Error pada endpoint /api/data-status: {str(e)}")
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@app.route('/api/komoditas', methods=['GET'])
def get_komoditas():
"""
Endpoint to get the list of available commodities
"""
try:
return jsonify({
'status': 'success',
'komoditas': list(scraping.KOMODITAS_DIPERLUKAN),
'mapping': scraping.KOMODITAS_MAPPING
})
except Exception as e:
logger.error(f"Error pada endpoint /api/komoditas: {str(e)}")
return jsonify({
'status': 'error',
'message': str(e)
}), 500
if __name__ == "__main__":
app.run(debug=True)
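For reference (not part of the commit): a minimal client sketch for the prediction endpoints above. It assumes the Flask dev server is running at its default http://127.0.0.1:5000 and that a commodity such as "beras" (a hypothetical name) already has price data in the database.

import requests

BASE_URL = "http://127.0.0.1:5000"   # default address used by app.run(debug=True)
KOMODITAS = "beras"                  # hypothetical commodity name; must exist in harga_komoditas

# Single next-day prediction
resp = requests.post(f"{BASE_URL}/api/predict", json={"komoditas": KOMODITAS})
print(resp.json())   # e.g. {"status": "success", "komoditas": ..., "predicted_price": ...}

# Multi-day prediction with a 3-, 7-, or 30-day horizon
resp = requests.post(
    f"{BASE_URL}/api/predict-with-filter",
    json={"komoditas": KOMODITAS, "filter_days": 7},
)
data = resp.json()
if data.get("status") == "success":
    for p in data["predictions"]:
        print(p["tanggal"], p["prediksi"])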

backend/auth.py (new file, 73 lines)

@@ -0,0 +1,73 @@
from flask import Blueprint, request, jsonify
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity
from models import User, db
auth_bp = Blueprint('auth', __name__)
@auth_bp.route('/login', methods=['POST'])
def login():
data = request.get_json()
if not data or not data.get('username') or not data.get('password'):
return jsonify({'status': 'error', 'message': 'Username dan password diperlukan'}), 400
user = User.query.filter_by(username=data['username']).first()
if not user or not user.check_password(data['password']):
return jsonify({'status': 'error', 'message': 'Username atau password salah'}), 401
# Use the user ID as the JWT identity (not the username)
access_token = create_access_token(identity=user.id)
return jsonify({
'status': 'success',
'token': access_token,  # Use 'token' for consistency with the frontend
'user': user.to_dict()
})
@auth_bp.route('/register', methods=['POST'])
def register():
data = request.get_json()
if not data or not data.get('username') or not data.get('password'):
return jsonify({'status': 'error', 'message': 'Username dan password diperlukan'}), 400
if User.query.filter_by(username=data['username']).first():
return jsonify({'status': 'error', 'message': 'Username sudah digunakan'}), 400
user = User(username=data['username'], is_admin=data.get('is_admin', False))
user.set_password(data['password'])
db.session.add(user)
db.session.commit()
return jsonify({'status': 'success', 'message': 'User berhasil dibuat'})
@auth_bp.route('/me', methods=['GET'])
@jwt_required()
def get_user():
user_id = get_jwt_identity()  # Get the user ID from the token
user = User.query.get(user_id)
if not user:
return jsonify({'status': 'error', 'message': 'User tidak ditemukan'}), 404
return jsonify({'status': 'success', 'user': user.to_dict()})
@auth_bp.route('/verify', methods=['GET'])
@jwt_required()
def verify_token():
"""
Endpoint to verify a JWT token
"""
user_id = get_jwt_identity()
user = User.query.get(user_id)
if not user:
return jsonify({"status": "error", "message": "User tidak ditemukan"}), 404
return jsonify({
"status": "success",
"message": "Token valid",
"user": user.to_dict()
})
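A minimal usage sketch (not part of the commit) for the auth blueprint, assuming the server above is running locally and using the admin/admin123 account seeded in app.py; flask_jwt_extended expects the default Bearer scheme in the Authorization header.

import requests

BASE_URL = "http://127.0.0.1:5000"

# Log in with the seeded admin account to obtain a JWT
login = requests.post(
    f"{BASE_URL}/api/auth/login",
    json={"username": "admin", "password": "admin123"},
)
token = login.json()["token"]

# Call a protected endpoint with the Bearer token
me = requests.get(
    f"{BASE_URL}/api/auth/me",
    headers={"Authorization": f"Bearer {token}"},
)
print(me.json())   # {"status": "success", "user": {...}}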

backend/client/.gitignore (new file, 24 lines)

@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

backend/client/README.md (new file, 8 lines)

@@ -0,0 +1,8 @@
# React + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh

(new file, name not shown)

@@ -0,0 +1,38 @@
import js from '@eslint/js'
import globals from 'globals'
import react from 'eslint-plugin-react'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
export default [
{ ignores: ['dist'] },
{
files: ['**/*.{js,jsx}'],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
parserOptions: {
ecmaVersion: 'latest',
ecmaFeatures: { jsx: true },
sourceType: 'module',
},
},
settings: { react: { version: '18.3' } },
plugins: {
react,
'react-hooks': reactHooks,
'react-refresh': reactRefresh,
},
rules: {
...js.configs.recommended.rules,
...react.configs.recommended.rules,
...react.configs['jsx-runtime'].rules,
...reactHooks.configs.recommended.rules,
'react/jsx-no-target-blank': 'off',
'react-refresh/only-export-components': [
'warn',
{ allowConstantExport: true },
],
},
},
]

backend/client/index.html (new file, 13 lines)

@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + React</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>

(new file, name not shown)

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>


(new file, name not shown)

@@ -0,0 +1,42 @@
#root {
max-width: 1280px;
margin: 0 auto;
padding: 2rem;
text-align: center;
}
.logo {
height: 6em;
padding: 1.5em;
will-change: filter;
transition: filter 300ms;
}
.logo:hover {
filter: drop-shadow(0 0 2em #646cffaa);
}
.logo.react:hover {
filter: drop-shadow(0 0 2em #61dafbaa);
}
@keyframes logo-spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
@media (prefers-reduced-motion: no-preference) {
a:nth-of-type(2) .logo {
animation: logo-spin infinite 20s linear;
}
}
.card {
padding: 2em;
}
.read-the-docs {
color: #888;
}

(new file, name not shown)

@@ -0,0 +1,35 @@
import { useState } from 'react'
import reactLogo from './assets/react.svg'
import viteLogo from '/vite.svg'
import './App.css'
function App() {
const [count, setCount] = useState(0)
return (
<>
<div>
<a href="https://vite.dev" target="_blank">
<img src={viteLogo} className="logo" alt="Vite logo" />
</a>
<a href="https://react.dev" target="_blank">
<img src={reactLogo} className="logo react" alt="React logo" />
</a>
</div>
<h1>Vite + React</h1>
<div className="card">
<button onClick={() => setCount((count) => count + 1)}>
count is {count}
</button>
<p>
Edit <code>src/App.jsx</code> and save to test HMR
</p>
</div>
<p className="read-the-docs">
Click on the Vite and React logos to learn more
</p>
</>
)
}
export default App

(new file, name not shown)

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 
307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>


(new file, name not shown)

@@ -0,0 +1,68 @@
:root {
font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
line-height: 1.5;
font-weight: 400;
color-scheme: light dark;
color: rgba(255, 255, 255, 0.87);
background-color: #242424;
font-synthesis: none;
text-rendering: optimizeLegibility;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
a {
font-weight: 500;
color: #646cff;
text-decoration: inherit;
}
a:hover {
color: #535bf2;
}
body {
margin: 0;
display: flex;
place-items: center;
min-width: 320px;
min-height: 100vh;
}
h1 {
font-size: 3.2em;
line-height: 1.1;
}
button {
border-radius: 8px;
border: 1px solid transparent;
padding: 0.6em 1.2em;
font-size: 1em;
font-weight: 500;
font-family: inherit;
background-color: #1a1a1a;
cursor: pointer;
transition: border-color 0.25s;
}
button:hover {
border-color: #646cff;
}
button:focus,
button:focus-visible {
outline: 4px auto -webkit-focus-ring-color;
}
@media (prefers-color-scheme: light) {
:root {
color: #213547;
background-color: #ffffff;
}
a:hover {
color: #747bff;
}
button {
background-color: #f9f9f9;
}
}

(new file, name not shown)

@@ -0,0 +1,10 @@
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.jsx'
createRoot(document.getElementById('root')).render(
<StrictMode>
<App />
</StrictMode>,
)

(new file, name not shown)

@@ -0,0 +1,7 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
// https://vite.dev/config/
export default defineConfig({
plugins: [react()],
})

backend/config.py (new file, 11 lines)

@@ -0,0 +1,11 @@
import os
from datetime import timedelta
class Config:
SECRET_KEY = 'your-secret-key-change-in-production'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:@localhost/harga_komoditas'
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_SECRET_KEY = 'jwt-secret-key-change-in-production'
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
ALLOWED_EXTENSIONS = {'csv'}
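UPLOAD_FOLDER and ALLOWED_EXTENSIONS are consumed by the admin routes (whose diff is suppressed below); a typical Flask pattern for them is sketched here. The allowed_file helper is illustrative only, not necessarily the exact helper the project uses.

import os
from config import Config

def allowed_file(filename):
    # Accept only the extensions listed in Config.ALLOWED_EXTENSIONS (currently just 'csv')
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in Config.ALLOWED_EXTENSIONS

# Typical use inside an upload view:
#   if file and allowed_file(file.filename):
#       file.save(os.path.join(Config.UPLOAD_FOLDER, file.filename))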

9 more new files (file names not shown; diffs suppressed because they are too large)

backend/datasets/kedelai.csv (new file, 1341 lines)

File diff suppressed because it is too large.

3 more new files (file names not shown; diffs suppressed because they are too large or have lines that are too long)

(new file, name not shown)

@@ -0,0 +1,417 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Impor pustaka yang dibutuhkan\n",
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"from sklearn.preprocessing import MinMaxScaler\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import LSTM, Dense, Dropout\n",
"from tensorflow.keras.optimizers import Adam\n",
"from tensorflow.keras.models import load_model\n",
"from tensorflow.keras.callbacks import EarlyStopping\n",
"import glob"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Setel seed acak untuk reproduktibilitas\n",
"np.random.seed(42)\n",
"import tensorflow as tf\n",
"tf.random.set_seed(42)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Tentukan jalur ke data dan ambil semua file CSV\n",
"data_path = \"C:/D/projects/BPP PROJECT/bpp-prediction/backend/datasets/\"\n",
"csv_files = glob.glob(data_path + \"*.csv\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk memuat dan membersihkan data (menangani nilai yang hilang)\n",
"def load_and_clean_data(file_path):\n",
" df = pd.read_csv(file_path, delimiter=';')\n",
" df['Tanggal'] = pd.to_datetime(df['Tanggal'], format='%d/%m/%Y')\n",
" df.set_index('Tanggal', inplace=True)\n",
" \n",
" # Menangani data yang hilang\n",
" df = df.fillna(method='ffill') # Menggunakan pengisian maju untuk menangani data yang hilang\n",
" \n",
" return df"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "time data \"2021-04-01\" doesn't match format \"%d/%m/%Y\", at position 0. You might want to try:\n - passing `format` if your strings have a consistent format;\n - passing `format='ISO8601'` if your strings are all ISO8601 but not necessarily in exactly the same format;\n - passing `format='mixed'`, and the format will be inferred for each element individually. You might want to use `dayfirst` alongside this.",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[5], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Memuat semua dataset\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m datasets \u001b[38;5;241m=\u001b[39m {file\u001b[38;5;241m.\u001b[39msplit(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\\\\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m]: \u001b[43mload_and_clean_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m csv_files}\n",
"Cell \u001b[1;32mIn[4], line 4\u001b[0m, in \u001b[0;36mload_and_clean_data\u001b[1;34m(file_path)\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload_and_clean_data\u001b[39m(file_path):\n\u001b[0;32m 3\u001b[0m df \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mread_csv(file_path, delimiter\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m;\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m----> 4\u001b[0m df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTanggal\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mpd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mto_datetime\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdf\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mTanggal\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;132;43;01m%d\u001b[39;49;00m\u001b[38;5;124;43m/\u001b[39;49m\u001b[38;5;124;43m%\u001b[39;49m\u001b[38;5;124;43mm/\u001b[39;49m\u001b[38;5;124;43m%\u001b[39;49m\u001b[38;5;124;43mY\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 5\u001b[0m df\u001b[38;5;241m.\u001b[39mset_index(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTanggal\u001b[39m\u001b[38;5;124m'\u001b[39m, inplace\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m 7\u001b[0m \u001b[38;5;66;03m# Menangani data yang hilang\u001b[39;00m\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\pandas\\core\\tools\\datetimes.py:1067\u001b[0m, in \u001b[0;36mto_datetime\u001b[1;34m(arg, errors, dayfirst, yearfirst, utc, format, exact, unit, infer_datetime_format, origin, cache)\u001b[0m\n\u001b[0;32m 1065\u001b[0m result \u001b[38;5;241m=\u001b[39m arg\u001b[38;5;241m.\u001b[39mmap(cache_array)\n\u001b[0;32m 1066\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1067\u001b[0m values \u001b[38;5;241m=\u001b[39m \u001b[43mconvert_listlike\u001b[49m\u001b[43m(\u001b[49m\u001b[43marg\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_values\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1068\u001b[0m result \u001b[38;5;241m=\u001b[39m arg\u001b[38;5;241m.\u001b[39m_constructor(values, index\u001b[38;5;241m=\u001b[39marg\u001b[38;5;241m.\u001b[39mindex, name\u001b[38;5;241m=\u001b[39marg\u001b[38;5;241m.\u001b[39mname)\n\u001b[0;32m 1069\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(arg, (ABCDataFrame, abc\u001b[38;5;241m.\u001b[39mMutableMapping)):\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\pandas\\core\\tools\\datetimes.py:433\u001b[0m, in \u001b[0;36m_convert_listlike_datetimes\u001b[1;34m(arg, format, name, utc, unit, errors, dayfirst, yearfirst, exact)\u001b[0m\n\u001b[0;32m 431\u001b[0m \u001b[38;5;66;03m# `format` could be inferred, or user didn't ask for mixed-format parsing.\u001b[39;00m\n\u001b[0;32m 432\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mformat\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mformat\u001b[39m \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmixed\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m--> 433\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_array_strptime_with_fallback\u001b[49m\u001b[43m(\u001b[49m\u001b[43marg\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mutc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexact\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43merrors\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 435\u001b[0m result, tz_parsed \u001b[38;5;241m=\u001b[39m objects_to_datetime64(\n\u001b[0;32m 436\u001b[0m arg,\n\u001b[0;32m 437\u001b[0m dayfirst\u001b[38;5;241m=\u001b[39mdayfirst,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 441\u001b[0m allow_object\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[0;32m 442\u001b[0m )\n\u001b[0;32m 444\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tz_parsed \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 445\u001b[0m \u001b[38;5;66;03m# We can take a shortcut since the datetime64 numpy array\u001b[39;00m\n\u001b[0;32m 446\u001b[0m \u001b[38;5;66;03m# is in UTC\u001b[39;00m\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\pandas\\core\\tools\\datetimes.py:467\u001b[0m, in \u001b[0;36m_array_strptime_with_fallback\u001b[1;34m(arg, name, utc, fmt, exact, errors)\u001b[0m\n\u001b[0;32m 456\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_array_strptime_with_fallback\u001b[39m(\n\u001b[0;32m 457\u001b[0m arg,\n\u001b[0;32m 458\u001b[0m name,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 462\u001b[0m errors: \u001b[38;5;28mstr\u001b[39m,\n\u001b[0;32m 463\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Index:\n\u001b[0;32m 464\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 465\u001b[0m \u001b[38;5;124;03m Call array_strptime, with fallback behavior depending on 'errors'.\u001b[39;00m\n\u001b[0;32m 466\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 467\u001b[0m result, tz_out \u001b[38;5;241m=\u001b[39m \u001b[43marray_strptime\u001b[49m\u001b[43m(\u001b[49m\u001b[43marg\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfmt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexact\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mexact\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43merrors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merrors\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mutc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mutc\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 468\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tz_out \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 469\u001b[0m unit \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mdatetime_data(result\u001b[38;5;241m.\u001b[39mdtype)[\u001b[38;5;241m0\u001b[39m]\n",
"File \u001b[1;32mstrptime.pyx:501\u001b[0m, in \u001b[0;36mpandas._libs.tslibs.strptime.array_strptime\u001b[1;34m()\u001b[0m\n",
"File \u001b[1;32mstrptime.pyx:451\u001b[0m, in \u001b[0;36mpandas._libs.tslibs.strptime.array_strptime\u001b[1;34m()\u001b[0m\n",
"File \u001b[1;32mstrptime.pyx:583\u001b[0m, in \u001b[0;36mpandas._libs.tslibs.strptime._parse_with_format\u001b[1;34m()\u001b[0m\n",
"\u001b[1;31mValueError\u001b[0m: time data \"2021-04-01\" doesn't match format \"%d/%m/%Y\", at position 0. You might want to try:\n - passing `format` if your strings have a consistent format;\n - passing `format='ISO8601'` if your strings are all ISO8601 but not necessarily in exactly the same format;\n - passing `format='mixed'`, and the format will be inferred for each element individually. You might want to use `dayfirst` alongside this."
]
}
],
"source": [
"# Memuat semua dataset\n",
"datasets = {file.split(\"\\\\\")[-1]: load_and_clean_data(file) for file in csv_files}\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'datasets' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[6], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Menampilkan informasi dataset\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m name, df \u001b[38;5;129;01min\u001b[39;00m \u001b[43mdatasets\u001b[49m\u001b[38;5;241m.\u001b[39mitems():\n\u001b[0;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mDataset: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 4\u001b[0m display(df\u001b[38;5;241m.\u001b[39mhead())\n",
"\u001b[1;31mNameError\u001b[0m: name 'datasets' is not defined"
]
}
],
"source": [
"# Menampilkan informasi dataset\n",
"for name, df in datasets.items():\n",
" print(f\"\\nDataset: {name}\")\n",
" display(df.head())\n",
"# Kamus untuk menyimpan scaler untuk setiap dataset\n",
"scalers = {}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk menormalkan data\n",
"def normalize_data(df, dataset_name):\n",
" \"\"\"Menormalkan data dan menyimpan scaler untuk digunakan nanti\"\"\"\n",
" scalers[dataset_name] = MinMaxScaler(feature_range=(0, 1))\n",
" df_scaled = scalers[dataset_name].fit_transform(df[['Harga']].values)\n",
" return df_scaled"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Menormalkan semua dataset\n",
"scaled_datasets = {}\n",
"for file, df in datasets.items():\n",
" print(f\"\\nMenormalkan dataset: {file}\")\n",
" scaled_datasets[file] = normalize_data(df, file)\n",
" \n",
" # Menampilkan sampel nilai asli dan nilai terormalisasi\n",
" print(\"Nilai asli:\", df['Harga'].head().values)\n",
" print(\"Nilai terormalisasi:\", scaled_datasets[file][:5].flatten())\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import joblib\n",
"\n",
"# Simpan semua scaler ke dalam file\n",
"scaler_path = \"C:/D/projects/BPP PROJECT/bpp-prediction/backend/scalers_100/\"\n",
"os.makedirs(scaler_path, exist_ok=True)\n",
"\n",
"for dataset_name, scaler in scalers.items():\n",
" joblib.dump(scaler, f\"{scaler_path}{dataset_name}_scaler.pkl\")\n",
"\n",
"print(\"Semua scaler telah disimpan.\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk membuat urutan data untuk LSTM\n",
"def create_dataset(data, time_step=60):\n",
" X, y = [], []\n",
" for i in range(len(data) - time_step - 1):\n",
" X.append(data[i:(i + time_step), 0])\n",
" y.append(data[i + time_step, 0])\n",
" return np.array(X), np.array(y)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Membuat urutan untuk setiap dataset\n",
"datasets_X_y = {}\n",
"for file, scaled_data in scaled_datasets.items():\n",
" print(f\"\\nMembuat dataset untuk {file}\")\n",
" X, y = create_dataset(scaled_data)\n",
" datasets_X_y[file] = (X, y)\n",
" print(f\"X shape: {X.shape}, y shape: {y.shape}\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk membagi data\n",
"def split_data(X, y, train_size=0.8):\n",
" train_len = int(len(X) * train_size)\n",
" X_train, X_test = X[:train_len], X[train_len:]\n",
" y_train, y_test = y[:train_len], y[train_len:]\n",
" return X_train, X_test, y_train, y_test\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Membagi data untuk setiap dataset\n",
"split_datasets = {}\n",
"for file, (X, y) in datasets_X_y.items():\n",
" print(f\"\\nMembagi data untuk {file}\")\n",
" X_train, X_test, y_train, y_test = split_data(X, y)\n",
" split_datasets[file] = (X_train, X_test, y_train, y_test)\n",
" print(f\"Bentuk set pelatihan: {X_train.shape}\")\n",
" print(f\"Bentuk set pengujian: {X_test.shape}\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk membangun model LSTM\n",
"def build_model(input_shape):\n",
" model = Sequential([\n",
" LSTM(units=50, return_sequences=True, input_shape=input_shape),\n",
" Dropout(0.2),\n",
" LSTM(units=50, return_sequences=False),\n",
" Dropout(0.2),\n",
" Dense(units=1)\n",
" ])\n",
" model.compile(optimizer=Adam(), loss='mean_squared_error')\n",
" return model\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Melatih model untuk setiap dataset\n",
"model_results = {}\n",
"for file, (X_train, X_test, y_train, y_test) in split_datasets.items():\n",
" print(f\"\\nMelatih model untuk {file}\")\n",
" X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\n",
" \n",
" model = build_model((X_train.shape[1], 1))\n",
"\n",
" # Penghentian dini untuk mencegah overfitting\n",
" early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)\n",
"\n",
" history = model.fit(\n",
" X_train, y_train,\n",
" epochs=80,\n",
" batch_size=32,\n",
" validation_split=0.1,\n",
" callbacks=[early_stopping],\n",
" verbose=1\n",
" )\n",
" \n",
" # Menyimpan model\n",
" model.save(f\"C:/D/projects/BPP PROJECT/bpp-prediction/backend/models_100/{file}_model.h5\")\n",
" model_results[file] = model\n",
" \n",
" # # Menampilkan Hidden State dan Cell State pada timestep terakhir\n",
" # print(\"\\nEvaluasi pada data uji (X_test):\")\n",
" # output, state_h, state_c = model.predict(X_test)\n",
" # print(\"Hidden State terakhir (state_h):\", state_h)\n",
" # print(\"Cell State terakhir (state_c):\", state_c)\n",
" \n",
" # Plot riwayat pelatihan\n",
" plt.figure(figsize=(10, 6))\n",
" plt.plot(history.history['loss'], label='Loss Pelatihan')\n",
" plt.plot(history.history['val_loss'], label='Loss Validasi')\n",
" plt.title(f'Loss Model untuk {file}')\n",
" plt.xlabel('Epoch')\n",
" plt.ylabel('Loss')\n",
" plt.legend()\n",
" plt.grid(True)\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk evaluasi\n",
"def evaluate_model(model, X_test, y_test):\n",
" y_pred = model.predict(X_test)\n",
" rmse = np.sqrt(mean_squared_error(y_test, y_pred))\n",
" mae = mean_absolute_error(y_test, y_pred)\n",
" return rmse, mae\n",
"\n",
"def denormalize_data(scaled_data, dataset_name):\n",
" return scalers[dataset_name].inverse_transform(scaled_data)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluasi model dan membuat prediksi\n",
"evaluations = {}\n",
"predictions = {}\n",
"\n",
"for file, model in model_results.items():\n",
" X_test = split_datasets[file][1].reshape(-1, 60, 1)\n",
" y_test = split_datasets[file][3]\n",
" \n",
" # Membuat prediksi\n",
" y_pred_scaled = model.predict(X_test)\n",
" \n",
" # Denormalisasi\n",
" y_pred_rescaled = denormalize_data(y_pred_scaled, file)\n",
" y_test_rescaled = denormalize_data(y_test.reshape(-1, 1), file)\n",
" \n",
" # Menghitung metrik\n",
" rmse, mae = evaluate_model(model, X_test, y_test)\n",
" evaluations[file] = {'RMSE': rmse, 'MAE': mae}\n",
" \n",
" predictions[file] = {\n",
" 'y_pred': y_pred_rescaled,\n",
" 'y_test': y_test_rescaled\n",
" }\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fungsi untuk memplot prediksi\n",
"def plot_predictions(file, predictions, evaluations):\n",
" y_pred = predictions[file]['y_pred']\n",
" y_test = predictions[file]['y_test']\n",
" \n",
" plt.figure(figsize=(12, 6))\n",
" plt.plot(y_test, label='Harga Aktual', linewidth=2)\n",
" plt.plot(y_pred, label='Harga Prediksi', linewidth=2)\n",
" plt.title(f'Prediksi Harga BPP untuk {file}\\nRMSE: {evaluations[file][\"RMSE\"]:.2f}, MAE: {evaluations[file][\"MAE\"]:.2f}')\n",
" plt.xlabel('Waktu')\n",
" plt.ylabel('Harga (Rupiah)')\n",
" plt.legend()\n",
" plt.grid(True)\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot hasil untuk setiap dataset\n",
"for file in predictions.keys():\n",
" plot_predictions(file, predictions, evaluations)\n",
"\n",
"# Menampilkan hasil evaluasi akhir\n",
"print(\"\\nHasil Evaluasi Akhir:\")\n",
"for name, metrics in evaluations.items():\n",
" print(f\"\\nMetrik untuk {name}:\")\n",
" print(f\"RMSE: {metrics['RMSE']:.4f}\")\n",
" print(f\"MAE: {metrics['MAE']:.4f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"model.summary()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
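To make the windowing step concrete, here is the notebook's create_dataset transform run standalone on a toy array (a sketch; only the window length differs from the notebook, which uses time_step=60).

import numpy as np

def create_dataset(data, time_step=60):
    # Same sliding-window transform as in the notebook:
    # each sample is time_step consecutive prices, the target is the next price
    X, y = [], []
    for i in range(len(data) - time_step - 1):
        X.append(data[i:(i + time_step), 0])
        y.append(data[i + time_step, 0])
    return np.array(X), np.array(y)

# Toy example with a shorter window so the shapes are easy to see
prices = np.arange(10, dtype=np.float32).reshape(-1, 1)   # 10 "scaled" prices
X, y = create_dataset(prices, time_step=3)
print(X.shape, y.shape)   # (6, 3) (6,)
print(X[0], y[0])         # [0. 1. 2.] 3.0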

backend/models.py (new file, 44 lines)

@@ -0,0 +1,44 @@
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
db = SQLAlchemy()
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
password_hash = db.Column(db.String(255), nullable=False)
is_admin = db.Column(db.Boolean, default=False)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'is_admin': self.is_admin
}
class DatasetUpload(db.Model):
__tablename__ = 'dataset_uploads'
id = db.Column(db.Integer, primary_key=True)
filename = db.Column(db.String(255), nullable=False)
uploaded_by = db.Column(db.Integer, db.ForeignKey('users.id'))
upload_date = db.Column(db.DateTime, default=db.func.current_timestamp())
commodity_name = db.Column(db.String(100), nullable=False)
status = db.Column(db.String(50), default='pending') # pending, processing, completed, error
def to_dict(self):
return {
'id': self.id,
'filename': self.filename,
'upload_date': self.upload_date.strftime('%Y-%m-%d %H:%M:%S'),
'commodity_name': self.commodity_name,
'status': self.status
}
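A small sketch (not part of the commit) of how these models are exercised, mirroring the startup code in app.py; it assumes the MySQL database configured in Config is reachable and that the 'demo' user does not already exist.

from app import app
from models import db, User

with app.app_context():
    db.create_all()
    # Only the werkzeug password hash is stored, never the plain password
    u = User(username='demo', is_admin=False)
    u.set_password('demo123')
    db.session.add(u)
    db.session.commit()
    print(User.query.filter_by(username='demo').first().check_password('demo123'))   # True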

44 more new binary files (not shown), plus 16 new image files (38–94 KiB each, not shown)

Some files were not shown because too many files have changed in this diff.