# MIF_E31230988/text.py
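# Flask service that scrapes Tokopedia product reviews with Selenium, classifies
# their sentiment (pickled Naive Bayes + TF-IDF on /analyze, NLTK VADER on
# /analyze_vader), summarizes dominant phrases with KeyBERT, reports job
# progress via polling or Server-Sent Events, and exports a PDF report.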
from datetime import datetime
import io
import json
import logging
import re
import threading
import time
import traceback
from urllib.parse import urlparse, urlunparse
import uuid
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from flask import Flask, Response, jsonify, request, send_file
from flask_cors import CORS
import joblib
import matplotlib
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import numpy as np
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import (
    Image as RLImage,
    Paragraph,
    SimpleDocTemplate,
    Spacer,
    Table,
    TableStyle,
)
from deep_translator import GoogleTranslator
from keybert import KeyBERT
from sentence_transformers import SentenceTransformer
from collections import Counter
from scipy.sparse import csr_matrix, hstack
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
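# Use the non-interactive Agg backend: charts are rendered inside request
# handler threads on a headless server, where a GUI backend would fail.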
matplotlib.use('Agg')
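# JSON helpers: numpy scalars/arrays returned by scikit-learn are not JSON
# serializable, so they are recursively converted to builtin Python types
# before being passed to jsonify or the SSE stream.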
def _to_builtin(obj):
    if isinstance(obj, (np.integer,)):
        return int(obj)
    if isinstance(obj, (np.floating,)):
        return float(obj)
    if isinstance(obj, (np.ndarray,)):
        return obj.tolist()
    if isinstance(obj, dict):
        return {str(k): _to_builtin(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_to_builtin(v) for v in obj]
    return obj

def _json_dumps(obj):
    return json.dumps(_to_builtin(obj), ensure_ascii=False)
# Setup Logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
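# Scrapes every paginated review from a Tokopedia product shortlink.
# Returns (all_reviews, category, productName, star_counts, all_stars);
# on_progress, if given, is called with the running review count.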
def get_reviews_and_category(shortlink: str, on_progress=None):
    chrome_options = Options()
    chrome_options.add_argument("--start-maximized")
    chrome_options.add_argument("--headless=new")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--disable-blink-features=AutomationControlled")
    chrome_options.add_argument("--window-size=1366,768")
    chrome_options.add_argument(
        "--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0 Safari/537.36"
    )
    # chrome_options.binary_location = "/usr/bin/google-chrome"
    # service = Service("/usr/bin/chromedriver")
    # Use ChromeDriverManager (works on Windows too) instead of hardcoding a Linux driver path
    service = Service(ChromeDriverManager().install())
    driver = webdriver.Chrome(service=service, options=chrome_options)
    try:
        driver.get(shortlink)
        # Wait for the redirect off the shortlink and onto the tokopedia domain
        WebDriverWait(driver, 40).until(lambda d: d.current_url != shortlink)
        WebDriverWait(driver, 40).until(lambda d: "tokopedia.com" in d.current_url)
        time.sleep(2)
        original_url = driver.current_url
        if not original_url or original_url == shortlink:
            raise RuntimeError(f"Redirect did not resolve. current_url='{driver.current_url}'")
        parsed_url = urlparse(original_url)
        review_path = parsed_url.path.rstrip('/') + "/review"
        review_url = urlunparse((parsed_url.scheme, parsed_url.netloc, review_path, "", "", ""))
        # Capture the product name on the product page (most reliable)
        try:
            product_name = WebDriverWait(driver, 10).until(
                EC.visibility_of_element_located((By.XPATH, "//h1[@data-testid='lblPDPDetailProductName']"))
            ).text.strip()
        except Exception:
            product_name = ""
        driver.get(review_url)
        try:
            WebDriverWait(driver, 40).until(lambda d: d.title.strip() != "")
        except Exception:
            raise RuntimeError("Halaman review tidak memuat title")
        time.sleep(5)
        all_reviews = []
        all_stars = []
        star_counts = {"1": 0, "2": 0, "3": 0, "4": 0, "5": 0}
        category = ""
        productName = product_name
        while True:
            try:
                try:
                    review_feed = WebDriverWait(driver, 20).until(
                        EC.presence_of_element_located((By.ID, "review-feed"))
                    )
                except Exception:
                    raise RuntimeError("Elemen review-feed tidak ditemukan")
                # Ensure the reviews container is in view so lazy content loads
                try:
                    driver.execute_script("arguments[0].scrollIntoView({block: 'start'});", review_feed)
                    time.sleep(1)
                except Exception:
                    pass
                # Wait for at least one review to appear, with retries
                reviews = review_feed.find_elements(By.XPATH, ".//span[@data-testid='lblItemUlasan']")
                retries = 3
                while not reviews and retries > 0:
                    try:
                        driver.execute_script("window.scrollBy(0, 600);")
                    except Exception:
                        pass
                    time.sleep(1)
                    reviews = review_feed.find_elements(By.XPATH, ".//span[@data-testid='lblItemUlasan']")
                    retries -= 1
                category = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.XPATH, "//nav[@aria-label='Breadcrumb']//ol/li[2]//a"))
                ).text.strip()
                # productName already captured from the product page; keep as-is
                if not productName:
                    # Fallbacks on the review page
                    try:
                        pn = WebDriverWait(driver, 5).until(
                            EC.visibility_of_element_located((By.XPATH, "//h1[@data-testid='lblPDPDetailProductName']"))
                        ).text.strip()
                        if pn:
                            productName = pn
                    except Exception:
                        try:
                            og = driver.find_element(By.XPATH, "//meta[@property='og:title']").get_attribute("content")
                            if og:
                                productName = og.strip()
                        except Exception:
                            try:
                                # As a last resort, use the last breadcrumb item text
                                bc = driver.find_element(By.XPATH, "//nav[@aria-label='Breadcrumb']//ol/li[last()]//a").text
                                if bc:
                                    productName = bc.strip()
                            except Exception:
                                pass
                for review in reviews:
                    try:
                        parent_container = review.find_element(By.XPATH, "./ancestor::*[self::div or self::li][1]")
                        more_button = parent_container.find_element(By.XPATH, ".//button[contains(normalize-space(.), 'Selengkapnya')]")
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", more_button)
                        driver.execute_script("arguments[0].click();", more_button)
                        try:
                            WebDriverWait(driver, 5).until(EC.staleness_of(more_button))
                        except Exception:
                            time.sleep(0.5)
                    except Exception:
                        pass
                    review_text = review.text.strip()
                    if review_text and review_text not in all_reviews:
                        all_reviews.append(review_text)
                        stars = 0
                        try:
                            try:
                                container = review.find_element(By.XPATH, "./ancestor::article[1]")
                                star_container = container.find_element(By.XPATH, ".//div[@data-testid='icnStarRating']")
                            except Exception:
                                star_container = review.find_element(By.XPATH, "./ancestor::*[.//div[@data-testid='icnStarRating'] and not(descendant::span[@data-testid='lblItemUlasan'][2])][1]//div[@data-testid='icnStarRating']")
                            svgs = star_container.find_elements(By.XPATH, ".//*[local-name()='svg' and @fill='var(--YN300, #FFD45F)']")
                            stars = len(svgs)
                        except Exception:
                            pass
                        if 1 <= stars <= 5:
                            star_counts[str(stars)] += 1
                        # Always append (0 when unknown) so all_stars stays aligned with all_reviews
                        all_stars.append(stars)
                        if on_progress:
                            try:
                                on_progress(len(all_reviews))
                            except Exception:
                                pass
                try:
                    next_button = WebDriverWait(driver, 5).until(
                        EC.presence_of_element_located((By.XPATH, "//button[contains(@aria-label, 'Laman berikutnya')]"))
                    )
                    if next_button.get_attribute("disabled") is not None:
                        break
                    driver.execute_script("arguments[0].click();", next_button)
                    time.sleep(3)
                except Exception:
                    break
            except Exception as e:
                logging.error(f"Error saat mengambil review: {type(e).__name__}: {e!r}")
                break
        return all_reviews, category, productName, star_counts, all_stars
    finally:
        driver.quit()
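# Standalone usage sketch (the shortlink below is a hypothetical placeholder;
# real ones arrive in the /analyze request body):
#   reviews, category, name, star_counts, stars = get_reviews_and_category(
#       "https://tk.tokopedia.com/ZS/example", on_progress=print)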
# Map Indonesian breadcrumb categories to the provided English-label encoding
category_map = {
    "Perawatan Hewan": 0,  # Animal Care
    "Otomotif": 1,  # Automotive
    "Kecantikan": 2,  # Beauty
    "Perawatan Tubuh": 3,  # Body Care
    "Buku": 4,  # Books
    "Audio, Kamera & Elektronik Lainnya": 5,  # Camera (or use 20 if 'Other Products' is preferred)
    "Pertukangan": 6,  # Carpentry
    "Komputer & Laptop": 7,  # Computers and Laptops
    "Elektronik": 8,  # Electronics
    "Makanan & Minuman": 9,  # Food and Drink
    "Gaming": 10,  # Gaming
    "Kesehatan": 11,  # Health
    "Rumah Tangga": 12,  # Household
    "Fashion Anak & Bayi": 13,  # Kids and Baby Fashion
    "Dapur": 14,  # Kitchen
    "Fashion Pria": 15,  # Men's Fashion
    "Ibu & Bayi": 16,  # Mother and Baby
    "Film & Musik": 17,  # Movies and Music
    "Fashion Muslim": 18,  # Muslim Fashion
    "Office & Stationery": 19,  # Office & Stationery
    "Lainnya": 20,  # Other Products (fallback label if present)
    "Perlengkapan Pesta": 21,  # Party Supplies and Craft
    "Handphone & Tablet": 22,  # Phones and Tablets
    "Logam Mulia": 23,  # Precious Metal
    "Properti": 24,  # Property
    "Olahraga": 25,  # Sport
    "Tiket, Travel, Voucher": 26,  # Tour and Travel
    "Mainan & Hobi": 27,  # Toys and Hobbies
    "Fashion Wanita": 28,  # Women's Fashion
}
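# Lookups go through category_map.get(name, 20), so any breadcrumb category
# not listed above falls back to 20 ("Other Products").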
stemmer = StemmerFactory().create_stemmer()
try:
    nltk.data.find('corpora/stopwords')
except LookupError:
    nltk.download('stopwords')
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
try:
    nltk.data.find('sentiment/vader_lexicon.zip')
except LookupError:
    nltk.download('vader_lexicon')
stop_words = set(stopwords.words('indonesian'))
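# Preprocessing pipeline: lowercase, strip non-letter characters, tokenize,
# drop Indonesian stopwords, then stem each token with Sastrawi.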
def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'[^a-zA-Z\s]', '', text)
    words = word_tokenize(text)
    words = [stemmer.stem(word) for word in words if word not in stop_words]
    return ' '.join(words)
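# Illustrative example (hypothetical input; exact stems depend on the Sastrawi
# dictionary):
#   preprocess_text("Pengirimannya cepat, barangnya bagus!")
#   # -> roughly "kirim cepat barang bagus"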
def summarize_with_keybert(reviews, preds, top_n=10):
    pos_phrases_all = []
    neg_phrases_all = []
    for review, pred in zip(reviews, preds):
        # Split the review into sentences
        sentences = nltk.sent_tokenize(review)
        for sentence in sentences:
            keywords = kw_model.extract_keywords(
                sentence,
                keyphrase_ngram_range=(1, 3),
                stop_words=None,
                top_n=2
            )
            phrases = [kw for kw, score in keywords]
            if pred == 1:
                pos_phrases_all.extend(phrases)
            else:
                neg_phrases_all.extend(phrases)
    # Count phrase frequencies
    pos_counter = Counter(pos_phrases_all)
    neg_counter = Counter(neg_phrases_all)
    # Take the most common phrases
    pos_common = [phrase for phrase, count in pos_counter.most_common(top_n)]
    neg_common = [phrase for phrase, count in neg_counter.most_common(top_n)]
    # Count sentiments (still per review, not per sentence)
    pos_count = int(sum(preds))
    neg_count = int(len(preds) - pos_count)
    # The return format is unchanged
    return {
        "dominant_sentiment":
            "Positif" if pos_count >= neg_count else "Negatif",
        "counts": {
            "Positif": pos_count,
            "Negatif": neg_count,
        },
        "top_phrases": {
            "Positif": pos_common,
            "Negatif": neg_common,
        }
    }
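# The pickled artifacts below must come from the same training run: at predict
# time the feature matrix is the TF-IDF vector hstack-ed with one encoded
# category column (see _run_analysis_job), which has to match what the Naive
# Bayes model was fitted on.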
model = joblib.load('naive_bayes_model.pkl')
vectorizer = joblib.load('tfidf_vectorizer.pkl')
kw_model = KeyBERT(
    SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
)
app = Flask(__name__)
CORS(app, resources={r"/analyze": {"origins": "*"}, r"/analyze_vader": {"origins": "*"}, r"/stream/*": {"origins": "*"}, r"/progress/*": {"origins": "*"}, r"/download/*": {"origins": "*"}})
# In-memory job progress store
PROGRESS = {}
PROGRESS_LOCK = threading.Lock()
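# Each PROGRESS entry is {"status", "percent", "message", "data", "ts"}, keyed
# by a uuid4 hex job id. Entries live in memory and are never evicted, so a
# long-lived deployment would need a cleanup policy (not implemented here).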
def set_progress(job_id, status=None, percent=None, message=None, data=None):
    with PROGRESS_LOCK:
        st = PROGRESS.get(job_id, {"status": "pending", "percent": 0, "message": "", "data": None, "ts": None})
        if status is not None:
            st["status"] = status
        if percent is not None:
            st["percent"] = percent
        if message is not None:
            st["message"] = message
        if data is not None:
            st["data"] = data
        st["ts"] = datetime.now().isoformat(timespec='seconds')
        PROGRESS[job_id] = st
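# Background worker for /analyze: scrape -> preprocess -> TF-IDF + category
# feature -> Naive Bayes prediction -> KeyBERT phrase summary.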
def _run_analysis_job(job_id, shortlink):
    try:
        set_progress(job_id, status="running", percent=5, message="Resolving shortlink...")

        def _scrape_progress(n):
            set_progress(job_id, message=f"Sudah berhasil mengambil {n} data")

        reviews, category_name, product_name, star_counts, all_stars = get_reviews_and_category(shortlink, on_progress=_scrape_progress)
        set_progress(job_id, percent=30, message=f"Scraped {len(reviews)} reviews; preprocessing...")
        texts_preprocessed = [preprocess_text(r) for r in reviews]
        if not texts_preprocessed:
            set_progress(job_id, status="completed", percent=100, message="No reviews found", data={"category": category_name, "product_name": product_name, "star_counts": star_counts, "items": []})
            return
        set_progress(job_id, percent=55, message="Vectorizing...")
        X_text = vectorizer.transform(texts_preprocessed)
        encoded_category = category_map.get(category_name, 20)
        X_cat = csr_matrix(np.full((len(reviews), 1), encoded_category))
        set_progress(job_id, percent=75, message="Predicting sentiments...")
        X_final = hstack([X_text, X_cat])
        preds = model.predict(X_final)
        # predict_proba is collected when available but not yet surfaced in the result
        probs = None
        try:
            if hasattr(model, "predict_proba"):
                probs = model.predict_proba(X_final)
        except Exception:
            probs = None
        set_progress(job_id, percent=85, message="Meringkas kata dominan per sentimen...")
        summary = summarize_with_keybert(reviews, preds)
        items = []
        for i, (review, p, s) in enumerate(zip(reviews, preds, all_stars), start=1):
            label = "Positif" if p == 1 else ("Negatif" if p == 0 else str(p))
            items.append({"sentiment": label, "review": review, "stars": s})
            # Optional fine-grained progress, kept in the 85-100 band so it never regresses
            set_progress(job_id, percent=85 + int(15 * (i / len(reviews))), message=f"Sudah berhasil mengambil {i} data")
        result = {
            "category": category_name,
            "category_encoded": encoded_category,
            "product_name": product_name,
            "count": len(items),
            "items": items,
            "summary": summary,
            "star_counts": star_counts,
        }
        set_progress(job_id, status="completed", percent=100, message="Done", data=result)
    except Exception as e:
        tb = traceback.format_exc()
        set_progress(job_id, status="failed", percent=100, message=str(e) or "Unhandled error", data={"error": str(e), "traceback": tb})
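# Background worker for /analyze_vader. Reviews are machine-translated id -> en
# first because VADER's lexicon is English-only; on translation failure the
# original Indonesian text is scored as a fallback.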
def _run_vader_analysis_job(job_id, shortlink):
    try:
        set_progress(job_id, status="running", percent=5, message="Resolving shortlink...")

        def _scrape_progress(n):
            set_progress(job_id, message=f"Sudah berhasil mengambil {n} data")

        reviews, category_name, product_name, star_counts, all_stars = get_reviews_and_category(shortlink, on_progress=_scrape_progress)
        set_progress(job_id, percent=30, message=f"Scraped {len(reviews)} reviews; analyzing with VADER...")
        if not reviews:
            set_progress(job_id, status="completed", percent=100, message="No reviews found", data={"category": category_name, "product_name": product_name, "star_counts": star_counts, "items": []})
            return
        set_progress(job_id, percent=40, message="Menerjemahkan ulasan ke Bahasa Inggris...")
        translator = GoogleTranslator(source='id', target='en')
        translated_reviews = []
        for i, review in enumerate(reviews, 1):
            try:
                # Strip emoji and unusual symbols so they do not trip the translation API
                clean_review = re.sub(r'[^\w\s.,!?-]', '', review)
                tr_text = translator.translate(clean_review[:4999])
                if not tr_text or not tr_text.strip():
                    translated_reviews.append(review)
                else:
                    translated_reviews.append(tr_text)
            except Exception as e:
                logging.warning(f"Gagal menerjemahkan (fallback ori): {str(e)} -> {review[:30]}...")
                translated_reviews.append(review)
            # Update progress occasionally
            if i % 3 == 0:
                set_progress(job_id, percent=40 + int(15 * (i / len(reviews))), message=f"Menerjemahkan {i}/{len(reviews)} ulasan...")
        set_progress(job_id, percent=55, message="Memprediksi sentimen dengan VADER...")
        sia = SentimentIntensityAnalyzer()
        preds = []
        for i, (review_ori, review_en) in enumerate(zip(reviews, translated_reviews)):
            score = sia.polarity_scores(review_en)
            sentiment_label = "Positif" if score['compound'] >= 0 else "Negatif"
            logging.info(f"[VADER] Scores: neg={score['neg']}, neu={score['neu']}, pos={score['pos']}, compound={score['compound']} -> {sentiment_label} | EN: {review_en[:60]}...")
            # Threshold: compound >= 0.0 maps to Positif (1) to match the return format
            if score['compound'] >= 0:
                preds.append(1)
            else:
                preds.append(0)
        set_progress(job_id, percent=85, message="Meringkas kata dominan per sentimen...")
        summary = summarize_with_keybert(reviews, preds)
        items = []
        for i, (review, p, s) in enumerate(zip(reviews, preds, all_stars), start=1):
            label = "Positif" if p == 1 else "Negatif"
            items.append({"sentiment": label, "review": review, "stars": s})
            # Optional fine-grained progress
            set_progress(job_id, percent=85 + int(15 * (i / len(reviews))), message=f"Sudah berhasil memproses {i} data")
        result = {
            "category": category_name,
            "category_encoded": category_map.get(category_name, 20),
            "product_name": product_name,
            "count": len(items),
            "items": items,
            "summary": summary,
            "star_counts": star_counts,
        }
        set_progress(job_id, status="completed", percent=100, message="Done", data=result)
    except Exception as e:
        tb = traceback.format_exc()
        set_progress(job_id, status="failed", percent=100, message=str(e) or "Unhandled error", data={"error": str(e), "traceback": tb})
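# POST /analyze kicks off a background job and returns 202 immediately.
# Hypothetical client call (the shortlink is a placeholder):
#   curl -X POST http://localhost:5000/analyze \
#        -H 'Content-Type: application/json' \
#        -d '{"shortlink": "https://tk.tokopedia.com/ZS/example"}'
# The response carries job_id plus progress_url (polling) and stream_url (SSE).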
@app.post('/analyze')
def analyze():
    data = request.get_json(silent=True) or {}
    shortlink = data.get('shortlink')
    if not shortlink:
        return jsonify({"error": "shortlink is required"}), 400
    job_id = uuid.uuid4().hex
    set_progress(job_id, status="queued", percent=0, message="Job queued")
    t = threading.Thread(target=_run_analysis_job, args=(job_id, shortlink), daemon=True)
    t.start()
    base = request.host_url.rstrip('/')
    return jsonify({
        "job_id": job_id,
        "progress_url": f"{base}/progress/{job_id}",
        "stream_url": f"{base}/stream/{job_id}"
    }), 202
@app.post('/analyze_vader')
def analyze_vader():
    data = request.get_json(silent=True) or {}
    shortlink = data.get('shortlink')
    if not shortlink:
        return jsonify({"error": "shortlink is required"}), 400
    job_id = uuid.uuid4().hex
    set_progress(job_id, status="queued", percent=0, message="Job queued")
    t = threading.Thread(target=_run_vader_analysis_job, args=(job_id, shortlink), daemon=True)
    t.start()
    base = request.host_url.rstrip('/')
    return jsonify({
        "job_id": job_id,
        "progress_url": f"{base}/progress/{job_id}",
        "stream_url": f"{base}/stream/{job_id}"
    }), 202
@app.get('/progress/<job_id>')
def progress(job_id):
    with PROGRESS_LOCK:
        st = PROGRESS.get(job_id)
    if not st:
        return jsonify({"error": "unknown job_id"}), 404
    payload = {"status": st.get("status"), "percent": st.get("percent"), "message": st.get("message"), "ts": st.get("ts")}
    if st.get("status") in ("completed", "failed") and st.get("data") is not None:
        payload["result"] = st["data"]
    return jsonify(_to_builtin(payload))
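# SSE endpoint: emits a `data: <json>\n\n` frame whenever the job state
# changes, then one final frame that includes `result` once the job completes
# or fails. A browser can consume it with: new EventSource(stream_url).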
@app.get('/stream/<job_id>')
def stream(job_id):
    def gen():
        last_payload = None
        while True:
            # Snapshot the state under the lock, but never yield while holding it
            with PROGRESS_LOCK:
                st = PROGRESS.get(job_id)
            if not st:
                yield f"data: {_json_dumps({'error': 'unknown job_id'})}\n\n"
                break
            payload = {"status": st.get("status"), "percent": st.get("percent"), "message": st.get("message"), "ts": st.get("ts")}
            if payload != last_payload:
                yield f"data: {_json_dumps(payload)}\n\n"
                last_payload = payload
            if st.get("status") in ("completed", "failed"):
                # Send the final data frame on completion
                if st.get("data") is not None:
                    yield f"data: {_json_dumps({'status': st['status'], 'percent': st['percent'], 'message': st['message'], 'ts': st.get('ts'), 'result': st['data']})}\n\n"
                break
            time.sleep(0.1)
    return Response(
        gen(),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Access-Control-Allow-Origin': '*'
        }
    )
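# Assembles the PDF report with ReportLab: a metadata table, the star
# distribution, top phrases per sentiment, a matplotlib pie chart of the
# positive/negative split, and the full review table.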
@app.get('/download/<job_id>/pdf')
def download_pdf(job_id):
    with PROGRESS_LOCK:
        st = PROGRESS.get(job_id)
    if not st:
        return jsonify({"error": "unknown job_id"}), 404
    if st.get("status") != "completed" or not st.get("data"):
        return jsonify({"error": "job not completed"}), 400
    data = st["data"]
    items = data.get("items", [])
    summary = data.get("summary") or {}
    pos = sum(1 for it in items if it.get("sentiment") == "Positif")
    neg = sum(1 for it in items if it.get("sentiment") == "Negatif")
    chart_bytes = io.BytesIO()
    labels = ["Positif", "Negatif"]
    sizes = [pos, neg]
    chart_colors = ['#34D399', '#F87171']
    explode = (0.04, 0.04) if pos and neg else (0, 0)
    fig, ax = plt.subplots(figsize=(4.5, 4.5))
    ax.pie(sizes, explode=explode, labels=labels, colors=chart_colors, autopct='%1.1f%%', startangle=140)
    ax.axis('equal')
    plt.tight_layout()
    fig.savefig(chart_bytes, format='png', dpi=150, bbox_inches='tight')
    plt.close(fig)
    chart_bytes.seek(0)
    pdf_buf = io.BytesIO()
    doc = SimpleDocTemplate(pdf_buf, pagesize=A4)
    styles = getSampleStyleSheet()
    elements = []
    title = "Hasil Analisis Sentimen"
    elements.append(Paragraph(title, styles['Title']))
    elements.append(Spacer(1, 12))
    meta_data = [
        ["Category", data.get("category", "")],
        ["Product Name", data.get("product_name", "")],
        ["Total Reviews", str(data.get("count", 0))],
        ["Generated At", st.get("ts", "")],
        ["Positif", str(pos)],
        ["Negatif", str(neg)],
    ]
    meta_table = Table(meta_data, hAlign='LEFT')
    meta_table.setStyle(TableStyle([
        ('BACKGROUND', (0, 0), (0, -1), colors.whitesmoke),
        ('TEXTCOLOR', (0, 0), (0, -1), colors.black),
        ('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'),
        ('FONTSIZE', (0, 0), (-1, -1), 9),
        ('BACKGROUND', (0, 0), (-1, 0), colors.whitesmoke),
    ]))
    elements.append(meta_table)
    elements.append(Spacer(1, 12))
    star_counts = data.get("star_counts", {})
    if star_counts:
        elements.append(Paragraph("Distribusi Bintang", styles['Heading3']))
        star_data = [
            ["Bintang 5", str(star_counts.get("5", 0))],
            ["Bintang 4", str(star_counts.get("4", 0))],
            ["Bintang 3", str(star_counts.get("3", 0))],
            ["Bintang 2", str(star_counts.get("2", 0))],
            ["Bintang 1", str(star_counts.get("1", 0))],
        ]
        star_table = Table(star_data, hAlign='LEFT')
        star_table.setStyle(TableStyle([
            ('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
            ('BACKGROUND', (0, 0), (0, -1), colors.whitesmoke),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'),
            ('FONTSIZE', (0, 0), (-1, -1), 9),
        ]))
        elements.append(star_table)
        elements.append(Spacer(1, 12))
    top_ph = summary.get("top_phrases") or {}
    pos_ph = top_ph.get("Positif") or []
    neg_ph = top_ph.get("Negatif") or []
    if pos_ph or neg_ph:
        elements.append(Paragraph("Ringkasan frasa dominan", styles['Heading3']))
        ph_rows = [[Paragraph("Positif (top)", styles['BodyText']), Paragraph("Negatif (top)", styles['BodyText'])]]
        max_rows = max(len(pos_ph), len(neg_ph), 1)
        for i in range(max_rows):
            p = pos_ph[i] if i < len(pos_ph) else ""
            n = neg_ph[i] if i < len(neg_ph) else ""
            ph_rows.append([Paragraph(p, styles['BodyText']), Paragraph(n, styles['BodyText'])])
        ph_table = Table(ph_rows, repeatRows=1, hAlign='LEFT', colWidths=[240, 240])
        ph_table.setStyle(TableStyle([
            ('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
            ('BACKGROUND', (0, 0), (-1, 0), colors.whitesmoke),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'),
            ('FONTSIZE', (0, 0), (-1, -1), 9),
        ]))
        elements.append(ph_table)
        elements.append(Spacer(1, 12))
    rl_img = RLImage(chart_bytes, width=250, height=250)
    elements.append(rl_img)
    elements.append(Spacer(1, 12))
    data_rows = [[Paragraph("sentiment", styles['BodyText']), Paragraph("rating", styles['BodyText']), Paragraph("review", styles['BodyText'])]]
    for it in items:
        s = it.get("sentiment", "")
        rating = str(it.get("stars", 0))
        r = it.get("review", "")
        data_rows.append([Paragraph(s, styles['BodyText']), Paragraph(rating, styles['BodyText']), Paragraph(r, styles['BodyText'])])
    table = Table(data_rows, repeatRows=1, hAlign='LEFT', colWidths=[60, 40, 380])
    table.setStyle(TableStyle([
        ('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
        ('BACKGROUND', (0, 0), (-1, 0), colors.whitesmoke),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'),
        ('FONTSIZE', (0, 0), (-1, -1), 9),
    ]))
    elements.append(table)
    doc.build(elements)
    pdf_buf.seek(0)
    filename = f"sentiment_{job_id}.pdf"
    return send_file(pdf_buf, as_attachment=True, download_name=filename, mimetype='application/pdf')
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=False)
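# Note: the built-in dev server above is fine for local use; a production
# deployment would sit behind a WSGI server instead, e.g. (assuming this file
# is named text.py):
#   gunicorn --bind 0.0.0.0:5000 --threads 8 text:app
# A single worker process is assumed so the in-memory PROGRESS store is shared.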