feat: adjust login page

This commit is contained in:
akhdanre 2025-04-24 21:27:29 +07:00
parent cf7091f83d
commit 7733fe7dd6
10 changed files with 132 additions and 58 deletions

View File

@@ -1,10 +1,15 @@
from flask import jsonify, request, current_app
from pydantic import ValidationError
from models.login.login_response import UserResponseModel
from schemas.basic_response_schema import ResponseSchema
from schemas.google_login_schema import GoogleLoginSchema
from schemas import LoginSchema
from services import UserService, AuthService
from exception import AuthException
from mapper import UserMapper
import logging
# NOTE(review): this rebinds the module name `logging` to a Logger instance,
# shadowing the logging module for the rest of this file (e.g. a later
# logging.basicConfig(...) call would fail with AttributeError). Conventional
# form is `logger = logging.getLogger(__name__)` — confirm no later code in
# this file relies on the module name before renaming.
logging = logging.getLogger(__name__)
class AuthController:
@@ -20,7 +25,8 @@ class AuthController:
return (
jsonify(
ResponseSchema(
message="Register success", data=response
message="Register success",
data=UserMapper.user_entity_to_response(response),
).model_dump()
),
200,
@@ -34,7 +40,7 @@ class AuthController:
current_app.logger.error(
f"Error during Google login: {str(e)}", exc_info=True
)
response = ResponseSchema(
response = ResponseSchema(
message="Internal server error", data=None, meta=None
)
return jsonify(response.model_dump()), 500
@@ -59,7 +65,7 @@ class AuthController:
response = ResponseSchema(
message="Login successful",
data=user_info,
data=UserMapper.user_entity_to_response(user_info),
meta=None,
)
return jsonify(response.model_dump()), 200

View File

@@ -4,10 +4,15 @@ from configs import Config, LoggerConfig
from flask import Flask
from blueprints import auth_blueprint, user_blueprint
from database import init_db
import logging
def createApp() -> Flask:
app = Flask(__name__)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)
app.config.from_object(Config)
LoggerConfig.init_logger(app)

View File

@@ -1,6 +1,6 @@
from datetime import datetime
from typing import Dict, Optional
from models import UserEntity
from models import UserEntity, UserResponseModel
from schemas import RegisterSchema
@@ -39,3 +39,18 @@ class UserMapper:
updated_at=datetime.now(),
verification_token=None,
)
@staticmethod
def user_entity_to_response(user: UserEntity) -> UserResponseModel:
    """Map a persisted UserEntity to the API-facing UserResponseModel.

    The stored document id is stringified (Mongo ObjectId is not
    JSON-serializable); a missing ``_id`` maps to ``None``.
    """
    return UserResponseModel(
        # Field is declared as `id` with alias "_id" on the response model.
        _id=str(user._id) if user._id else None,
        google_id=user.google_id,
        email=user.email,
        name=user.name,
        birth_date=user.birth_date,
        pic_url=user.pic_url,
        phone=user.phone,
        locale=user.locale,
        # NOTE(review): created_at/updated_at are deliberately not exposed
        # here (previously commented out) — confirm this is intended before
        # adding timestamp fields to UserResponseModel.
    )

View File

@@ -1,4 +1,5 @@
# app/models/__init__.py
from .entities import UserEntity
from .login import UserResponseModel
__all__ = ["UserEntity", "UserDTO"]
__all__ = ["UserEntity", "UserDTO", "UserResponseModel"]

View File

@@ -0,0 +1 @@
from .login_response import UserResponseModel

View File

@@ -0,0 +1,20 @@
from pydantic import BaseModel, EmailStr, Field
from typing import Optional
from datetime import datetime
class UserResponseModel(BaseModel):
    """Serializable view of a user returned by the auth endpoints.

    The Mongo document id is exposed as ``id`` but accepted and emitted
    under its storage alias ``_id``.
    """

    id: Optional[str] = Field(alias="_id")
    google_id: Optional[str] = None
    email: EmailStr
    name: str
    birth_date: Optional[datetime] = None
    pic_url: Optional[str] = None
    phone: Optional[str] = None
    locale: str

    class Config:
        # Fix: requirements pin pydantic==2.10.6, and Pydantic v2 raises
        # PydanticUserError for the renamed v1 key
        # `allow_population_by_field_name`; the v2 name is `populate_by_name`.
        populate_by_name = True
        # `json_encoders` is deprecated in v2 but still honored; keeps
        # datetimes serialized as ISO-8601 strings.
        json_encoders = {
            datetime: lambda v: v.isoformat(),
        }

View File

@@ -37,8 +37,10 @@ class AuthService:
def login(self, data: LoginSchema):
current_app.logger.info(f"request data: {data}")
user_data = self.user_repository.get_user_by_email(data.email)
current_app.logger.info(f"user_data: {user_data}")
if user_data == None:
return None

View File

@@ -1,53 +0,0 @@
import numpy as np
class LSTM:
    """Minimal single-cell LSTM: forward pass plus a training skeleton.

    Gate weights are drawn from a scaled standard normal; the hidden state
    ``h`` and cell state ``c`` persist across ``forward`` calls.
    """

    def __init__(self, input_dim, hidden_dim):
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        rows, cols = hidden_dim, input_dim + hidden_dim
        # Gate weight matrices: forget, input, candidate, output.
        self.Wf = np.random.randn(rows, cols) * 0.01
        self.Wi = np.random.randn(rows, cols) * 0.01
        self.Wc = np.random.randn(rows, cols) * 0.01
        self.Wo = np.random.randn(rows, cols) * 0.01
        # Gate biases start at zero.
        self.bf = np.zeros((rows, 1))
        self.bi = np.zeros((rows, 1))
        self.bc = np.zeros((rows, 1))
        self.bo = np.zeros((rows, 1))
        # Recurrent state carried between time steps.
        self.h = np.zeros((rows, 1))
        self.c = np.zeros((rows, 1))

    def sigmoid(self, x):
        """Elementwise logistic function."""
        return 1 / (1 + np.exp(-x))

    def tanh(self, x):
        """Elementwise hyperbolic tangent."""
        return np.tanh(x)

    def forward(self, x_t):
        """Advance the cell one time step on input column vector ``x_t``."""
        stacked = np.vstack((self.h, x_t))
        forget_gate = self.sigmoid(np.dot(self.Wf, stacked) + self.bf)
        input_gate = self.sigmoid(np.dot(self.Wi, stacked) + self.bi)
        candidate = self.tanh(np.dot(self.Wc, stacked) + self.bc)
        self.c = forget_gate * self.c + input_gate * candidate
        output_gate = self.sigmoid(np.dot(self.Wo, stacked) + self.bo)
        self.h = output_gate * self.tanh(self.c)
        return self.h

    def backward(self, x_t, h_t, y_t, learning_rate):
        """Backward pass placeholder — gradients are not implemented."""
        pass

    def train(self, X, y, num_epochs, learning_rate):
        """Naive training loop: forward, squared-error loss, backward stub."""
        for epoch in range(num_epochs):
            for i, x_t in enumerate(X):
                y_t = y[i]
                h_t = self.forward(x_t)
                # Mean squared error against the hidden state (example loss).
                loss = np.mean((h_t - y_t) ** 2)
                self.backward(x_t, h_t, y_t, learning_rate)
                # Periodic progress report every 100 samples.
                if i % 100 == 0:
                    print(f"Epoch {epoch}, Sample {i}, Loss: {loss}")

View File

@@ -0,0 +1,58 @@
from keras.models import load_model
import pickle
class LSTMService:
    """Inference wrapper for a pre-trained question-generation LSTM.

    Loads tokenizers and the Keras model shipped under ``QC/`` and turns a
    tokenized sentence (tokens + NER + SRL tags) into a question, answer
    and question-type prediction.
    """

    def predict(self, input_data, maxlen=50):
        """Run one prediction.

        Args:
            input_data: mapping with "tokens", "ner" and "srl" sequences.
            maxlen: length all three input channels are padded to.

        Returns:
            dict with decoded "question", "answer" and "type" strings.
        """
        # Fix: `np` and `pad_sequences` were used below but never imported,
        # so the original raised NameError on every call.
        import numpy as np
        from keras.utils import pad_sequences

        # NOTE(review): artifacts are re-read from disk on every call —
        # consider caching them on the instance if predict() becomes hot.
        with open("QC/tokenizers.pkl", "rb") as f:
            tokenizers = pickle.load(f)
        model = load_model("QC/lstm_qg.keras")

        tok_token = tokenizers["token"]
        tok_ner = tokenizers["ner"]
        tok_srl = tokenizers["srl"]
        tok_q = tokenizers["question"]
        tok_a = tokenizers["answer"]
        tok_type = tokenizers["type"]

        # Encode each input channel and pad to a fixed length.
        tokens = input_data["tokens"]
        ner = input_data["ner"]
        srl = input_data["srl"]
        x_tok = pad_sequences(
            [tok_token.texts_to_sequences([tokens])[0]], maxlen=maxlen, padding="post"
        )
        x_ner = pad_sequences(
            [tok_ner.texts_to_sequences([ner])[0]], maxlen=maxlen, padding="post"
        )
        x_srl = pad_sequences(
            [tok_srl.texts_to_sequences([srl])[0]], maxlen=maxlen, padding="post"
        )

        # Greedy decode: argmax token at every output position.
        pred_q, pred_a, pred_type = model.predict([x_tok, x_ner, x_srl])
        pred_q_ids = np.argmax(pred_q[0], axis=-1)
        pred_a_ids = np.argmax(pred_a[0], axis=-1)
        pred_type_id = np.argmax(pred_type[0])

        # Map ids back to words, dropping padding (id 0).
        index2word_q = {v: k for k, v in tok_q.word_index.items()}
        index2word_a = {v: k for k, v in tok_a.word_index.items()}
        index2word_q[0] = "<PAD>"
        index2word_a[0] = "<PAD>"
        decoded_q = [index2word_q[i] for i in pred_q_ids if i != 0]
        decoded_a = [index2word_a[i] for i in pred_a_ids if i != 0]
        # Keras tokenizer indices start at 1; the type head is 0-based.
        index2type = {v - 1: k for k, v in tok_type.word_index.items()}
        decoded_type = index2type.get(pred_type_id, "unknown")

        return {
            "question": " ".join(decoded_q),
            "answer": " ".join(decoded_a),
            "type": decoded_type,
        }

View File

@@ -1,7 +1,9 @@
annotated-types==0.7.0
Authlib==1.5.1
bcrypt==4.3.0
bidict==0.23.1
blinker==1.9.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
@@ -16,23 +18,40 @@ Flask-Bcrypt==1.0.1
Flask-JWT-Extended==4.7.1
Flask-Login==0.6.3
Flask-PyMongo==3.0.1
Flask-SocketIO==5.5.1
flask-swagger-ui==4.11.1
google-auth==2.38.0
google-auth-httplib2==0.2.0
google-auth-oauthlib==1.2.1
h11==0.14.0
httplib2==0.22.0
idna==3.10
iniconfig==2.0.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
numpy==2.1.2
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.1
pycparser==2.22
pydantic==2.10.6
pydantic_core==2.27.2
PyJWT==2.10.1
pymongo==4.11.2
pyparsing==3.2.1
pytest==8.3.4
python-dotenv==1.0.1
python-engineio==4.11.2
python-socketio==5.12.1
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
simple-websocket==1.1.0
tomli==2.2.1
typing_extensions==4.12.2
urllib3==2.3.0
Werkzeug==3.1.3
wsproto==1.2.0