feat: adjust login page

commit 7733fe7dd6 (parent cf7091f83d)

@@ -1,10 +1,15 @@
 from flask import jsonify, request, current_app
 from pydantic import ValidationError
+from models.login.login_response import UserResponseModel
 from schemas.basic_response_schema import ResponseSchema
 from schemas.google_login_schema import GoogleLoginSchema
 from schemas import LoginSchema
 from services import UserService, AuthService
 from exception import AuthException
+from mapper import UserMapper
+
+import logging
+
+logging = logging.getLogger(__name__)
 
 
 class AuthController:
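
Note on this hunk: rebinding the name logging to the logger object shadows the stdlib module, so any later reference such as logging.INFO or logging.getLogger(...) in this file would fail. A minimal sketch of the conventional pattern (a suggested alternative, not what this commit does):

import logging

# Bind the logger to a distinct name so the stdlib module stays reachable.
logger = logging.getLogger(__name__)

def example_handler():
    # logging.INFO still resolves to the module here.
    logger.log(logging.INFO, "login request received")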
@@ -20,7 +25,8 @@ class AuthController:
         return (
             jsonify(
                 ResponseSchema(
-                    message="Register success", data=response
+                    message="Register success",
+                    data=UserMapper.user_entity_to_response(response),
                 ).model_dump()
             ),
             200,
@@ -34,7 +40,7 @@ class AuthController:
             current_app.logger.error(
                 f"Error during Google login: {str(e)}", exc_info=True
             )
             response = ResponseSchema(
                 message="Internal server error", data=None, meta=None
             )
             return jsonify(response.model_dump()), 500
@@ -59,7 +65,7 @@ class AuthController:
 
             response = ResponseSchema(
                 message="Login successful",
-                data=user_info,
+                data=UserMapper.user_entity_to_response(user_info),
                 meta=None,
             )
             return jsonify(response.model_dump()), 200
@@ -4,10 +4,15 @@ from configs import Config, LoggerConfig
 from flask import Flask
 from blueprints import auth_blueprint, user_blueprint
 from database import init_db
+
+import logging
 
 
 def createApp() -> Flask:
     app = Flask(__name__)
 
+    logging.basicConfig(
+        level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
+    )
     app.config.from_object(Config)
     LoggerConfig.init_logger(app)
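
Worth noting: logging.basicConfig is a no-op when the root logger already has handlers, so its effect here depends on what LoggerConfig.init_logger(app) attaches and in which order the two run. A hedged sketch of making the call authoritative (force=True is an assumption, available since Python 3.8):

import logging

# force=True removes any handlers a previous initializer attached,
# guaranteeing this level and format actually take effect.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    force=True,
)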
@@ -1,6 +1,6 @@
 from datetime import datetime
 from typing import Dict, Optional
-from models import UserEntity
+from models import UserEntity, UserResponseModel
 from schemas import RegisterSchema
 
 
@@ -39,3 +39,18 @@ class UserMapper:
             updated_at=datetime.now(),
             verification_token=None,
         )
+
+    @staticmethod
+    def user_entity_to_response(user: UserEntity) -> UserResponseModel:
+        return UserResponseModel(
+            _id=str(user._id) if user._id else None,
+            google_id=user.google_id,
+            email=user.email,
+            name=user.name,
+            birth_date=user.birth_date,
+            pic_url=user.pic_url,
+            phone=user.phone,
+            locale=user.locale,
+            # created_at=user.created_at,
+            # updated_at=user.updated_at,
+        )
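
For context, a sketch of how the new mapper output round-trips; by_alias=True is needed if the JSON payload should carry _id rather than id (the controller hunks above call model_dump() without it, so clients will see id):

# Hypothetical usage of the new mapper and response model.
response = UserMapper.user_entity_to_response(user)
payload = response.model_dump(by_alias=True)  # {"_id": ..., "google_id": ..., ...}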
@@ -1,4 +1,5 @@
 # app/models/__init__.py
 from .entities import UserEntity
+from .login import UserResponseModel
 
-__all__ = ["UserEntity", "UserDTO"]
+__all__ = ["UserEntity", "UserDTO", "UserResponseModel"]
@@ -0,0 +1 @@
+from .login_response import UserResponseModel
@@ -0,0 +1,20 @@
+from pydantic import BaseModel, EmailStr, Field
+from typing import Optional
+from datetime import datetime
+
+
+class UserResponseModel(BaseModel):
+    id: Optional[str] = Field(alias="_id")
+    google_id: Optional[str] = None
+    email: EmailStr
+    name: str
+    birth_date: Optional[datetime] = None
+    pic_url: Optional[str] = None
+    phone: Optional[str] = None
+    locale: str
+
+    class Config:
+        allow_population_by_field_name = True
+        json_encoders = {
+            datetime: lambda v: v.isoformat(),
+        }
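
One compatibility note: requirements.txt below pins pydantic==2.10.6, while class Config with allow_population_by_field_name is Pydantic v1 style; v2 emits a deprecation warning and renames the key to populate_by_name. A sketch of the v2-native equivalent (fields abbreviated):

from datetime import datetime
from typing import Optional

from pydantic import BaseModel, ConfigDict, EmailStr, Field


class UserResponseModel(BaseModel):
    # Pydantic v2: ConfigDict replaces `class Config`, and
    # populate_by_name replaces allow_population_by_field_name.
    # datetime already serializes to ISO 8601 in JSON mode,
    # so the json_encoders entry is unnecessary.
    model_config = ConfigDict(populate_by_name=True)

    id: Optional[str] = Field(default=None, alias="_id")
    email: EmailStr
    name: str
    birth_date: Optional[datetime] = None
    locale: str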
@@ -37,8 +37,10 @@ class AuthService:
 
     def login(self, data: LoginSchema):
 
+        current_app.logger.info(f"request data: {data}")
         user_data = self.user_repository.get_user_by_email(data.email)
 
+        current_app.logger.info(f"user_data: {user_data}")
         if user_data == None:
             return None
 
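
A caution on the new log lines: data is the full LoginSchema, so f"request data: {data}" will write the submitted credentials into the logs. A minimal redacting alternative (assuming the schema has an email field):

# Log only non-sensitive fields; never the raw schema object.
current_app.logger.info("login attempt for %s", data.email)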
@@ -1,53 +0,0 @@
-import numpy as np
-
-
-class LSTM:
-    def __init__(self, input_dim, hidden_dim):
-        self.input_dim = input_dim
-        self.hidden_dim = hidden_dim
-        self.Wf = np.random.randn(hidden_dim, input_dim + hidden_dim) * 0.01
-        self.Wi = np.random.randn(hidden_dim, input_dim + hidden_dim) * 0.01
-        self.Wc = np.random.randn(hidden_dim, input_dim + hidden_dim) * 0.01
-        self.Wo = np.random.randn(hidden_dim, input_dim + hidden_dim) * 0.01
-        self.bf = np.zeros((hidden_dim, 1))
-        self.bi = np.zeros((hidden_dim, 1))
-        self.bc = np.zeros((hidden_dim, 1))
-        self.bo = np.zeros((hidden_dim, 1))
-        self.h = np.zeros((hidden_dim, 1))
-        self.c = np.zeros((hidden_dim, 1))
-
-    def sigmoid(self, x):
-        return 1 / (1 + np.exp(-x))
-
-    def tanh(self, x):
-        return np.tanh(x)
-
-    def forward(self, x_t):
-        combined = np.vstack((self.h, x_t))
-        f_t = self.sigmoid(np.dot(self.Wf, combined) + self.bf)
-        i_t = self.sigmoid(np.dot(self.Wi, combined) + self.bi)
-        C_tilde_t = self.tanh(np.dot(self.Wc, combined) + self.bc)
-        self.c = f_t * self.c + i_t * C_tilde_t
-        o_t = self.sigmoid(np.dot(self.Wo, combined) + self.bo)
-        self.h = o_t * self.tanh(self.c)
-        return self.h
-
-    def backward(self, x_t, h_t, y_t, learning_rate):
-        # Your backward pass implementation here
-        pass
-
-    def train(self, X, y, num_epochs, learning_rate):
-        for epoch in range(num_epochs):
-            for i in range(len(X)):
-                x_t = X[i]
-                y_t = y[i]
-
-                # Forward pass
-                h_t = self.forward(x_t)
-
-                # Calculate loss and perform backward pass
-                loss = np.mean((h_t - y_t) ** 2)  # Example loss
-                self.backward(x_t, h_t, y_t, learning_rate)
-
-                if i % 100 == 0:  # Print loss every 100 samples
-                    print(f"Epoch {epoch}, Sample {i}, Loss: {loss}")
@@ -0,0 +1,58 @@
+from keras.models import load_model
+import pickle
+
+
+class LSTMService:
+
+    def predict(self, input_data, maxlen=50):
+
+        with open("QC/tokenizers.pkl", "rb") as f:
+            tokenizers = pickle.load(f)
+
+        model = load_model("QC/lstm_qg.keras")
+
+        tok_token = tokenizers["token"]
+        tok_ner = tokenizers["ner"]
+        tok_srl = tokenizers["srl"]
+        tok_q = tokenizers["question"]
+        tok_a = tokenizers["answer"]
+        tok_type = tokenizers["type"]
+
+        # Prepare input
+        tokens = input_data["tokens"]
+        ner = input_data["ner"]
+        srl = input_data["srl"]
+
+        x_tok = pad_sequences(
+            [tok_token.texts_to_sequences([tokens])[0]], maxlen=maxlen, padding="post"
+        )
+        x_ner = pad_sequences(
+            [tok_ner.texts_to_sequences([ner])[0]], maxlen=maxlen, padding="post"
+        )
+        x_srl = pad_sequences(
+            [tok_srl.texts_to_sequences([srl])[0]], maxlen=maxlen, padding="post"
+        )
+
+        # Predict
+        pred_q, pred_a, pred_type = model.predict([x_tok, x_ner, x_srl])
+        pred_q_ids = np.argmax(pred_q[0], axis=-1)
+        pred_a_ids = np.argmax(pred_a[0], axis=-1)
+        pred_type_id = np.argmax(pred_type[0])
+
+        # Decode
+        index2word_q = {v: k for k, v in tok_q.word_index.items()}
+        index2word_a = {v: k for k, v in tok_a.word_index.items()}
+        index2word_q[0] = "<PAD>"
+        index2word_a[0] = "<PAD>"
+
+        decoded_q = [index2word_q[i] for i in pred_q_ids if i != 0]
+        decoded_a = [index2word_a[i] for i in pred_a_ids if i != 0]
+
+        index2type = {v - 1: k for k, v in tok_type.word_index.items()}
+        decoded_type = index2type.get(pred_type_id, "unknown")
+
+        return {
+            "question": " ".join(decoded_q),
+            "answer": " ".join(decoded_a),
+            "type": decoded_type,
+        }
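
As written, the new service references pad_sequences and np without importing them, and reloads the model and tokenizers on every predict call. A sketch of module-level setup (the keras.utils location of pad_sequences assumes Keras >= 2.9; adjust for older versions):

import pickle

import numpy as np
from keras.models import load_model
from keras.utils import pad_sequences

# Load heavyweight artifacts once at import time instead of per request.
with open("QC/tokenizers.pkl", "rb") as f:
    TOKENIZERS = pickle.load(f)
MODEL = load_model("QC/lstm_qg.keras")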
@@ -1,7 +1,9 @@
 annotated-types==0.7.0
 Authlib==1.5.1
 bcrypt==4.3.0
+bidict==0.23.1
 blinker==1.9.0
+cachetools==5.5.2
 certifi==2025.1.31
 cffi==1.17.1
 charset-normalizer==3.4.1
@@ -16,23 +18,40 @@ Flask-Bcrypt==1.0.1
 Flask-JWT-Extended==4.7.1
 Flask-Login==0.6.3
 Flask-PyMongo==3.0.1
+Flask-SocketIO==5.5.1
+flask-swagger-ui==4.11.1
+google-auth==2.38.0
+google-auth-httplib2==0.2.0
+google-auth-oauthlib==1.2.1
+h11==0.14.0
+httplib2==0.22.0
 idna==3.10
 iniconfig==2.0.0
 itsdangerous==2.2.0
 Jinja2==3.1.6
 MarkupSafe==3.0.2
 numpy==2.1.2
+oauthlib==3.2.2
 packaging==24.2
 pluggy==1.5.0
+pyasn1==0.6.1
+pyasn1_modules==0.4.1
 pycparser==2.22
 pydantic==2.10.6
 pydantic_core==2.27.2
 PyJWT==2.10.1
 pymongo==4.11.2
+pyparsing==3.2.1
 pytest==8.3.4
 python-dotenv==1.0.1
+python-engineio==4.11.2
+python-socketio==5.12.1
 requests==2.32.3
+requests-oauthlib==2.0.0
+rsa==4.9
+simple-websocket==1.1.0
 tomli==2.2.1
 typing_extensions==4.12.2
 urllib3==2.3.0
 Werkzeug==3.1.3
+wsproto==1.2.0