{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-s-0Go_idJSV",
"outputId": "998d34fd-9d21-4854-da2a-867e14600624"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting python-telegram-bot==13.7\n",
" Downloading python_telegram_bot-13.7-py3-none-any.whl (490 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m490.1/490.1 kB\u001b[0m \u001b[31m3.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from python-telegram-bot==13.7) (2024.2.2)\n",
"Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.10/dist-packages (from python-telegram-bot==13.7) (6.3.3)\n",
"Collecting APScheduler==3.6.3 (from python-telegram-bot==13.7)\n",
" Downloading APScheduler-3.6.3-py2.py3-none-any.whl (58 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.9/58.9 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: pytz>=2018.6 in /usr/local/lib/python3.10/dist-packages (from python-telegram-bot==13.7) (2023.4)\n",
"Collecting cachetools==4.2.2 (from python-telegram-bot==13.7)\n",
" Downloading cachetools-4.2.2-py3-none-any.whl (11 kB)\n",
"Requirement already satisfied: setuptools>=0.7 in /usr/local/lib/python3.10/dist-packages (from APScheduler==3.6.3->python-telegram-bot==13.7) (67.7.2)\n",
"Requirement already satisfied: six>=1.4.0 in /usr/local/lib/python3.10/dist-packages (from APScheduler==3.6.3->python-telegram-bot==13.7) (1.16.0)\n",
"Requirement already satisfied: tzlocal>=1.2 in /usr/local/lib/python3.10/dist-packages (from APScheduler==3.6.3->python-telegram-bot==13.7) (5.2)\n",
"Installing collected packages: cachetools, APScheduler, python-telegram-bot\n",
" Attempting uninstall: cachetools\n",
" Found existing installation: cachetools 5.3.3\n",
" Uninstalling cachetools-5.3.3:\n",
" Successfully uninstalled cachetools-5.3.3\n",
"Successfully installed APScheduler-3.6.3 cachetools-4.2.2 python-telegram-bot-13.7\n"
]
}
],
"source": [
"pip install python-telegram-bot==13.7"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "SnIbxiLxpsWI",
"outputId": "336a0a96-4a81-4ed3-ce0a-8018705c85da"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting openai-whisper\n",
" Downloading openai-whisper-20231117.tar.gz (798 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m798.6/798.6 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
"Requirement already satisfied: triton<3,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (2.2.0)\n",
"Requirement already satisfied: numba in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (0.58.1)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (1.25.2)\n",
"Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (2.2.1+cu121)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (4.66.2)\n",
"Requirement already satisfied: more-itertools in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (10.1.0)\n",
"Collecting tiktoken (from openai-whisper)\n",
" Downloading tiktoken-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.8 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m22.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from triton<3,>=2.0.0->openai-whisper) (3.13.4)\n",
"Requirement already satisfied: llvmlite<0.42,>=0.41.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba->openai-whisper) (0.41.1)\n",
"Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken->openai-whisper) (2023.12.25)\n",
"Requirement already satisfied: requests>=2.26.0 in /usr/local/lib/python3.10/dist-packages (from tiktoken->openai-whisper) (2.31.0)\n",
"Requirement already satisfied: typing-extensions>=4.8.0 in /usr/local/lib/python3.10/dist-packages (from torch->openai-whisper) (4.11.0)\n",
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->openai-whisper) (1.12)\n",
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->openai-whisper) (3.3)\n",
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->openai-whisper) (3.1.3)\n",
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch->openai-whisper) (2023.6.0)\n",
"Collecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch->openai-whisper)\n",
" Using cached nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n",
"Collecting nvidia-cuda-runtime-cu12==12.1.105 (from torch->openai-whisper)\n",
" Using cached nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n",
"Collecting nvidia-cuda-cupti-cu12==12.1.105 (from torch->openai-whisper)\n",
" Using cached nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n",
"Collecting nvidia-cudnn-cu12==8.9.2.26 (from torch->openai-whisper)\n",
" Using cached nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl (731.7 MB)\n",
"Collecting nvidia-cublas-cu12==12.1.3.1 (from torch->openai-whisper)\n",
" Using cached nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n",
"Collecting nvidia-cufft-cu12==11.0.2.54 (from torch->openai-whisper)\n",
" Using cached nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n",
"Collecting nvidia-curand-cu12==10.3.2.106 (from torch->openai-whisper)\n",
" Using cached nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n",
"Collecting nvidia-cusolver-cu12==11.4.5.107 (from torch->openai-whisper)\n",
" Using cached nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n",
"Collecting nvidia-cusparse-cu12==12.1.0.106 (from torch->openai-whisper)\n",
" Using cached nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n",
"Collecting nvidia-nccl-cu12==2.19.3 (from torch->openai-whisper)\n",
" Using cached nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl (166.0 MB)\n",
"Collecting nvidia-nvtx-cu12==12.1.105 (from torch->openai-whisper)\n",
" Using cached nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n",
"Collecting nvidia-nvjitlink-cu12 (from nvidia-cusolver-cu12==11.4.5.107->torch->openai-whisper)\n",
" Using cached nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (21.1 MB)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken->openai-whisper) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken->openai-whisper) (3.7)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken->openai-whisper) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken->openai-whisper) (2024.2.2)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->openai-whisper) (2.1.5)\n",
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->openai-whisper) (1.3.0)\n",
"Building wheels for collected packages: openai-whisper\n",
" Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for openai-whisper: filename=openai_whisper-20231117-py3-none-any.whl size=801358 sha256=e35ec844abfadc2b26e9cb01404f2e6b85a1048b6d8afb437958914d59bd983d\n",
" Stored in directory: /root/.cache/pip/wheels/d0/85/e1/9361b4cbea7dd4b7f6702fa4c3afc94877952eeb2b62f45f56\n",
"Successfully built openai-whisper\n",
"Installing collected packages: nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, tiktoken, nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, openai-whisper\n",
"Successfully installed nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.19.3 nvidia-nvjitlink-cu12-12.4.127 nvidia-nvtx-cu12-12.1.105 openai-whisper-20231117 tiktoken-0.6.0\n"
]
}
],
"source": [
"pip install -U openai-whisper"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "FH9pRMGkps08",
"outputId": "b6487d28-bdac-40ff-e71e-b27502c01cdc"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.2.1+cu121)\n",
"Collecting torch\n",
" Downloading torch-2.2.2-cp310-cp310-manylinux1_x86_64.whl (755.5 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m755.5/755.5 MB\u001b[0m \u001b[31m1.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch) (3.13.4)\n",
"Requirement already satisfied: typing-extensions>=4.8.0 in /usr/local/lib/python3.10/dist-packages (from torch) (4.11.0)\n",
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch) (1.12)\n",
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch) (3.3)\n",
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch) (3.1.3)\n",
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch) (2023.6.0)\n",
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.105)\n",
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.105)\n",
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.105)\n",
"Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /usr/local/lib/python3.10/dist-packages (from torch) (8.9.2.26)\n",
"Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.3.1)\n",
"Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /usr/local/lib/python3.10/dist-packages (from torch) (11.0.2.54)\n",
"Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /usr/local/lib/python3.10/dist-packages (from torch) (10.3.2.106)\n",
"Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /usr/local/lib/python3.10/dist-packages (from torch) (11.4.5.107)\n",
"Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.0.106)\n",
"Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /usr/local/lib/python3.10/dist-packages (from torch) (2.19.3)\n",
"Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch) (12.1.105)\n",
"Requirement already satisfied: triton==2.2.0 in /usr/local/lib/python3.10/dist-packages (from torch) (2.2.0)\n",
"Requirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.10/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch) (12.4.127)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch) (2.1.5)\n",
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)\n",
"Installing collected packages: torch\n",
" Attempting uninstall: torch\n",
" Found existing installation: torch 2.2.1+cu121\n",
" Uninstalling torch-2.2.1+cu121:\n",
" Successfully uninstalled torch-2.2.1+cu121\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"torchaudio 2.2.1+cu121 requires torch==2.2.1, but you have torch 2.2.2 which is incompatible.\n",
"torchtext 0.17.1 requires torch==2.2.1, but you have torch 2.2.2 which is incompatible.\n",
"torchvision 0.17.1+cu121 requires torch==2.2.1, but you have torch 2.2.2 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0mSuccessfully installed torch-2.2.2\n"
]
}
],
"source": [
"pip install --upgrade torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "SCUSLgFAc7Z4"
},
"outputs": [],
"source": [
"import librosa\n",
"import librosa.display\n",
"import pandas as pd\n",
"import os\n",
"import numpy as np\n",
"import torch\n",
"import whisper\n",
"import requests\n",
"import time\n",
"np.random.seed(0)\n",
"from joblib import load\n",
"from scipy.fftpack import dct\n",
"from telegram.ext import Updater, MessageHandler, Filters"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jzQaNFgedBmu"
},
"outputs": [],
"source": [
"# Load the trained classifier (Google Drive must be mounted at /content/drive for this path)\n",
"model = load('/content/drive/MyDrive/Skripsi/rbf/new_user_linear.pkl')"
]
},
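{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional check, not part of the original pipeline: inspect the loaded\n",
"# classifier. This assumes a scikit-learn estimator with a classes_\n",
"# attribute (the run logs further below show it is an SVC).\n",
"print(type(model).__name__)\n",
"print(getattr(model, 'classes_', 'classes_ not available'))"
]
},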
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "sh0-ovkleZS0"
},
"outputs": [],
"source": [
"# Pre-emphasis filter: y[n] = x[n] - 0.97 * x[n-1]\n",
"def pre_emphasis(signal, coefficient=0.97):\n",
"    return np.append(signal[0], signal[1:] - coefficient * signal[:-1])"
]
},
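{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Small sanity check, not part of the original pipeline: apply the\n",
"# pre-emphasis filter to a toy signal. With coefficient 0.97 the output is\n",
"# y[0] = x[0] and y[n] = x[n] - 0.97 * x[n-1] for n >= 1.\n",
"toy = np.array([1.0, 1.0, 2.0, 0.0])\n",
"print(pre_emphasis(toy))  # approx. [1.0, 0.03, 1.03, -1.94]\n",
"print(np.allclose(pre_emphasis(toy), [1.0, 0.03, 1.03, -1.94]))  # True"
]
},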
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "HeISpxGTebIs"
},
"outputs": [],
"source": [
"# Split the signal into overlapping frames (frame_length and frame_step are in seconds)\n",
"def framing(signal, sample_rate, frame_length=0.025, frame_step=0.010):\n",
"    nsamples_signal = len(signal)\n",
"    nsamples_frame = int(round(frame_length * sample_rate))\n",
"    nsamples_stride = int(round(frame_step * sample_rate))\n",
"    n_frames = int(np.ceil((nsamples_signal - nsamples_frame) / nsamples_stride) + 1)\n",
"    nsamples_padding = ((n_frames - 1) * nsamples_stride + nsamples_frame) - nsamples_signal\n",
"    z = np.zeros(nsamples_padding)\n",
"    signal = np.append(signal, z)\n",
"    frames = np.empty((n_frames, nsamples_frame))\n",
"    for i in range(n_frames):\n",
"        left = i * nsamples_stride\n",
"        right = left + nsamples_frame\n",
"        frame = signal[left:right]\n",
"        frames[i] = frame\n",
"    return frames"
]
},
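{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Small illustration, not part of the original pipeline: frame a one-second\n",
"# 44.1 kHz test tone with the default 25 ms frame length and 10 ms hop and\n",
"# inspect the resulting (n_frames, samples_per_frame) matrix.\n",
"sr_demo = 44100\n",
"t = np.linspace(0, 1.0, sr_demo, endpoint=False)\n",
"tone = np.sin(2 * np.pi * 440 * t)\n",
"demo_frames = framing(tone, sr_demo)\n",
"print(demo_frames.shape)  # about 99 overlapping frames of ~25 ms each"
]
},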
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "t24vLFANedWf"
},
"outputs": [],
"source": [
"# Extract MFCC features from an audio file (returns the mean MFCC vector over all frames)\n",
"def extract_mfcc(audio_file, sr=44100, frame_length=0.025, frame_step=0.010, nfilt=40, num_ceps=12):\n",
"    y, _ = librosa.load(audio_file, sr=sr)\n",
"\n",
"    # Normalize\n",
"    y_norm = librosa.util.normalize(y)\n",
"\n",
"    # Pre-emphasis\n",
"    y_pre_emphasis = pre_emphasis(y_norm)\n",
"\n",
"    # Framing\n",
"    frames = framing(y_pre_emphasis, sr, frame_length, frame_step)\n",
"\n",
"    # Windowing (Hann window)\n",
"    frames *= np.hanning(len(frames[0]))\n",
"\n",
"    # FFT\n",
"    NFFT = 512\n",
"    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))\n",
"    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))\n",
"\n",
"    # Mel filter bank\n",
"    low_freq_mel = 0\n",
"    high_freq_mel = (2595 * np.log10(1 + (sr / 2) / 700))\n",
"    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)\n",
"    hz_points = (700 * (10**(mel_points / 2595) - 1))\n",
"    bin = np.floor((NFFT + 1) * hz_points / sr)\n",
"\n",
"    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\n",
"    for m in range(1, nfilt + 1):\n",
"        f_m_minus = int(bin[m - 1])\n",
"        f_m = int(bin[m])\n",
"        f_m_plus = int(bin[m + 1])\n",
"        for k in range(f_m_minus, f_m):\n",
"            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\n",
"        for k in range(f_m, f_m_plus):\n",
"            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\n",
"\n",
"    filter_banks = np.dot(pow_frames, fbank.T)\n",
"    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)\n",
"    filter_banks = 20 * np.log10(filter_banks)\n",
"\n",
"    # Discrete Cosine Transform (DCT) for MFCC, keeping coefficients 1..num_ceps\n",
"    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1:(num_ceps + 1)]\n",
"\n",
"    return np.mean(mfcc, axis=0)"
]
},
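{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Usage sketch, not part of the original pipeline; the file path below is a\n",
"# hypothetical example. extract_mfcc returns a 12-dimensional vector (the\n",
"# mean MFCC over all frames), which is the single sample the loaded\n",
"# classifier expects.\n",
"sample_path = '/content/drive/MyDrive/Skripsi/sample_voice.wav'  # hypothetical path\n",
"if os.path.exists(sample_path):\n",
"    features = extract_mfcc(sample_path)\n",
"    print(features.shape)                          # (12,)\n",
"    print(model.predict(features.reshape(1, -1)))  # predicted speaker, e.g. ['HILMI']"
]
},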
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "3l8j6nW2_gRB"
},
"outputs": [],
"source": [
"def send_command_to_firebase(label):\n",
"    # Firebase RTDB path for each lamp\n",
"    paths = {\n",
"        \"Depan Menyala\": \"/DEPAN\",\n",
"        \"Depan Mati\": \"/DEPAN\",\n",
"        \"Samping Menyala\": \"/SAMPING\",\n",
"        \"Samping Mati\": \"/SAMPING\",\n",
"        \"Tengah Menyala\": \"/TENGAH\",\n",
"        \"Tengah Mati\": \"/TENGAH\"\n",
"    }\n",
"\n",
"    # Firebase value for each label\n",
"    values = {\n",
"        \"Depan Menyala\": \"1\",\n",
"        \"Depan Mati\": \"0\",\n",
"        \"Samping Menyala\": \"1\",\n",
"        \"Samping Mati\": \"0\",\n",
"        \"Tengah Menyala\": \"1\",\n",
"        \"Tengah Mati\": \"0\"\n",
"    }\n",
"\n",
"    # Send the command to Firebase\n",
"    if label in paths:\n",
"        path = paths[label]\n",
"        value = values[label]\n",
"        firebase_url = f\"https://smart-home-e64ae-default-rtdb.firebaseio.com/{path}.json?auth=AIzaSyCmquF32M-zZDpW_Hswg9ZvREgikmteJMY\"\n",
"        headers = {'Content-type': 'application/json'}\n",
"        response = requests.put(firebase_url, headers=headers, json=value)\n",
"        if response.status_code == 200:\n",
"            print(f\"Firebase update successful for {label}\")\n",
"        else:\n",
"            print(f\"Firebase update failed for {label}\")\n",
"    else:\n",
"        print(f\"Label {label} tidak valid\")"
]
},
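{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch, not part of the original pipeline: read a lamp state back\n",
"# from the same Firebase RTDB with a GET request. It mirrors the URL pattern\n",
"# used above and assumes the same database layout ('/DEPAN' holding '0' or '1').\n",
"def read_lamp_state(path='/DEPAN'):\n",
"    url = f'https://smart-home-e64ae-default-rtdb.firebaseio.com/{path}.json?auth=AIzaSyCmquF32M-zZDpW_Hswg9ZvREgikmteJMY'\n",
"    response = requests.get(url)\n",
"    return response.json() if response.status_code == 200 else None\n",
"\n",
"# Example: print(read_lamp_state('/DEPAN'))"
]
},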
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "-E17U5INqFBS"
},
"outputs": [],
"source": [
"# Load Whisper model\n",
"model_m = whisper.load_model('medium')"
]
},
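{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Transcription sketch, not part of the original pipeline; the path below is\n",
"# a hypothetical example. transcribe() returns a dict whose 'text' field\n",
"# holds the transcription; language='id' forces Indonesian decoding, as in\n",
"# the handler below.\n",
"demo_audio = '/content/drive/MyDrive/Skripsi/sample_voice.wav'  # hypothetical path\n",
"if os.path.exists(demo_audio):\n",
"    result = model_m.transcribe(demo_audio, language='id', fp16=False)\n",
"    print(result['text'])"
]
},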
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "I4zG7F7-d9Zw"
},
"outputs": [],
"source": [
"def voice_message_handler(update, context):\n",
"    # Get the audio file from the voice message\n",
"    file_id = update.message.voice.file_id\n",
"    new_file = context.bot.get_file(file_id)\n",
"    file_path = new_file.download()\n",
"\n",
"    # Extract MFCC features from the audio file\n",
"    mfcc_features = extract_mfcc(file_path)\n",
"\n",
"    # Reshape MFCC features for prediction\n",
"    X_testing = mfcc_features.reshape(1, -1)\n",
"\n",
"    # Predict the user for the testing data using the trained model\n",
"    start_time = time.time()\n",
"    y_pred_user = model.predict(X_testing)\n",
"    user_prediction_time = (time.time() - start_time) * 1000\n",
"\n",
"    # Speech-to-text label\n",
"    start_time = time.time()\n",
"    translation = model_m.transcribe(file_path, language='id', fp16=False)['text']\n",
"    translation_time = (time.time() - start_time) * 1000\n",
"\n",
"    # Check if the predicted label is within the expected range of labels\n",
"    expected_users = ['HILMI', 'VASYILLA', 'TANTI', 'YUDHA']\n",
"    if y_pred_user[0] in expected_users:\n",
"        update.message.reply_text(f\"Predicted User: {y_pred_user[0]} (Response Time: {user_prediction_time:.2f} ms)\")\n",
"\n",
"        # If the user is recognized, match the Whisper speech-to-text transcription against the lamp commands\n",
"        if 'pan menyala' in translation.lower():\n",
"            send_command_to_firebase(\"Depan Menyala\")\n",
"            update.message.reply_text(f\"Depan Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'an manyala' in translation.lower():\n",
"            send_command_to_firebase(\"Depan Menyala\")\n",
"            update.message.reply_text(f\"Depan Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'an mati' in translation.lower():\n",
"            send_command_to_firebase(\"Depan Mati\")\n",
"            update.message.reply_text(f\"Depan Mati (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'samping menyala' in translation.lower():\n",
"            send_command_to_firebase(\"Samping Menyala\")\n",
"            update.message.reply_text(f\"Samping Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'ing menyala' in translation.lower():\n",
"            send_command_to_firebase(\"Samping Menyala\")\n",
"            update.message.reply_text(f\"Samping Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'samping mati' in translation.lower():\n",
"            send_command_to_firebase(\"Samping Mati\")\n",
"            update.message.reply_text(f\"Samping Mati (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'ing mati' in translation.lower():\n",
"            send_command_to_firebase(\"Samping Mati\")\n",
"            update.message.reply_text(f\"Samping Mati (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'tengah menyala' in translation.lower():\n",
"            send_command_to_firebase(\"Tengah Menyala\")\n",
"            update.message.reply_text(f\"Tengah Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'ngah, menyala' in translation.lower():\n",
"            send_command_to_firebase(\"Tengah Menyala\")\n",
"            update.message.reply_text(f\"Tengah Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'ah, manyalah' in translation.lower():\n",
"            send_command_to_firebase(\"Tengah Menyala\")\n",
"            update.message.reply_text(f\"Tengah Menyala (Response Time: {translation_time:.2f} ms)\")\n",
"        elif 'tengah mati' in translation.lower():\n",
"            send_command_to_firebase(\"Tengah Mati\")\n",
"            update.message.reply_text(f\"Tengah Mati (Response Time: {translation_time:.2f} ms)\")\n",
"        else:\n",
"            update.message.reply_text(\"Label tidak dikenali\")\n",
"    else:\n",
"        update.message.reply_text(\"User tidak dikenali\")\n",
"\n",
"    # Delete the downloaded audio file\n",
"    os.remove(file_path)"
]
},
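{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional refactoring sketch, not used by the handler above: the long\n",
"# if/elif chain can be expressed as a keyword-to-label table checked in\n",
"# order. match_command is a hypothetical helper; the keyword fragments are\n",
"# copied verbatim from the handler.\n",
"COMMAND_KEYWORDS = [\n",
"    ('pan menyala', 'Depan Menyala'),\n",
"    ('an manyala', 'Depan Menyala'),\n",
"    ('an mati', 'Depan Mati'),\n",
"    ('samping menyala', 'Samping Menyala'),\n",
"    ('ing menyala', 'Samping Menyala'),\n",
"    ('samping mati', 'Samping Mati'),\n",
"    ('ing mati', 'Samping Mati'),\n",
"    ('tengah menyala', 'Tengah Menyala'),\n",
"    ('ngah, menyala', 'Tengah Menyala'),\n",
"    ('ah, manyalah', 'Tengah Menyala'),\n",
"    ('tengah mati', 'Tengah Mati')\n",
"]\n",
"\n",
"def match_command(transcription):\n",
"    # Return the first label whose keyword fragment appears in the transcription.\n",
"    text = transcription.lower()\n",
"    for keyword, label in COMMAND_KEYWORDS:\n",
"        if keyword in text:\n",
"            return label\n",
"    return None\n",
"\n",
"# Example: match_command('tolong lampu depan menyala') returns 'Depan Menyala'"
]
},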
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"background_save": true,
"base_uri": "https://localhost:8080/"
},
"id": "xrB_fZEWeVn3",
"outputId": "0ba85899-8921-497f-d550-dc2165a2c5d2"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Depan Menyala\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n",
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Depan Mati\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Samping Menyala\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Samping Mati\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n",
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n",
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n",
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Tengah Menyala\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does not have valid feature names, but SVC was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Firebase update successful for Tengah Mati\n"
]
}
],
"source": [
"updater = Updater(\"6727323206:AAHLV35b-yi3bZUU3LPHYe0p2M_sZWWSUWw\", use_context=True)\n",
"dp = updater.dispatcher\n",
"\n",
"# Add a handler for voice messages\n",
"dp.add_handler(MessageHandler(Filters.voice, voice_message_handler))\n",
"\n",
"# Start the bot\n",
"updater.start_polling()\n",
"updater.idle()"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "T4",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}