commit 36fe9f603eadaa90701231d3458bd5430ee000c9 Author: aime.rolandi Date: Tue Jun 17 08:48:55 2025 -0300 minimo reconocimiento de voz diff --git a/minimal_server/RealtimeSTT/__init__.py b/minimal_server/RealtimeSTT/__init__.py new file mode 100644 index 00000000..e6179cc3 --- /dev/null +++ b/minimal_server/RealtimeSTT/__init__.py @@ -0,0 +1,3 @@ +from .audio_recorder import AudioToTextRecorder +from .audio_recorder_client import AudioToTextRecorderClient +from .audio_input import AudioInput \ No newline at end of file diff --git a/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-310.pyc b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 00000000..25fc887a Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-310.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 00000000..7f197ce3 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-312.pyc b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d6c7ea0a Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-312.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-313.pyc b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 00000000..68a4b1eb Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/__init__.cpython-313.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-310.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-310.pyc new file mode 100644 index 00000000..2348d083 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-310.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-311.pyc new file mode 100644 index 00000000..16ea91fd Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-313.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-313.pyc new file mode 100644 index 00000000..544a8687 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_input.cpython-313.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-310.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-310.pyc new file mode 100644 index 00000000..15089ecc Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-310.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-311.pyc new file mode 100644 index 00000000..68d808af Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-312.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-312.pyc new file mode 100644 index 00000000..7cedcb8f Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-312.pyc differ diff --git 
a/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-313.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-313.pyc new file mode 100644 index 00000000..38996aab Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder.cpython-313.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-310.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-310.pyc new file mode 100644 index 00000000..a1506f35 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-310.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-311.pyc new file mode 100644 index 00000000..673e2ca4 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-313.pyc b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-313.pyc new file mode 100644 index 00000000..f4d27f73 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/audio_recorder_client.cpython-313.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-310.pyc b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-310.pyc new file mode 100644 index 00000000..3afb4fb8 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-310.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-311.pyc new file mode 100644 index 00000000..b04674e4 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-313.pyc b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-313.pyc new file mode 100644 index 00000000..f5f5a64a Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/safepipe.cpython-313.pyc differ diff --git a/minimal_server/RealtimeSTT/__pycache__/server.cpython-311.pyc b/minimal_server/RealtimeSTT/__pycache__/server.cpython-311.pyc new file mode 100644 index 00000000..e95b5342 Binary files /dev/null and b/minimal_server/RealtimeSTT/__pycache__/server.cpython-311.pyc differ diff --git a/minimal_server/RealtimeSTT/audio_input.py b/minimal_server/RealtimeSTT/audio_input.py new file mode 100644 index 00000000..6fc68c2f --- /dev/null +++ b/minimal_server/RealtimeSTT/audio_input.py @@ -0,0 +1,220 @@ +from colorama import init, Fore, Style +from scipy.signal import butter, filtfilt, resample_poly +import pyaudio +import logging + +DESIRED_RATE = 16000 +CHUNK_SIZE = 1024 +AUDIO_FORMAT = pyaudio.paInt16 +CHANNELS = 1 + +class AudioInput: + def __init__( + self, + input_device_index: int = None, + debug_mode: bool = False, + target_samplerate: int = DESIRED_RATE, + chunk_size: int = CHUNK_SIZE, + audio_format: int = AUDIO_FORMAT, + channels: int = CHANNELS, + resample_to_target: bool = True, + ): + + self.input_device_index = input_device_index + self.debug_mode = debug_mode + self.audio_interface = None + self.stream = None + self.device_sample_rate = None + self.target_samplerate = target_samplerate + self.chunk_size = chunk_size + self.audio_format = audio_format + self.channels = channels + self.resample_to_target = resample_to_target + + def get_supported_sample_rates(self, 
device_index): + """Test which standard sample rates are supported by the specified device.""" + standard_rates = [8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000] + supported_rates = [] + + device_info = self.audio_interface.get_device_info_by_index(device_index) + max_channels = device_info.get('maxInputChannels') # Changed from maxOutputChannels + + for rate in standard_rates: + try: + if self.audio_interface.is_format_supported( + rate, + input_device=device_index, # Changed to input_device + input_channels=max_channels, # Changed to input_channels + input_format=self.audio_format, # Changed to input_format + ): + supported_rates.append(rate) + except: + continue + return supported_rates + + def _get_best_sample_rate(self, actual_device_index, desired_rate): + """Determines the best available sample rate for the device.""" + try: + device_info = self.audio_interface.get_device_info_by_index(actual_device_index) + supported_rates = self.get_supported_sample_rates(actual_device_index) + + if desired_rate in supported_rates: + return desired_rate + + return max(supported_rates) + + # lower_rates = [r for r in supported_rates if r <= desired_rate] + # if lower_rates: + # return max(lower_rates) + + # higher_rates = [r for r in supported_rates if r > desired_rate] + # if higher_rates: + # return min(higher_rates) + + return int(device_info.get('defaultSampleRate', 44100)) + + except Exception as e: + logging.warning(f"Error determining sample rate: {e}") + return 44100 # Safe fallback + + def list_devices(self): + """List all available audio input devices with supported sample rates.""" + try: + init() # Initialize colorama + self.audio_interface = pyaudio.PyAudio() + device_count = self.audio_interface.get_device_count() + + print(f"Available audio input devices:") + #print(f"{Fore.LIGHTBLUE_EX}Available audio input devices:{Style.RESET_ALL}") + for i in range(device_count): + device_info = self.audio_interface.get_device_info_by_index(i) + device_name = device_info.get('name') + max_input_channels = device_info.get('maxInputChannels', 0) + + if max_input_channels > 0: # Only consider devices with input capabilities + supported_rates = self.get_supported_sample_rates(i) + print(f"{Fore.LIGHTGREEN_EX}Device {Style.RESET_ALL}{i}{Fore.LIGHTGREEN_EX}: {device_name}{Style.RESET_ALL}") + + # Format each rate in cyan + if supported_rates: + rates_formatted = ", ".join([f"{Fore.CYAN}{rate}{Style.RESET_ALL}" for rate in supported_rates]) + print(f" {Fore.YELLOW}Supported sample rates: {rates_formatted}{Style.RESET_ALL}") + else: + print(f" {Fore.YELLOW}Supported sample rates: None{Style.RESET_ALL}") + + except Exception as e: + print(f"Error listing devices: {e}") + finally: + if self.audio_interface: + self.audio_interface.terminate() + + def setup(self): + """Initialize audio interface and open stream""" + try: + self.audio_interface = pyaudio.PyAudio() + + if self.debug_mode: + print(f"Input device index: {self.input_device_index}") + actual_device_index = (self.input_device_index if self.input_device_index is not None + else self.audio_interface.get_default_input_device_info()['index']) + + if self.debug_mode: + print(f"Actual selected device index: {actual_device_index}") + self.input_device_index = actual_device_index + self.device_sample_rate = self._get_best_sample_rate(actual_device_index, self.target_samplerate) + + if self.debug_mode: + print(f"Setting up audio on device {self.input_device_index} with sample rate {self.device_sample_rate}") + + try: + self.stream = 
self.audio_interface.open( + format=self.audio_format, + channels=self.channels, + rate=self.device_sample_rate, + input=True, + frames_per_buffer=self.chunk_size, + input_device_index=self.input_device_index, + ) + if self.debug_mode: + print(f"Audio recording initialized successfully at {self.device_sample_rate} Hz") + return True + except Exception as e: + print(f"Failed to initialize audio stream at {self.device_sample_rate} Hz: {e}") + return False + + except Exception as e: + print(f"Error initializing audio recording: {e}") + if self.audio_interface: + self.audio_interface.terminate() + return False + + def lowpass_filter(self, signal, cutoff_freq, sample_rate): + """ + Apply a low-pass Butterworth filter to prevent aliasing in the signal. + + Args: + signal (np.ndarray): Input audio signal to filter + cutoff_freq (float): Cutoff frequency in Hz + sample_rate (float): Sampling rate of the input signal in Hz + + Returns: + np.ndarray: Filtered audio signal + + Notes: + - Uses a 5th order Butterworth filter + - Applies zero-phase filtering using filtfilt + """ + # Calculate the Nyquist frequency (half the sample rate) + nyquist_rate = sample_rate / 2.0 + + # Normalize cutoff frequency to Nyquist rate (required by butter()) + normal_cutoff = cutoff_freq / nyquist_rate + + # Design the Butterworth filter + b, a = butter(5, normal_cutoff, btype='low', analog=False) + + # Apply zero-phase filtering (forward and backward) + filtered_signal = filtfilt(b, a, signal) + return filtered_signal + + def resample_audio(self, pcm_data, target_sample_rate, original_sample_rate): + """ + Filter and resample audio data to a target sample rate. + + Args: + pcm_data (np.ndarray): Input audio data + target_sample_rate (int): Desired output sample rate in Hz + original_sample_rate (int): Original sample rate of input in Hz + + Returns: + np.ndarray: Resampled audio data + + Notes: + - Applies anti-aliasing filter before resampling + - Uses polyphase filtering for high-quality resampling + """ + if target_sample_rate < original_sample_rate: + # Downsampling with low-pass filter + pcm_filtered = self.lowpass_filter(pcm_data, target_sample_rate / 2, original_sample_rate) + resampled = resample_poly(pcm_filtered, target_sample_rate, original_sample_rate) + else: + # Upsampling without low-pass filter + resampled = resample_poly(pcm_data, target_sample_rate, original_sample_rate) + return resampled + + def read_chunk(self): + """Read a chunk of audio data""" + return self.stream.read(self.chunk_size, exception_on_overflow=False) + + def cleanup(self): + """Clean up audio resources""" + try: + if self.stream: + self.stream.stop_stream() + self.stream.close() + self.stream = None + if self.audio_interface: + self.audio_interface.terminate() + self.audio_interface = None + except Exception as e: + print(f"Error cleaning up audio resources: {e}") diff --git a/minimal_server/RealtimeSTT/audio_recorder.py b/minimal_server/RealtimeSTT/audio_recorder.py new file mode 100644 index 00000000..215f1db2 --- /dev/null +++ b/minimal_server/RealtimeSTT/audio_recorder.py @@ -0,0 +1,2850 @@ +""" + +The AudioToTextRecorder class in the provided code facilitates +fast speech-to-text transcription. + +The class employs the faster_whisper library to transcribe the recorded audio +into text using machine learning models, which can be run either on a GPU or +CPU. Voice activity detection (VAD) is built in, meaning the software can +automatically start or stop recording based on the presence or absence of +speech. 
It integrates wake word detection through the pvporcupine library, +allowing the software to initiate recording when a specific word or phrase +is spoken. The system provides real-time feedback and can be further +customized. + +Features: +- Voice Activity Detection: Automatically starts/stops recording when speech + is detected or when speech ends. +- Wake Word Detection: Starts recording when a specified wake word (or words) + is detected. +- Event Callbacks: Customizable callbacks for when recording starts + or finishes. +- Fast Transcription: Returns the transcribed text from the audio as fast + as possible. + +Author: Kolja Beigel + +""" + +from faster_whisper import WhisperModel, BatchedInferencePipeline +from typing import Iterable, List, Optional, Union +from openwakeword.model import Model +import torch.multiprocessing as mp +from scipy.signal import resample +import signal as system_signal +from ctypes import c_bool +from scipy import signal +from .safepipe import SafePipe +import soundfile as sf +import faster_whisper +import openwakeword +import collections +import numpy as np +import pvporcupine +import traceback +import threading +import webrtcvad +import datetime +import platform +import logging +import struct +import base64 +import queue +import torch +import halo +import time +import copy +import os +import re +import gc + +# Named logger for this module. +logger = logging.getLogger("realtimestt") +logger.propagate = False + +# Set OpenMP runtime duplicate library handling to OK (Use only for development!) +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' + +INIT_MODEL_TRANSCRIPTION = "tiny" +INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny" +INIT_REALTIME_PROCESSING_PAUSE = 0.2 +INIT_REALTIME_INITIAL_PAUSE = 0.2 +INIT_SILERO_SENSITIVITY = 0.4 +INIT_WEBRTC_SENSITIVITY = 3 +INIT_POST_SPEECH_SILENCE_DURATION = 0.6 +INIT_MIN_LENGTH_OF_RECORDING = 0.5 +INIT_MIN_GAP_BETWEEN_RECORDINGS = 0 +INIT_WAKE_WORDS_SENSITIVITY = 0.6 +INIT_PRE_RECORDING_BUFFER_DURATION = 1.0 +INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0 +INIT_WAKE_WORD_TIMEOUT = 5.0 +INIT_WAKE_WORD_BUFFER_DURATION = 0.1 +ALLOWED_LATENCY_LIMIT = 100 + +TIME_SLEEP = 0.02 +SAMPLE_RATE = 16000 +BUFFER_SIZE = 512 +INT16_MAX_ABS_VALUE = 32768.0 + +INIT_HANDLE_BUFFER_OVERFLOW = False +if platform.system() != 'Darwin': + INIT_HANDLE_BUFFER_OVERFLOW = True + + +class TranscriptionWorker: + def __init__(self, conn, stdout_pipe, model_path, download_root, compute_type, gpu_device_index, device, + ready_event, shutdown_event, interrupt_stop_event, beam_size, initial_prompt, suppress_tokens, + batch_size, faster_whisper_vad_filter, normalize_audio): + self.conn = conn + self.stdout_pipe = stdout_pipe + self.model_path = model_path + self.download_root = download_root + self.compute_type = compute_type + self.gpu_device_index = gpu_device_index + self.device = device + self.ready_event = ready_event + self.shutdown_event = shutdown_event + self.interrupt_stop_event = interrupt_stop_event + self.beam_size = beam_size + self.initial_prompt = initial_prompt + self.suppress_tokens = suppress_tokens + self.batch_size = batch_size + self.faster_whisper_vad_filter = faster_whisper_vad_filter + self.normalize_audio = normalize_audio + self.queue = queue.Queue() + + def custom_print(self, *args, **kwargs): + message = ' '.join(map(str, args)) + try: + self.stdout_pipe.send(message) + except (BrokenPipeError, EOFError, OSError): + pass + + def poll_connection(self): + while not self.shutdown_event.is_set(): + try: + if self.conn.poll(0.01): + data = 
self.conn.recv() + self.queue.put(data) + else: + time.sleep(TIME_SLEEP) + except OSError as e: + if hasattr(e, "winerror") and e.winerror == 6: + logging.info("Connection closed (WinError 6), exiting the polling loop.") + break + else: + logging.error(f"OSError while receiving data: {e}", exc_info=True) + time.sleep(TIME_SLEEP) + except Exception as e: + logging.error(f"Error receiving data from the connection: {e}", exc_info=True) + time.sleep(TIME_SLEEP) + + def run(self): + if __name__ == "__main__": + system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN) + __builtins__['print'] = self.custom_print + + logging.info(f"Initializing faster_whisper main transcription model {self.model_path}") + + try: + model = faster_whisper.WhisperModel( + model_size_or_path=self.model_path, + device=self.device, + compute_type=self.compute_type, + device_index=self.gpu_device_index, + download_root=self.download_root, + ) + # Create a short dummy audio array, for example 1 second of silence at 16 kHz + if self.batch_size > 0: + model = BatchedInferencePipeline(model=model) + + # Run a warm-up transcription + current_dir = os.path.dirname(os.path.realpath(__file__)) + warmup_audio_path = os.path.join( + current_dir, "warmup_audio.wav" + ) + warmup_audio_data, _ = sf.read(warmup_audio_path, dtype="float32") + segments, info = model.transcribe(warmup_audio_data, language="es", beam_size=1) + model_warmup_transcription = " ".join(segment.text for segment in segments) + except Exception as e: + logging.exception(f"Error initializing main faster_whisper transcription model: {e}") + raise + + self.ready_event.set() + logging.debug("Faster_whisper main speech to text transcription model initialized successfully") + + # Start the polling thread + polling_thread = threading.Thread(target=self.poll_connection) + polling_thread.start() + + try: + while not self.shutdown_event.is_set(): + try: + audio, language, use_prompt = self.queue.get(timeout=0.1) + try: + logging.debug(f"Transcribing audio with language {language}") + start_t = time.time() + + # normalize audio so its peak sits at 95% of full scale + if audio is not None and audio.size > 0: + if self.normalize_audio: + peak = np.max(np.abs(audio)) + if peak > 0: + audio = (audio / peak) * 0.95 + else: + logging.error("Received None audio for transcription") + self.conn.send(('error', "Received None audio for transcription")) + continue + + prompt = None + if use_prompt: + prompt = self.initial_prompt if self.initial_prompt else None + + if self.batch_size > 0: + segments, info = model.transcribe( + audio, + # vad_filter=True, + language=language if language else None, + beam_size=self.beam_size, + initial_prompt=prompt, + suppress_tokens=self.suppress_tokens, + batch_size=self.batch_size, + vad_filter=self.faster_whisper_vad_filter + ) + else: + segments, info = model.transcribe( + audio, + # vad_filter=True, + language=language if language else None, + beam_size=self.beam_size, + initial_prompt=prompt, + suppress_tokens=self.suppress_tokens, + vad_filter=self.faster_whisper_vad_filter + ) + elapsed = time.time() - start_t + transcription = " ".join(seg.text for seg in segments).strip() + logging.debug(f"Final text detected with main model: {transcription} in {elapsed:.4f}s") + self.conn.send(('success', (transcription, info))) + except Exception as e: + logging.error(f"General error in transcription: {e}", exc_info=True) + self.conn.send(('error', str(e))) + except queue.Empty: + continue + except KeyboardInterrupt: + self.interrupt_stop_event.set() +
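
The worker above exchanges messages with the parent process over a pipe: the parent sends (audio, language, use_prompt) tuples and receives either ('success', (transcription, info)) or ('error', message) in return. A minimal parent-side sketch of that protocol, assuming a connection pair created with SafePipe() as done further down in this file; transcribe_via_worker is an illustrative helper name, not part of the commit:

import numpy as np

def transcribe_via_worker(parent_pipe, audio_f32: np.ndarray, language: str = "es"):
    # One job: float32 mono audio at 16 kHz, a language hint, and a flag that
    # tells the worker whether to apply the configured initial prompt.
    parent_pipe.send((audio_f32, language, True))
    status, payload = parent_pipe.recv()        # blocks until the worker replies
    if status == 'success':
        transcription, info = payload
        return transcription
    raise RuntimeError(f"transcription failed: {payload}")
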
logging.debug("Transcription worker process finished due to KeyboardInterrupt") + break + except Exception as e: + logging.error(f"General error in processing queue item: {e}", exc_info=True) + finally: + __builtins__['print'] = print # Restore the original print function + self.conn.close() + self.stdout_pipe.close() + self.shutdown_event.set() # Ensure the polling thread will stop + polling_thread.join() # Wait for the polling thread to finish + + +class bcolors: + OKGREEN = '\033[92m' # Green for active speech detection + WARNING = '\033[93m' # Yellow for silence detection + ENDC = '\033[0m' # Reset to default color + + +class AudioToTextRecorder: + """ + A class responsible for capturing audio from the microphone, detecting + voice activity, and then transcribing the captured audio using the + `faster_whisper` model. + """ + + def __init__(self, + model: str = INIT_MODEL_TRANSCRIPTION, + download_root: str = None, + language: str = "", + compute_type: str = "default", + input_device_index: int = None, + gpu_device_index: Union[int, List[int]] = 0, + device: str = "cuda", + on_recording_start=None, + on_recording_stop=None, + on_transcription_start=None, + ensure_sentence_starting_uppercase=True, + ensure_sentence_ends_with_period=True, + use_microphone=True, + spinner=True, + level=logging.WARNING, + batch_size: int = 16, + + # Realtime transcription parameters + enable_realtime_transcription=False, + use_main_model_for_realtime=False, + realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME, + realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE, + init_realtime_after_seconds=INIT_REALTIME_INITIAL_PAUSE, + on_realtime_transcription_update=None, + on_realtime_transcription_stabilized=None, + realtime_batch_size: int = 16, + + # Voice activation parameters + silero_sensitivity: float = INIT_SILERO_SENSITIVITY, + silero_use_onnx: bool = False, + silero_deactivity_detection: bool = False, + webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY, + post_speech_silence_duration: float = ( + INIT_POST_SPEECH_SILENCE_DURATION + ), + min_length_of_recording: float = ( + INIT_MIN_LENGTH_OF_RECORDING + ), + min_gap_between_recordings: float = ( + INIT_MIN_GAP_BETWEEN_RECORDINGS + ), + pre_recording_buffer_duration: float = ( + INIT_PRE_RECORDING_BUFFER_DURATION + ), + on_vad_start=None, + on_vad_stop=None, + on_vad_detect_start=None, + on_vad_detect_stop=None, + on_turn_detection_start=None, + on_turn_detection_stop=None, + + # Wake word parameters + wakeword_backend: str = "", + openwakeword_model_paths: str = None, + openwakeword_inference_framework: str = "onnx", + wake_words: str = "", + wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY, + wake_word_activation_delay: float = ( + INIT_WAKE_WORD_ACTIVATION_DELAY + ), + wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT, + wake_word_buffer_duration: float = INIT_WAKE_WORD_BUFFER_DURATION, + on_wakeword_detected=None, + on_wakeword_timeout=None, + on_wakeword_detection_start=None, + on_wakeword_detection_end=None, + on_recorded_chunk=None, + debug_mode=False, + handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW, + beam_size: int = 5, + beam_size_realtime: int = 3, + buffer_size: int = BUFFER_SIZE, + sample_rate: int = SAMPLE_RATE, + initial_prompt: Optional[Union[str, Iterable[int]]] = None, + initial_prompt_realtime: Optional[Union[str, Iterable[int]]] = None, + suppress_tokens: Optional[List[int]] = [-1], + print_transcription_time: bool = False, + early_transcription_on_silence: int = 0, + allowed_latency_limit: int = 
ALLOWED_LATENCY_LIMIT, + no_log_file: bool = False, + use_extended_logging: bool = False, + faster_whisper_vad_filter: bool = True, + normalize_audio: bool = False, + start_callback_in_new_thread: bool = False, + ): + """ + Initializes an audio recorder and transcription + and wake word detection. + + Args: + - model (str, default="tiny"): Specifies the size of the transcription + model to use or the path to a converted model directory. + Valid options are 'tiny', 'tiny', 'base', 'base', + 'small', 'small', 'medium', 'medium', 'large-v1', + 'large-v2'. + If a specific size is provided, the model is downloaded + from the Hugging Face Hub. + - download_root (str, default=None): Specifies the root path were the Whisper models + are downloaded to. When empty, the default is used. + - language (str, default=""): Language code for speech-to-text engine. + If not specified, the model will attempt to detect the language + automatically. + - compute_type (str, default="default"): Specifies the type of + computation to be used for transcription. + See https://opennmt.net/CTranslate2/quantization.html. + - input_device_index (int, default=0): The index of the audio input + device to use. + - gpu_device_index (int, default=0): Device ID to use. + The model can also be loaded on multiple GPUs by passing a list of + IDs (e.g. [0, 1, 2, 3]). In that case, multiple transcriptions can + run in parallel when transcribe() is called from multiple Python + threads + - device (str, default="cuda"): Device for model to use. Can either be + "cuda" or "cpu". + - on_recording_start (callable, default=None): Callback function to be + called when recording of audio to be transcripted starts. + - on_recording_stop (callable, default=None): Callback function to be + called when recording of audio to be transcripted stops. + - on_transcription_start (callable, default=None): Callback function + to be called when transcription of audio to text starts. + - ensure_sentence_starting_uppercase (bool, default=True): Ensures + that every sentence detected by the algorithm starts with an + uppercase letter. + - ensure_sentence_ends_with_period (bool, default=True): Ensures that + every sentence that doesn't end with punctuation such as "?", "!" + ends with a period + - use_microphone (bool, default=True): Specifies whether to use the + microphone as the audio input source. If set to False, the + audio input source will be the audio data sent through the + feed_audio() method. + - spinner (bool, default=True): Show spinner animation with current + state. + - level (int, default=logging.WARNING): Logging level. + - batch_size (int, default=16): Batch size for the main transcription + - enable_realtime_transcription (bool, default=False): Enables or + disables real-time transcription of audio. When set to True, the + audio will be transcribed continuously as it is being recorded. + - use_main_model_for_realtime (str, default=False): + If True, use the main transcription model for both regular and + real-time transcription. If False, use a separate model specified + by realtime_model_type for real-time transcription. + Using a single model can save memory and potentially improve + performance, but may not be optimized for real-time processing. + Using separate models allows for a smaller, faster model for + real-time transcription while keeping a more accurate model for + final transcription. + - realtime_model_type (str, default="tiny"): Specifies the machine + learning model to be used for real-time transcription. 
Valid + options include 'tiny', 'tiny', 'base', 'base', 'small', + 'small', 'medium', 'medium', 'large-v1', 'large-v2'. + - realtime_processing_pause (float, default=0.1): Specifies the time + interval in seconds after a chunk of audio gets transcribed. Lower + values will result in more "real-time" (frequent) transcription + updates but may increase computational load. + - init_realtime_after_seconds (float, default=0.2): Specifies the + initial waiting time after the recording was initiated before + yielding the first realtime transcription + - on_realtime_transcription_update = A callback function that is + triggered whenever there's an update in the real-time + transcription. The function is called with the newly transcribed + text as its argument. + - on_realtime_transcription_stabilized = A callback function that is + triggered when the transcribed text stabilizes in quality. The + stabilized text is generally more accurate but may arrive with a + slight delay compared to the regular real-time updates. + - realtime_batch_size (int, default=16): Batch size for the real-time + transcription model. + - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity + for the Silero Voice Activity Detection model ranging from 0 + (least sensitive) to 1 (most sensitive). Default is 0.5. + - silero_use_onnx (bool, default=False): Enables usage of the + pre-trained model from Silero in the ONNX (Open Neural Network + Exchange) format instead of the PyTorch format. This is + recommended for faster performance. + - silero_deactivity_detection (bool, default=False): Enables the Silero + model for end-of-speech detection. More robust against background + noise. Utilizes additional GPU resources but improves accuracy in + noisy environments. When False, uses the default WebRTC VAD, + which is more sensitive but may continue recording longer due + to background sounds. + - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity + for the WebRTC Voice Activity Detection engine ranging from 0 + (least aggressive / most sensitive) to 3 (most aggressive, + least sensitive). Default is 3. + - post_speech_silence_duration (float, default=0.2): Duration in + seconds of silence that must follow speech before the recording + is considered to be completed. This ensures that any brief + pauses during speech don't prematurely end the recording. + - min_gap_between_recordings (float, default=1.0): Specifies the + minimum time interval in seconds that should exist between the + end of one recording session and the beginning of another to + prevent rapid consecutive recordings. + - min_length_of_recording (float, default=1.0): Specifies the minimum + duration in seconds that a recording session should last to ensure + meaningful audio capture, preventing excessively short or + fragmented recordings. + - pre_recording_buffer_duration (float, default=0.2): Duration in + seconds for the audio buffer to maintain pre-roll audio + (compensates speech activity detection latency) + - on_vad_start (callable, default=None): Callback function to be called + when the system detected the start of voice activity presence. + - on_vad_stop (callable, default=None): Callback function to be called + when the system detected the stop (end) of voice activity presence. + - on_vad_detect_start (callable, default=None): Callback function to + be called when the system listens for voice activity. This is not + called when VAD actually happens (use on_vad_start for this), but + when the system starts listening for it. 
+ - on_vad_detect_stop (callable, default=None): Callback function to be + called when the system stops listening for voice activity. This is + not called when VAD actually stops (use on_vad_stop for this), but + when the system stops listening for it. + - on_turn_detection_start (callable, default=None): Callback function + to be called when the system starts to listen for a turn of speech. + - on_turn_detection_stop (callable, default=None): Callback function to + be called when the system stops listening for a turn of speech. + - wakeword_backend (str, default=""): Specifies the backend library to + use for wake word detection. Supported options include 'pvporcupine' + for using the Porcupine wake word engine or 'oww' for using the + OpenWakeWord engine. + - wakeword_backend (str, default="pvporcupine"): Specifies the backend + library to use for wake word detection. Supported options include + 'pvporcupine' for using the Porcupine wake word engine or 'oww' for + using the OpenWakeWord engine. + - openwakeword_model_paths (str, default=None): Comma-separated paths + to model files for the openwakeword library. These paths point to + custom models that can be used for wake word detection when the + openwakeword library is selected as the wakeword_backend. + - openwakeword_inference_framework (str, default="onnx"): Specifies + the inference framework to use with the openwakeword library. + Can be either 'onnx' for Open Neural Network Exchange format + or 'tflite' for TensorFlow Lite. + - wake_words (str, default=""): Comma-separated string of wake words to + initiate recording when using the 'pvporcupine' wakeword backend. + Supported wake words include: 'alexa', 'americano', 'blueberry', + 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google', + 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine', + 'terminator'. For the 'openwakeword' backend, wake words are + automatically extracted from the provided model files, so specifying + them here is not necessary. + - wake_words_sensitivity (float, default=0.5): Sensitivity for wake + word detection, ranging from 0 (least sensitive) to 1 (most + sensitive). Default is 0.5. + - wake_word_activation_delay (float, default=0): Duration in seconds + after the start of monitoring before the system switches to wake + word activation if no voice is initially detected. If set to + zero, the system uses wake word activation immediately. + - wake_word_timeout (float, default=5): Duration in seconds after a + wake word is recognized. If no subsequent voice activity is + detected within this window, the system transitions back to an + inactive state, awaiting the next wake word or voice activation. + - wake_word_buffer_duration (float, default=0.1): Duration in seconds + to buffer audio data during wake word detection. This helps in + cutting out the wake word from the recording buffer so it does not + falsely get detected along with the following spoken text, ensuring + cleaner and more accurate transcription start triggers. + Increase this if parts of the wake word get detected as text. + - on_wakeword_detected (callable, default=None): Callback function to + be called when a wake word is detected. 
+ - on_wakeword_timeout (callable, default=None): Callback function to + be called when the system goes back to an inactive state after when + no speech was detected after wake word activation + - on_wakeword_detection_start (callable, default=None): Callback + function to be called when the system starts to listen for wake + words + - on_wakeword_detection_end (callable, default=None): Callback + function to be called when the system stops to listen for + wake words (e.g. because of timeout or wake word detected) + - on_recorded_chunk (callable, default=None): Callback function to be + called when a chunk of audio is recorded. The function is called + with the recorded audio chunk as its argument. + - debug_mode (bool, default=False): If set to True, the system will + print additional debug information to the console. + - handle_buffer_overflow (bool, default=True): If set to True, the system + will log a warning when an input overflow occurs during recording and + remove the data from the buffer. + - beam_size (int, default=5): The beam size to use for beam search + decoding. + - beam_size_realtime (int, default=3): The beam size to use for beam + search decoding in the real-time transcription model. + - buffer_size (int, default=512): The buffer size to use for audio + recording. Changing this may break functionality. + - sample_rate (int, default=16000): The sample rate to use for audio + recording. Changing this will very probably functionality (as the + WebRTC VAD model is very sensitive towards the sample rate). + - initial_prompt (str or iterable of int, default=None): Initial + prompt to be fed to the main transcription model. + - initial_prompt_realtime (str or iterable of int, default=None): + Initial prompt to be fed to the real-time transcription model. + - suppress_tokens (list of int, default=[-1]): Tokens to be suppressed + from the transcription output. + - print_transcription_time (bool, default=False): Logs processing time + of main model transcription + - early_transcription_on_silence (int, default=0): If set, the + system will transcribe audio faster when silence is detected. + Transcription will start after the specified milliseconds, so + keep this value lower than post_speech_silence_duration. + Ideally around post_speech_silence_duration minus the estimated + transcription time with the main model. + If silence lasts longer than post_speech_silence_duration, the + recording is stopped, and the transcription is submitted. If + voice activity resumes within this period, the transcription + is discarded. Results in faster final transcriptions to the cost + of additional GPU load due to some unnecessary final transcriptions. + - allowed_latency_limit (int, default=100): Maximal amount of chunks + that can be unprocessed in queue before discarding chunks. + - no_log_file (bool, default=False): Skips writing of debug log file. + - use_extended_logging (bool, default=False): Writes extensive + log messages for the recording worker, that processes the audio + chunks. + - faster_whisper_vad_filter (bool, default=True): If set to True, + the system will additionally use the VAD filter from the faster_whisper library + for voice activity detection. This filter is more robust against + background noise but requires additional GPU resources. + - normalize_audio (bool, default=False): If set to True, the system will + normalize the audio to a specific range before processing. This can + help improve the quality of the transcription. 
+ - start_callback_in_new_thread (bool, default=False): If set to True, + the callback functions will be executed in a + new thread. This can help improve performance by allowing the + callback to run concurrently with other operations. + + Raises: + Exception: Errors related to initializing transcription + model, wake word detection, or audio recording. + """ + + self.language = language + self.compute_type = compute_type + self.input_device_index = input_device_index + self.gpu_device_index = gpu_device_index + self.device = device + self.wake_words = wake_words + self.wake_word_activation_delay = wake_word_activation_delay + self.wake_word_timeout = wake_word_timeout + self.wake_word_buffer_duration = wake_word_buffer_duration + self.ensure_sentence_starting_uppercase = ( + ensure_sentence_starting_uppercase + ) + self.ensure_sentence_ends_with_period = ( + ensure_sentence_ends_with_period + ) + self.use_microphone = mp.Value(c_bool, use_microphone) + self.min_gap_between_recordings = min_gap_between_recordings + self.min_length_of_recording = min_length_of_recording + self.pre_recording_buffer_duration = pre_recording_buffer_duration + self.post_speech_silence_duration = post_speech_silence_duration + self.on_recording_start = on_recording_start + self.on_recording_stop = on_recording_stop + self.on_wakeword_detected = on_wakeword_detected + self.on_wakeword_timeout = on_wakeword_timeout + self.on_vad_start = on_vad_start + self.on_vad_stop = on_vad_stop + self.on_vad_detect_start = on_vad_detect_start + self.on_vad_detect_stop = on_vad_detect_stop + self.on_turn_detection_start = on_turn_detection_start + self.on_turn_detection_stop = on_turn_detection_stop + self.on_wakeword_detection_start = on_wakeword_detection_start + self.on_wakeword_detection_end = on_wakeword_detection_end + self.on_recorded_chunk = on_recorded_chunk + self.on_transcription_start = on_transcription_start + self.enable_realtime_transcription = enable_realtime_transcription + self.use_main_model_for_realtime = use_main_model_for_realtime + self.main_model_type = model + if not download_root: + download_root = None + self.download_root = download_root + self.realtime_model_type = realtime_model_type + self.realtime_processing_pause = realtime_processing_pause + self.init_realtime_after_seconds = init_realtime_after_seconds + self.on_realtime_transcription_update = ( + on_realtime_transcription_update + ) + self.on_realtime_transcription_stabilized = ( + on_realtime_transcription_stabilized + ) + self.debug_mode = debug_mode + self.handle_buffer_overflow = handle_buffer_overflow + self.beam_size = beam_size + self.beam_size_realtime = beam_size_realtime + self.allowed_latency_limit = allowed_latency_limit + self.batch_size = batch_size + self.realtime_batch_size = realtime_batch_size + + self.level = level + self.audio_queue = mp.Queue() + self.buffer_size = buffer_size + self.sample_rate = sample_rate + self.recording_start_time = 0 + self.recording_stop_time = 0 + self.last_recording_start_time = 0 + self.last_recording_stop_time = 0 + self.wake_word_detect_time = 0 + self.silero_check_time = 0 + self.silero_working = False + self.speech_end_silence_start = 0 + self.silero_sensitivity = silero_sensitivity + self.silero_deactivity_detection = silero_deactivity_detection + self.listen_start = 0 + self.spinner = spinner + self.halo = None + self.state = "inactive" + self.wakeword_detected = False + self.text_storage = [] + self.realtime_stabilized_text = "" + self.realtime_stabilized_safetext = "" + 
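
To ground the long parameter list documented above, here is a hedged usage sketch. It assumes this class keeps the blocking text() entry point of upstream RealtimeSTT, which is not shown in this excerpt; the __main__ guard matters because the recorder spawns worker processes with the "spawn" start method:

from RealtimeSTT import AudioToTextRecorder

def handle_text(text):
    print("final:", text)

if __name__ == "__main__":
    recorder = AudioToTextRecorder(
        model="tiny",                          # main transcription model
        language="es",                         # empty string lets the model auto-detect
        device="cpu",                          # or "cuda" when a GPU is available
        enable_realtime_transcription=True,
        on_realtime_transcription_update=lambda t: print("partial:", t),
        post_speech_silence_duration=0.6,      # silence that ends a phrase
    )
    while True:
        recorder.text(handle_text)             # blocks until one phrase is transcribed
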
self.is_webrtc_speech_active = False + self.is_silero_speech_active = False + self.recording_thread = None + self.realtime_thread = None + self.audio_interface = None + self.audio = None + self.stream = None + self.start_recording_event = threading.Event() + self.stop_recording_event = threading.Event() + self.backdate_stop_seconds = 0.0 + self.backdate_resume_seconds = 0.0 + self.last_transcription_bytes = None + self.last_transcription_bytes_b64 = None + self.initial_prompt = initial_prompt + self.initial_prompt_realtime = initial_prompt_realtime + self.suppress_tokens = suppress_tokens + self.use_wake_words = wake_words or wakeword_backend in {'oww', 'openwakeword', 'openwakewords'} + self.detected_language = None + self.detected_language_probability = 0 + self.detected_realtime_language = None + self.detected_realtime_language_probability = 0 + self.transcription_lock = threading.Lock() + self.shutdown_lock = threading.Lock() + self.transcribe_count = 0 + self.print_transcription_time = print_transcription_time + self.early_transcription_on_silence = early_transcription_on_silence + self.use_extended_logging = use_extended_logging + self.faster_whisper_vad_filter = faster_whisper_vad_filter + self.normalize_audio = normalize_audio + self.awaiting_speech_end = False + self.start_callback_in_new_thread = start_callback_in_new_thread + + # ---------------------------------------------------------------------------- + # Named logger configuration + # By default, let's set it up so it logs at 'level' to the console. + # If you do NOT want this default configuration, remove the lines below + # and manage your "realtimestt" logger from your application code. + logger.setLevel(logging.DEBUG) # We capture all, then filter via handlers + + log_format = "RealTimeSTT: %(name)s - %(levelname)s - %(message)s" + file_log_format = "%(asctime)s.%(msecs)03d - " + log_format + + # Create and set up console handler + console_handler = logging.StreamHandler() + console_handler.setLevel(self.level) + console_handler.setFormatter(logging.Formatter(log_format)) + + logger.addHandler(console_handler) + + if not no_log_file: + file_handler = logging.FileHandler('realtimesst.log') + file_handler.setLevel(logging.DEBUG) + file_handler.setFormatter(logging.Formatter(file_log_format, datefmt='%Y-%m-%d %H:%M:%S')) + logger.addHandler(file_handler) + # ---------------------------------------------------------------------------- + + self.is_shut_down = False + self.shutdown_event = mp.Event() + + try: + # Only set the start method if it hasn't been set already + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method("spawn") + except RuntimeError as e: + logger.info(f"Start method has already been set. 
Details: {e}") + + logger.info("Starting RealTimeSTT") + + if use_extended_logging: + logger.info("RealtimeSTT was called with these parameters:") + for param, value in locals().items(): + logger.info(f"{param}: {value}") + + self.interrupt_stop_event = mp.Event() + self.was_interrupted = mp.Event() + self.main_transcription_ready_event = mp.Event() + + self.parent_transcription_pipe, child_transcription_pipe = SafePipe() + self.parent_stdout_pipe, child_stdout_pipe = SafePipe() + + # Set device for model + self.device = "cuda" if self.device == "cuda" and torch.cuda.is_available() else "cpu" + + self.transcript_process = self._start_thread( + target=AudioToTextRecorder._transcription_worker, + args=( + child_transcription_pipe, + child_stdout_pipe, + self.main_model_type, + self.download_root, + self.compute_type, + self.gpu_device_index, + self.device, + self.main_transcription_ready_event, + self.shutdown_event, + self.interrupt_stop_event, + self.beam_size, + self.initial_prompt, + self.suppress_tokens, + self.batch_size, + self.faster_whisper_vad_filter, + self.normalize_audio, + ) + ) + + # Start audio data reading process + if self.use_microphone.value: + logger.info("Initializing audio recording" + " (creating pyAudio input stream," + f" sample rate: {self.sample_rate}" + f" buffer size: {self.buffer_size}" + ) + self.reader_process = self._start_thread( + target=AudioToTextRecorder._audio_data_worker, + args=( + self.audio_queue, + self.sample_rate, + self.buffer_size, + self.input_device_index, + self.shutdown_event, + self.interrupt_stop_event, + self.use_microphone + ) + ) + + # Initialize the realtime transcription model + if self.enable_realtime_transcription and not self.use_main_model_for_realtime: + try: + logger.info("Initializing faster_whisper realtime " + f"transcription model {self.realtime_model_type}, " + f"default device: {self.device}, " + f"compute type: {self.compute_type}, " + f"device index: {self.gpu_device_index}, " + f"download root: {self.download_root}" + ) + self.realtime_model_type = faster_whisper.WhisperModel( + model_size_or_path=self.realtime_model_type, + device=self.device, + compute_type=self.compute_type, + device_index=self.gpu_device_index, + download_root=self.download_root, + ) + if self.realtime_batch_size > 0: + self.realtime_model_type = BatchedInferencePipeline(model=self.realtime_model_type) + + # Run a warm-up transcription + current_dir = os.path.dirname(os.path.realpath(__file__)) + warmup_audio_path = os.path.join( + current_dir, "warmup_audio.wav" + ) + warmup_audio_data, _ = sf.read(warmup_audio_path, dtype="float32") + segments, info = self.realtime_model_type.transcribe(warmup_audio_data, language="es", beam_size=1) + model_warmup_transcription = " ".join(segment.text for segment in segments) + except Exception as e: + logger.exception("Error initializing faster_whisper " + f"realtime transcription model: {e}" + ) + raise + + logger.debug("Faster_whisper realtime speech to text " + "transcription model initialized successfully") + + # Setup wake word detection + if wake_words or wakeword_backend in {'oww', 'openwakeword', 'openwakewords', 'pvp', 'pvporcupine'}: + self.wakeword_backend = wakeword_backend + + self.wake_words_list = [ + word.strip() for word in wake_words.lower().split(',') + ] + self.wake_words_sensitivity = wake_words_sensitivity + self.wake_words_sensitivities = [ + float(wake_words_sensitivity) + for _ in range(len(self.wake_words_list)) + ] + + if wake_words and self.wakeword_backend in {'pvp', 'pvporcupine'}: 
+ + try: + self.porcupine = pvporcupine.create( + keywords=self.wake_words_list, + sensitivities=self.wake_words_sensitivities + ) + self.buffer_size = self.porcupine.frame_length + self.sample_rate = self.porcupine.sample_rate + + except Exception as e: + logger.exception( + "Error initializing porcupine " + f"wake word detection engine: {e}. " + f"Wakewords: {self.wake_words_list}." + ) + raise + + logger.debug( + "Porcupine wake word detection engine initialized successfully" + ) + + elif wake_words and self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}: + + openwakeword.utils.download_models() + + try: + if openwakeword_model_paths: + model_paths = openwakeword_model_paths.split(',') + self.owwModel = Model( + wakeword_models=model_paths, + inference_framework=openwakeword_inference_framework + ) + logger.info( + "Successfully loaded wakeword model(s): " + f"{openwakeword_model_paths}" + ) + else: + self.owwModel = Model( + inference_framework=openwakeword_inference_framework) + + self.oww_n_models = len(self.owwModel.models.keys()) + if not self.oww_n_models: + logger.error( + "No wake word models loaded." + ) + + for model_key in self.owwModel.models.keys(): + logger.info( + "Successfully loaded openwakeword model: " + f"{model_key}" + ) + + except Exception as e: + logger.exception( + "Error initializing openwakeword " + f"wake word detection engine: {e}" + ) + raise + + logger.debug( + "Open wake word detection engine initialized successfully" + ) + + else: + logger.exception(f"Wakeword engine {self.wakeword_backend} unknown/unsupported or wake_words not specified. Please specify one of: pvporcupine, openwakeword.") + + + # Setup voice activity detection model WebRTC + try: + logger.info("Initializing WebRTC voice with " + f"Sensitivity {webrtc_sensitivity}" + ) + self.webrtc_vad_model = webrtcvad.Vad() + self.webrtc_vad_model.set_mode(webrtc_sensitivity) + + except Exception as e: + logger.exception("Error initializing WebRTC voice " + f"activity detection engine: {e}" + ) + raise + + logger.debug("WebRTC VAD voice activity detection " + "engine initialized successfully" + ) + + # Setup voice activity detection model Silero VAD + try: + self.silero_vad_model, _ = torch.hub.load( + repo_or_dir="snakers4/silero-vad", + model="silero_vad", + verbose=False, + onnx=silero_use_onnx + ) + + except Exception as e: + logger.exception(f"Error initializing Silero VAD " + f"voice activity detection engine: {e}" + ) + raise + + logger.debug("Silero VAD voice activity detection " + "engine initialized successfully" + ) + + self.audio_buffer = collections.deque( + maxlen=int((self.sample_rate // self.buffer_size) * + self.pre_recording_buffer_duration) + ) + self.last_words_buffer = collections.deque( + maxlen=int((self.sample_rate // self.buffer_size) * + 0.3) + ) + self.frames = [] + self.last_frames = [] + + # Recording control flags + self.is_recording = False + self.is_running = True + self.start_recording_on_voice_activity = False + self.stop_recording_on_voice_deactivity = False + + # Start the recording worker thread + self.recording_thread = threading.Thread(target=self._recording_worker) + self.recording_thread.daemon = True + self.recording_thread.start() + + # Start the realtime transcription worker thread + self.realtime_thread = threading.Thread(target=self._realtime_worker) + self.realtime_thread.daemon = True + self.realtime_thread.start() + + # Wait for transcription models to start + logger.debug('Waiting for main transcription model to start') + 
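
For the wake word path configured above, a brief, hedged configuration sketch using only parameters documented in the constructor; the callbacks are illustrative placeholders:

recorder = AudioToTextRecorder(
    wakeword_backend="pvporcupine",
    wake_words="jarvis, computer",             # comma-separated Porcupine keywords
    wake_words_sensitivity=0.6,
    wake_word_activation_delay=0.0,            # listen for the wake word immediately
    wake_word_timeout=5.0,                     # fall back to idle if no speech follows
    on_wakeword_detected=lambda: print("wake word detected"),
    on_wakeword_timeout=lambda: print("wake word timed out"),
)
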
self.main_transcription_ready_event.wait() + logger.debug('Main transcription model ready') + + self.stdout_thread = threading.Thread(target=self._read_stdout) + self.stdout_thread.daemon = True + self.stdout_thread.start() + + logger.debug('RealtimeSTT initialization completed successfully') + + def _start_thread(self, target=None, args=()): + """ + Implement a consistent threading model across the library. + + This method is used to start any thread in this library. It uses the + standard threading. Thread for Linux and for all others uses the pytorch + MultiProcessing library 'Process'. + Args: + target (callable object): is the callable object to be invoked by + the run() method. Defaults to None, meaning nothing is called. + args (tuple): is a list or tuple of arguments for the target + invocation. Defaults to (). + """ + if (platform.system() == 'Linux'): + thread = threading.Thread(target=target, args=args) + thread.deamon = True + thread.start() + return thread + else: + thread = mp.Process(target=target, args=args) + thread.start() + return thread + + def _read_stdout(self): + while not self.shutdown_event.is_set(): + try: + if self.parent_stdout_pipe.poll(0.1): + logger.debug("Receive from stdout pipe") + message = self.parent_stdout_pipe.recv() + logger.info(message) + except (BrokenPipeError, EOFError, OSError): + # The pipe probably has been closed, so we ignore the error + pass + except KeyboardInterrupt: # handle manual interruption (Ctrl+C) + logger.info("KeyboardInterrupt in read from stdout detected, exiting...") + break + except Exception as e: + logger.error(f"Unexpected error in read from stdout: {e}", exc_info=True) + logger.error(traceback.format_exc()) # Log the full traceback here + break + time.sleep(0.1) + + def _transcription_worker(*args, **kwargs): + worker = TranscriptionWorker(*args, **kwargs) + worker.run() + + def _run_callback(self, cb, *args, **kwargs): + if self.start_callback_in_new_thread: + # Run the callback in a new thread to avoid blocking the main thread + threading.Thread(target=cb, args=args, kwargs=kwargs, daemon=True).start() + else: + # Run the callback in the main thread to avoid threading issues + cb(*args, **kwargs) + + @staticmethod + def _audio_data_worker( + audio_queue, + target_sample_rate, + buffer_size, + input_device_index, + shutdown_event, + interrupt_stop_event, + use_microphone + ): + """ + Worker method that handles the audio recording process. + + This method runs in a separate process and is responsible for: + - Setting up the audio input stream for recording at the highest possible sample rate. + - Continuously reading audio data from the input stream, resampling if necessary, + preprocessing the data, and placing complete chunks in a queue. + - Handling errors during the recording process. + - Gracefully terminating the recording process when a shutdown event is set. + + Args: + audio_queue (queue.Queue): A queue where recorded audio data is placed. + target_sample_rate (int): The desired sample rate for the output audio (for Silero VAD). + buffer_size (int): The number of samples expected by the Silero VAD model. + input_device_index (int): The index of the audio input device. + shutdown_event (threading.Event): An event that, when set, signals this worker method to terminate. + interrupt_stop_event (threading.Event): An event to signal keyboard interrupt. + use_microphone (multiprocessing.Value): A shared value indicating whether to use the microphone. 
+ + Raises: + Exception: If there is an error while initializing the audio recording. + """ + import pyaudio + import numpy as np + from scipy import signal + + if __name__ == '__main__': + system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN) + + def get_highest_sample_rate(audio_interface, device_index): + """Get the highest supported sample rate for the specified device.""" + try: + device_info = audio_interface.get_device_info_by_index(device_index) + logger.debug(f"Retrieving highest sample rate for device index {device_index}: {device_info}") + max_rate = int(device_info['defaultSampleRate']) + + if 'supportedSampleRates' in device_info: + supported_rates = [int(rate) for rate in device_info['supportedSampleRates']] + if supported_rates: + max_rate = max(supported_rates) + + logger.debug(f"Highest supported sample rate for device index {device_index} is {max_rate}") + return max_rate + except Exception as e: + logger.warning(f"Failed to get highest sample rate: {e}") + return 48000 # Fallback to a common high sample rate + + def initialize_audio_stream(audio_interface, sample_rate, chunk_size): + nonlocal input_device_index + + def validate_device(device_index): + """Validate that the device exists and is actually available for input.""" + try: + device_info = audio_interface.get_device_info_by_index(device_index) + logger.debug(f"Validating device index {device_index} with info: {device_info}") + if not device_info.get('maxInputChannels', 0) > 0: + logger.debug("Device has no input channels, invalid for recording.") + return False + + # Try to actually read from the device + test_stream = audio_interface.open( + format=pyaudio.paInt16, + channels=1, + rate=target_sample_rate, + input=True, + frames_per_buffer=chunk_size, + input_device_index=device_index, + start=False # Don't start the stream yet + ) + + test_stream.start_stream() + test_data = test_stream.read(chunk_size, exception_on_overflow=False) + test_stream.stop_stream() + test_stream.close() + + if len(test_data) == 0: + logger.debug("Device produced no data, invalid for recording.") + return False + + logger.debug(f"Device index {device_index} successfully validated.") + return True + + except Exception as e: + logger.debug(f"Device validation failed for index {device_index}: {e}") + return False + + """Initialize the audio stream with error handling.""" + while not shutdown_event.is_set(): + try: + # First, get a list of all available input devices + input_devices = [] + device_count = audio_interface.get_device_count() + logger.debug(f"Found {device_count} total audio devices on the system.") + for i in range(device_count): + try: + device_info = audio_interface.get_device_info_by_index(i) + if device_info.get('maxInputChannels', 0) > 0: + input_devices.append(i) + except Exception as e: + logger.debug(f"Could not retrieve info for device index {i}: {e}") + continue + + logger.debug(f"Available input devices with input channels: {input_devices}") + if not input_devices: + raise Exception("No input devices found") + + # If input_device_index is None or invalid, try to find a working device + if input_device_index is None or input_device_index not in input_devices: + # First try the default device + try: + default_device = audio_interface.get_default_input_device_info() + logger.debug(f"Default device info: {default_device}") + if validate_device(default_device['index']): + input_device_index = default_device['index'] + logger.debug(f"Default device {input_device_index} selected.") + except Exception: + # If 
default device fails, try other available input devices + logger.debug("Default device validation failed, checking other devices...") + for device_index in input_devices: + if validate_device(device_index): + input_device_index = device_index + logger.debug(f"Device {input_device_index} selected.") + break + else: + raise Exception("No working input devices found") + + # Validate the selected device one final time + if not validate_device(input_device_index): + raise Exception("Selected device validation failed") + + # If we get here, we have a validated device + logger.debug(f"Opening stream with device index {input_device_index}, " + f"sample_rate={sample_rate}, chunk_size={chunk_size}") + stream = audio_interface.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_size, + input_device_index=input_device_index, + ) + + logger.info(f"Microphone connected and validated (device index: {input_device_index}, " + f"sample rate: {sample_rate}, chunk size: {chunk_size})") + return stream + + except Exception as e: + logger.error(f"Microphone connection failed: {e}. Retrying...", exc_info=True) + input_device_index = None + time.sleep(3) # Wait before retrying + continue + + def preprocess_audio(chunk, original_sample_rate, target_sample_rate): + """Preprocess audio chunk similar to feed_audio method.""" + if isinstance(chunk, np.ndarray): + # Handle stereo to mono conversion if necessary + if chunk.ndim == 2: + chunk = np.mean(chunk, axis=1) + + # Resample to target_sample_rate if necessary + if original_sample_rate != target_sample_rate: + logger.debug(f"Resampling from {original_sample_rate} Hz to {target_sample_rate} Hz.") + num_samples = int(len(chunk) * target_sample_rate / original_sample_rate) + chunk = signal.resample(chunk, num_samples) + + chunk = chunk.astype(np.int16) + else: + # If chunk is bytes, convert to numpy array + chunk = np.frombuffer(chunk, dtype=np.int16) + + # Resample if necessary + if original_sample_rate != target_sample_rate: + logger.debug(f"Resampling from {original_sample_rate} Hz to {target_sample_rate} Hz.") + num_samples = int(len(chunk) * target_sample_rate / original_sample_rate) + chunk = signal.resample(chunk, num_samples) + chunk = chunk.astype(np.int16) + + return chunk.tobytes() + + audio_interface = None + stream = None + device_sample_rate = None + chunk_size = 1024 # Increased chunk size for better performance + + def setup_audio(): + nonlocal audio_interface, stream, device_sample_rate, input_device_index + try: + if audio_interface is None: + logger.debug("Creating PyAudio interface...") + audio_interface = pyaudio.PyAudio() + + if input_device_index is None: + try: + default_device = audio_interface.get_default_input_device_info() + input_device_index = default_device['index'] + logger.debug(f"No device index supplied; using default device {input_device_index}") + except OSError as e: + logger.debug(f"Default device retrieval failed: {e}") + input_device_index = None + + # We'll try 16000 Hz first, then the highest rate we detect, then fallback if needed + sample_rates_to_try = [16000] + if input_device_index is not None: + highest_rate = get_highest_sample_rate(audio_interface, input_device_index) + if highest_rate != 16000: + sample_rates_to_try.append(highest_rate) + else: + sample_rates_to_try.append(48000) + + logger.debug(f"Sample rates to try for device {input_device_index}: {sample_rates_to_try}") + + for rate in sample_rates_to_try: + try: + device_sample_rate = rate + 
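# The probing strategy above tries a preferred sample rate first, then falls
# back to whatever the device advertises. A standalone sketch of that idea,
# assuming PyAudio is installed; the device index and candidate rates are
# illustrative assumptions.
import pyaudio

def probe_input_rate(device_index=0, candidates=(16000, 48000, 44100)):
    pa = pyaudio.PyAudio()
    try:
        info = pa.get_device_info_by_index(device_index)
        if info.get('maxInputChannels', 0) < 1:
            return None                              # not an input device
        for rate in candidates:
            try:
                # is_format_supported raises ValueError for unsupported combos
                if pa.is_format_supported(rate,
                                          input_device=device_index,
                                          input_channels=1,
                                          input_format=pyaudio.paInt16):
                    return rate
            except ValueError:
                continue
        return int(info['defaultSampleRate'])        # last resort
    finally:
        pa.terminate()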
logger.debug(f"Attempting to initialize audio stream at {device_sample_rate} Hz.") + stream = initialize_audio_stream(audio_interface, device_sample_rate, chunk_size) + if stream is not None: + logger.debug( + f"Audio recording initialized successfully at {device_sample_rate} Hz, " + f"reading {chunk_size} frames at a time" + ) + return True + except Exception as e: + logger.warning(f"Failed to initialize audio stream at {device_sample_rate} Hz: {e}") + continue + + # If we reach here, none of the sample rates worked + raise Exception("Failed to initialize audio stream with all sample rates.") + + except Exception as e: + logger.exception(f"Error initializing pyaudio audio recording: {e}") + if audio_interface: + audio_interface.terminate() + return False + + logger.debug(f"Starting audio data worker with target_sample_rate={target_sample_rate}, " + f"buffer_size={buffer_size}, input_device_index={input_device_index}") + + if not setup_audio(): + raise Exception("Failed to set up audio recording.") + + buffer = bytearray() + silero_buffer_size = 2 * buffer_size # Silero complains if too short + + time_since_last_buffer_message = 0 + + try: + while not shutdown_event.is_set(): + try: + data = stream.read(chunk_size, exception_on_overflow=False) + + if use_microphone.value: + processed_data = preprocess_audio(data, device_sample_rate, target_sample_rate) + buffer += processed_data + + # Check if the buffer has reached or exceeded the silero_buffer_size + while len(buffer) >= silero_buffer_size: + # Extract silero_buffer_size amount of data from the buffer + to_process = buffer[:silero_buffer_size] + buffer = buffer[silero_buffer_size:] + + # Feed the extracted data to the audio_queue + if time_since_last_buffer_message: + time_passed = time.time() - time_since_last_buffer_message + if time_passed > 1: + logger.debug("_audio_data_worker writing audio data into queue.") + time_since_last_buffer_message = time.time() + else: + time_since_last_buffer_message = time.time() + + audio_queue.put(to_process) + + except OSError as e: + if e.errno == pyaudio.paInputOverflowed: + logger.warning("Input overflowed. Frame dropped.") + else: + logger.error(f"OSError during recording: {e}", exc_info=True) + # Attempt to reinitialize the stream + logger.error("Attempting to reinitialize the audio stream...") + + try: + if stream: + stream.stop_stream() + stream.close() + except Exception: + pass + + time.sleep(1) + if not setup_audio(): + logger.error("Failed to reinitialize audio stream. Exiting.") + break + else: + logger.error("Audio stream reinitialized successfully.") + continue + + except Exception as e: + logger.error(f"Unknown error during recording: {e}") + tb_str = traceback.format_exc() + logger.error(f"Traceback: {tb_str}") + logger.error(f"Error: {e}") + # Attempt to reinitialize the stream + logger.info("Attempting to reinitialize the audio stream...") + try: + if stream: + stream.stop_stream() + stream.close() + except Exception: + pass + + time.sleep(1) + if not setup_audio(): + logger.error("Failed to reinitialize audio stream. 
Exiting.") + break + else: + logger.info("Audio stream reinitialized successfully.") + continue + + except KeyboardInterrupt: + interrupt_stop_event.set() + logger.debug("Audio data worker process finished due to KeyboardInterrupt") + finally: + # After recording stops, feed any remaining audio data + if buffer: + audio_queue.put(bytes(buffer)) + + try: + if stream: + stream.stop_stream() + stream.close() + except Exception: + pass + if audio_interface: + audio_interface.terminate() + + def wakeup(self): + """ + If in wake work modus, wake up as if a wake word was spoken. + """ + self.listen_start = time.time() + + def abort(self): + state = self.state + self.start_recording_on_voice_activity = False + self.stop_recording_on_voice_deactivity = False + self.interrupt_stop_event.set() + if self.state != "inactive": # if inactive, was_interrupted will never be set + self.was_interrupted.wait() + self._set_state("transcribing") + self.was_interrupted.clear() + if self.is_recording: # if recording, make sure to stop the recorder + self.stop() + + + def wait_audio(self): + """ + Waits for the start and completion of the audio recording process. + + This method is responsible for: + - Waiting for voice activity to begin recording if not yet started. + - Waiting for voice inactivity to complete the recording. + - Setting the audio buffer from the recorded frames. + - Resetting recording-related attributes. + + Side effects: + - Updates the state of the instance. + - Modifies the audio attribute to contain the processed audio data. + """ + + try: + logger.info("Setting listen time") + if self.listen_start == 0: + self.listen_start = time.time() + + # If not yet started recording, wait for voice activity to initiate. + if not self.is_recording and not self.frames: + self._set_state("listening") + self.start_recording_on_voice_activity = True + + # Wait until recording starts + logger.debug('Waiting for recording start') + while not self.interrupt_stop_event.is_set(): + if self.start_recording_event.wait(timeout=0.02): + break + + # If recording is ongoing, wait for voice inactivity + # to finish recording. 
+ if self.is_recording: + self.stop_recording_on_voice_deactivity = True + + # Wait until recording stops + logger.debug('Waiting for recording stop') + while not self.interrupt_stop_event.is_set(): + if (self.stop_recording_event.wait(timeout=0.02)): + break + + frames = self.frames + if len(frames) == 0: + frames = self.last_frames + + # Calculate samples needed for backdating resume + samples_to_keep = int(self.sample_rate * self.backdate_resume_seconds) + + # First convert all current frames to audio array + full_audio_array = np.frombuffer(b''.join(frames), dtype=np.int16) + full_audio = full_audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE + + # Calculate how many samples we need to keep for backdating resume + if samples_to_keep > 0: + samples_to_keep = min(samples_to_keep, len(full_audio)) + # Keep the last N samples for backdating resume + frames_to_read_audio = full_audio[-samples_to_keep:] + + # Convert the audio back to int16 bytes for frames + frames_to_read_int16 = (frames_to_read_audio * INT16_MAX_ABS_VALUE).astype(np.int16) + frame_bytes = frames_to_read_int16.tobytes() + + # Split into appropriate frame sizes (assuming standard frame size) + FRAME_SIZE = 2048 # Typical frame size + frames_to_read = [] + for i in range(0, len(frame_bytes), FRAME_SIZE): + frame = frame_bytes[i:i + FRAME_SIZE] + if frame: # Only add non-empty frames + frames_to_read.append(frame) + else: + frames_to_read = [] + + # Process backdate stop seconds + samples_to_remove = int(self.sample_rate * self.backdate_stop_seconds) + + if samples_to_remove > 0: + if samples_to_remove < len(full_audio): + self.audio = full_audio[:-samples_to_remove] + logger.debug(f"Removed {samples_to_remove} samples " + f"({samples_to_remove/self.sample_rate:.3f}s) from end of audio") + else: + self.audio = np.array([], dtype=np.float32) + logger.debug("Cleared audio (samples_to_remove >= audio length)") + else: + self.audio = full_audio + logger.debug(f"No samples removed, final audio length: {len(self.audio)}") + + self.frames.clear() + self.last_frames.clear() + self.frames.extend(frames_to_read) + + # Reset backdating parameters + self.backdate_stop_seconds = 0.0 + self.backdate_resume_seconds = 0.0 + + self.listen_start = 0 + + self._set_state("inactive") + + except KeyboardInterrupt: + logger.info("KeyboardInterrupt in wait_audio, shutting down") + self.shutdown() + raise # Re-raise the exception after cleanup + + + def perform_final_transcription(self, audio_bytes=None, use_prompt=True): + start_time = 0 + with self.transcription_lock: + if audio_bytes is None: + audio_bytes = copy.deepcopy(self.audio) + + if audio_bytes is None or len(audio_bytes) == 0: + print("No audio data available for transcription") + #logger.info("No audio data available for transcription") + return "" + + try: + if self.transcribe_count == 0: + logger.debug("Adding transcription request, no early transcription started") + start_time = time.time() # Start timing + self.parent_transcription_pipe.send((audio_bytes, self.language, use_prompt)) + self.transcribe_count += 1 + + while self.transcribe_count > 0: + logger.debug(F"Receive from parent_transcription_pipe after sendiung transcription request, transcribe_count: {self.transcribe_count}") + if not self.parent_transcription_pipe.poll(0.1): # check if transcription done + if self.interrupt_stop_event.is_set(): # check if interrupted + self.was_interrupted.set() + self._set_state("inactive") + return "" # return empty string if interrupted + continue + status, result = 
self.parent_transcription_pipe.recv() + self.transcribe_count -= 1 + + self.allowed_to_early_transcribe = True + self._set_state("inactive") + if status == 'success': + segments, info = result + self.detected_language = info.language if info.language_probability > 0 else None + self.detected_language_probability = info.language_probability + self.last_transcription_bytes = copy.deepcopy(audio_bytes) + self.last_transcription_bytes_b64 = base64.b64encode(self.last_transcription_bytes.tobytes()).decode('utf-8') + transcription = self._preprocess_output(segments) + end_time = time.time() # End timing + transcription_time = end_time - start_time + + if start_time: + if self.print_transcription_time: + print(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds") + else: + logger.debug(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds") + return "" if self.interrupt_stop_event.is_set() else transcription # if interrupted return empty string + else: + logger.error(f"Transcription error: {result}") + raise Exception(result) + except Exception as e: + logger.error(f"Error during transcription: {str(e)}", exc_info=True) + raise e + + + def transcribe(self): + """ + Transcribes audio captured by this class instance using the + `faster_whisper` model. + + Automatically starts recording upon voice activity if not manually + started using `recorder.start()`. + Automatically stops recording upon voice deactivity if not manually + stopped with `recorder.stop()`. + Processes the recorded audio to generate transcription. + + Args: + on_transcription_finished (callable, optional): Callback function + to be executed when transcription is ready. + If provided, transcription will be performed asynchronously, + and the callback will receive the transcription as its argument. + If omitted, the transcription will be performed synchronously, + and the result will be returned. + + Returns (if no callback is set): + str: The transcription of the recorded audio. + + Raises: + Exception: If there is an error during the transcription process. + """ + audio_copy = copy.deepcopy(self.audio) + self._set_state("transcribing") + if self.on_transcription_start: + abort_value = self.on_transcription_start(audio_copy) + if not abort_value: + return self.perform_final_transcription(audio_copy) + return None + else: + return self.perform_final_transcription(audio_copy) + + + def _process_wakeword(self, data): + """ + Processes audio data to detect wake words. 
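# perform_final_transcription above sends audio over a pipe and polls for the
# result so an interrupt can be noticed while the worker runs. A minimal sketch
# of that request/response pattern; the worker function and payload are
# illustrative assumptions.
import multiprocessing as mp
import threading
import time

def worker(conn):
    audio = conn.recv()
    time.sleep(0.2)                            # stand-in for transcription work
    conn.send(('success', f'{len(audio)} samples transcribed'))

if __name__ == '__main__':
    parent_conn, child_conn = mp.Pipe()
    mp.Process(target=worker, args=(child_conn,), daemon=True).start()
    interrupted = threading.Event()
    parent_conn.send([0.0] * 16000)
    while not interrupted.is_set():
        if parent_conn.poll(0.1):              # result ready?
            status, result = parent_conn.recv()
            print(status, result)
            break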
+ """ + if self.wakeword_backend in {'pvp', 'pvporcupine'}: + pcm = struct.unpack_from( + "h" * self.buffer_size, + data + ) + porcupine_index = self.porcupine.process(pcm) + if self.debug_mode: + logger.info(f"wake words porcupine_index: {porcupine_index}") + return porcupine_index + + elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}: + pcm = np.frombuffer(data, dtype=np.int16) + prediction = self.owwModel.predict(pcm) + max_score = -1 + max_index = -1 + wake_words_in_prediction = len(self.owwModel.prediction_buffer.keys()) + self.wake_words_sensitivities + if wake_words_in_prediction: + for idx, mdl in enumerate(self.owwModel.prediction_buffer.keys()): + scores = list(self.owwModel.prediction_buffer[mdl]) + if scores[-1] >= self.wake_words_sensitivity and scores[-1] > max_score: + max_score = scores[-1] + max_index = idx + if self.debug_mode: + logger.info(f"wake words oww max_index, max_score: {max_index} {max_score}") + return max_index + else: + if self.debug_mode: + logger.info(f"wake words oww_index: -1") + return -1 + + if self.debug_mode: + logger.info("wake words no match") + + return -1 + + def text(self, + on_transcription_finished=None, + ): + """ + Transcribes audio captured by this class instance + using the `faster_whisper` model. + + - Automatically starts recording upon voice activity if not manually + started using `recorder.start()`. + - Automatically stops recording upon voice deactivity if not manually + stopped with `recorder.stop()`. + - Processes the recorded audio to generate transcription. + + Args: + on_transcription_finished (callable, optional): Callback function + to be executed when transcription is ready. + If provided, transcription will be performed asynchronously, and + the callback will receive the transcription as its argument. + If omitted, the transcription will be performed synchronously, + and the result will be returned. + + Returns (if not callback is set): + str: The transcription of the recorded audio + """ + self.interrupt_stop_event.clear() + self.was_interrupted.clear() + try: + self.wait_audio() + except KeyboardInterrupt: + logger.info("KeyboardInterrupt in text() method") + self.shutdown() + raise # Re-raise the exception after cleanup + + if self.is_shut_down or self.interrupt_stop_event.is_set(): + if self.interrupt_stop_event.is_set(): + self.was_interrupted.set() + return "" + + if on_transcription_finished: + threading.Thread(target=on_transcription_finished, + args=(self.transcribe(),)).start() + else: + return self.transcribe() + + + def format_number(self, num): + # Convert the number to a string + num_str = f"{num:.10f}" # Ensure precision is sufficient + # Split the number into integer and decimal parts + integer_part, decimal_part = num_str.split('.') + # Take the last two digits of the integer part and the first two digits of the decimal part + result = f"{integer_part[-2:]}.{decimal_part[:2]}" + return result + + def start(self, frames = None): + """ + Starts recording audio directly without waiting for voice activity. + """ + + # Ensure there's a minimum interval + # between stopping and starting recording + if (time.time() - self.recording_stop_time + < self.min_gap_between_recordings): + logger.info("Attempted to start recording " + "too soon after stopping." 
+ ) + return self + + logger.info("recording started") + self._set_state("recording") + self.text_storage = [] + self.realtime_stabilized_text = "" + self.realtime_stabilized_safetext = "" + self.wakeword_detected = False + self.wake_word_detect_time = 0 + self.frames = [] + if frames: + self.frames = frames + self.is_recording = True + + self.recording_start_time = time.time() + self.is_silero_speech_active = False + self.is_webrtc_speech_active = False + self.stop_recording_event.clear() + self.start_recording_event.set() + + if self.on_recording_start: + self._run_callback(self.on_recording_start) + + return self + + def stop(self, + backdate_stop_seconds: float = 0.0, + backdate_resume_seconds: float = 0.0, + ): + """ + Stops recording audio. + + Args: + - backdate_stop_seconds (float, default="0.0"): Specifies the number of + seconds to backdate the stop time. This is useful when the stop + command is issued after the actual stop time. + - backdate_resume_seconds (float, default="0.0"): Specifies the number + of seconds to backdate the time relistening is initiated. + """ + + # Ensure there's a minimum interval + # between starting and stopping recording + if (time.time() - self.recording_start_time + < self.min_length_of_recording): + logger.info("Attempted to stop recording " + "too soon after starting." + ) + return self + + logger.info("recording stopped") + self.last_frames = copy.deepcopy(self.frames) + self.backdate_stop_seconds = backdate_stop_seconds + self.backdate_resume_seconds = backdate_resume_seconds + self.is_recording = False + self.recording_stop_time = time.time() + self.is_silero_speech_active = False + self.is_webrtc_speech_active = False + self.silero_check_time = 0 + self.start_recording_event.clear() + self.stop_recording_event.set() + + self.last_recording_start_time = self.recording_start_time + self.last_recording_stop_time = self.recording_stop_time + + if self.on_recording_stop: + self._run_callback(self.on_recording_stop) + + return self + + def listen(self): + """ + Puts recorder in immediate "listen" state. + This is the state after a wake word detection, for example. + The recorder now "listens" for voice activation. + Once voice is detected we enter "recording" state. + """ + self.listen_start = time.time() + self._set_state("listening") + self.start_recording_on_voice_activity = True + + def feed_audio(self, chunk, original_sample_rate=16000): + """ + Feed an audio chunk into the processing pipeline. Chunks are + accumulated until the buffer size is reached, and then the accumulated + data is fed into the audio_queue. 
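# A short usage sketch of the API described above: the blocking text() call and
# the callback variant. The constructor arguments shown are illustrative
# assumptions.
from RealtimeSTT import AudioToTextRecorder

def on_text(text):
    print("heard:", text)

if __name__ == '__main__':
    with AudioToTextRecorder(model="tiny", language="en") as recorder:
        print(recorder.text())   # blocking: listen, record, transcribe, return
        recorder.text(on_text)   # records, then transcribes in a background
                                 # thread and passes the result to on_text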
+ """ + # Check if the buffer attribute exists, if not, initialize it + if not hasattr(self, 'buffer'): + self.buffer = bytearray() + + # Check if input is a NumPy array + if isinstance(chunk, np.ndarray): + # Handle stereo to mono conversion if necessary + if chunk.ndim == 2: + chunk = np.mean(chunk, axis=1) + + # Resample to 16000 Hz if necessary + if original_sample_rate != 16000: + num_samples = int(len(chunk) * 16000 / original_sample_rate) + chunk = resample(chunk, num_samples) + + # Ensure data type is int16 + chunk = chunk.astype(np.int16) + + # Convert the NumPy array to bytes + chunk = chunk.tobytes() + + # Append the chunk to the buffer + self.buffer += chunk + buf_size = 2 * self.buffer_size # silero complains if too short + + # Check if the buffer has reached or exceeded the buffer_size + while len(self.buffer) >= buf_size: + # Extract self.buffer_size amount of data from the buffer + to_process = self.buffer[:buf_size] + self.buffer = self.buffer[buf_size:] + + # Feed the extracted data to the audio_queue + self.audio_queue.put(to_process) + + def set_microphone(self, microphone_on=True): + """ + Set the microphone on or off. + """ + logger.info("Setting microphone to: " + str(microphone_on)) + self.use_microphone.value = microphone_on + + def shutdown(self): + """ + Safely shuts down the audio recording by stopping the + recording worker and closing the audio stream. + """ + + with self.shutdown_lock: + if self.is_shut_down: + return + + print("\033[91mRealtimeSTT shutting down\033[0m") + + # Force wait_audio() and text() to exit + self.is_shut_down = True + self.start_recording_event.set() + self.stop_recording_event.set() + + self.shutdown_event.set() + self.is_recording = False + self.is_running = False + + logger.debug('Finishing recording thread') + if self.recording_thread: + self.recording_thread.join() + + logger.debug('Terminating reader process') + + # Give it some time to finish the loop and cleanup. + if self.use_microphone.value: + self.reader_process.join(timeout=10) + + if self.reader_process.is_alive(): + logger.warning("Reader process did not terminate " + "in time. Terminating forcefully." + ) + self.reader_process.terminate() + + logger.debug('Terminating transcription process') + self.transcript_process.join(timeout=10) + + if self.transcript_process.is_alive(): + logger.warning("Transcript process did not terminate " + "in time. Terminating forcefully." + ) + self.transcript_process.terminate() + + self.parent_transcription_pipe.close() + + logger.debug('Finishing realtime thread') + if self.realtime_thread: + self.realtime_thread.join() + + if self.enable_realtime_transcription: + if self.realtime_model_type: + del self.realtime_model_type + self.realtime_model_type = None + gc.collect() + + def _recording_worker(self): + """ + The main worker method which constantly monitors the audio + input for voice activity and accordingly starts/stops the recording. 
+ """ + + if self.use_extended_logging: + logger.debug('Debug: Entering try block') + + last_inner_try_time = 0 + try: + if self.use_extended_logging: + logger.debug('Debug: Initializing variables') + time_since_last_buffer_message = 0 + was_recording = False + delay_was_passed = False + wakeword_detected_time = None + wakeword_samples_to_remove = None + self.allowed_to_early_transcribe = True + + if self.use_extended_logging: + logger.debug('Debug: Starting main loop') + # Continuously monitor audio for voice activity + while self.is_running: + + # if self.use_extended_logging: + # logger.debug('Debug: Entering inner try block') + if last_inner_try_time: + last_processing_time = time.time() - last_inner_try_time + if last_processing_time > 0.1: + if self.use_extended_logging: + logger.warning('### WARNING: PROCESSING TOOK TOO LONG') + last_inner_try_time = time.time() + try: + # if self.use_extended_logging: + # logger.debug('Debug: Trying to get data from audio queue') + try: + data = self.audio_queue.get(timeout=0.01) + self.last_words_buffer.append(data) + except queue.Empty: + # if self.use_extended_logging: + # logger.debug('Debug: Queue is empty, checking if still running') + if not self.is_running: + if self.use_extended_logging: + logger.debug('Debug: Not running, breaking loop') + break + # if self.use_extended_logging: + # logger.debug('Debug: Continuing to next iteration') + continue + + if self.use_extended_logging: + logger.debug('Debug: Checking for on_recorded_chunk callback') + if self.on_recorded_chunk: + if self.use_extended_logging: + logger.debug('Debug: Calling on_recorded_chunk') + self._run_callback(self.on_recorded_chunk, data) + + if self.use_extended_logging: + logger.debug('Debug: Checking if handle_buffer_overflow is True') + if self.handle_buffer_overflow: + if self.use_extended_logging: + logger.debug('Debug: Handling buffer overflow') + # Handle queue overflow + if (self.audio_queue.qsize() > + self.allowed_latency_limit): + if self.use_extended_logging: + logger.debug('Debug: Queue size exceeds limit, logging warnings') + logger.warning("Audio queue size exceeds " + "latency limit. Current size: " + f"{self.audio_queue.qsize()}. " + "Discarding old audio chunks." 
+ ) + + if self.use_extended_logging: + logger.debug('Debug: Discarding old chunks if necessary') + while (self.audio_queue.qsize() > + self.allowed_latency_limit): + + data = self.audio_queue.get() + + except BrokenPipeError: + logger.error("BrokenPipeError _recording_worker", exc_info=True) + self.is_running = False + break + + if self.use_extended_logging: + logger.debug('Debug: Updating time_since_last_buffer_message') + # Feed the extracted data to the audio_queue + if time_since_last_buffer_message: + time_passed = time.time() - time_since_last_buffer_message + if time_passed > 1: + if self.use_extended_logging: + logger.debug("_recording_worker processing audio data") + time_since_last_buffer_message = time.time() + else: + time_since_last_buffer_message = time.time() + + if self.use_extended_logging: + logger.debug('Debug: Initializing failed_stop_attempt') + failed_stop_attempt = False + + if self.use_extended_logging: + logger.debug('Debug: Checking if not recording') + if not self.is_recording: + if self.use_extended_logging: + logger.debug('Debug: Handling not recording state') + # Handle not recording state + time_since_listen_start = (time.time() - self.listen_start + if self.listen_start else 0) + + wake_word_activation_delay_passed = ( + time_since_listen_start > + self.wake_word_activation_delay + ) + + if self.use_extended_logging: + logger.debug('Debug: Handling wake-word timeout callback') + # Handle wake-word timeout callback + if wake_word_activation_delay_passed \ + and not delay_was_passed: + + if self.use_wake_words and self.wake_word_activation_delay: + if self.on_wakeword_timeout: + if self.use_extended_logging: + logger.debug('Debug: Calling on_wakeword_timeout') + self._run_callback(self.on_wakeword_timeout) + delay_was_passed = wake_word_activation_delay_passed + + if self.use_extended_logging: + logger.debug('Debug: Setting state and spinner text') + # Set state and spinner text + if not self.recording_stop_time: + if self.use_wake_words \ + and wake_word_activation_delay_passed \ + and not self.wakeword_detected: + if self.use_extended_logging: + logger.debug('Debug: Setting state to "wakeword"') + self._set_state("wakeword") + else: + if self.listen_start: + if self.use_extended_logging: + logger.debug('Debug: Setting state to "listening"') + self._set_state("listening") + else: + if self.use_extended_logging: + logger.debug('Debug: Setting state to "inactive"') + self._set_state("inactive") + + if self.use_extended_logging: + logger.debug('Debug: Checking wake word conditions') + if self.use_wake_words and wake_word_activation_delay_passed: + try: + if self.use_extended_logging: + logger.debug('Debug: Processing wakeword') + wakeword_index = self._process_wakeword(data) + + except struct.error: + logger.error("Error unpacking audio data " + "for wake word processing.", exc_info=True) + continue + + except Exception as e: + logger.error(f"Wake word processing error: {e}", exc_info=True) + continue + + if self.use_extended_logging: + logger.debug('Debug: Checking if wake word detected') + # If a wake word is detected + if wakeword_index >= 0: + if self.use_extended_logging: + logger.debug('Debug: Wake word detected, updating variables') + self.wake_word_detect_time = time.time() + wakeword_detected_time = time.time() + wakeword_samples_to_remove = int(self.sample_rate * self.wake_word_buffer_duration) + self.wakeword_detected = True + if self.on_wakeword_detected: + if self.use_extended_logging: + logger.debug('Debug: Calling on_wakeword_detected') + 
self._run_callback(self.on_wakeword_detected) + + if self.use_extended_logging: + logger.debug('Debug: Checking voice activity conditions') + # Check for voice activity to + # trigger the start of recording + if ((not self.use_wake_words + or not wake_word_activation_delay_passed) + and self.start_recording_on_voice_activity) \ + or self.wakeword_detected: + + if self.use_extended_logging: + logger.debug('Debug: Checking if voice is active') + + if self._is_voice_active(): + + if self.on_vad_start: + self._run_callback(self.on_vad_start) + + if self.use_extended_logging: + logger.debug('Debug: Voice activity detected') + logger.info("voice activity detected") + + if self.use_extended_logging: + logger.debug('Debug: Starting recording') + self.start() + + self.start_recording_on_voice_activity = False + + if self.use_extended_logging: + logger.debug('Debug: Adding buffered audio to frames') + # Add the buffered audio + # to the recording frames + self.frames.extend(list(self.audio_buffer)) + self.audio_buffer.clear() + + if self.use_extended_logging: + logger.debug('Debug: Resetting Silero VAD model states') + self.silero_vad_model.reset_states() + else: + if self.use_extended_logging: + logger.debug('Debug: Checking voice activity') + data_copy = data[:] + self._check_voice_activity(data_copy) + + if self.use_extended_logging: + logger.debug('Debug: Resetting speech_end_silence_start') + + if self.speech_end_silence_start != 0: + self.speech_end_silence_start = 0 + if self.on_turn_detection_stop: + if self.use_extended_logging: + logger.debug('Debug: Calling on_turn_detection_stop') + self._run_callback(self.on_turn_detection_stop) + + else: + if self.use_extended_logging: + logger.debug('Debug: Handling recording state') + # If we are currently recording + if wakeword_samples_to_remove and wakeword_samples_to_remove > 0: + if self.use_extended_logging: + logger.debug('Debug: Removing wakeword samples') + # Remove samples from the beginning of self.frames + samples_removed = 0 + while wakeword_samples_to_remove > 0 and self.frames: + frame = self.frames[0] + frame_samples = len(frame) // 2 # Assuming 16-bit audio + if wakeword_samples_to_remove >= frame_samples: + self.frames.pop(0) + samples_removed += frame_samples + wakeword_samples_to_remove -= frame_samples + else: + self.frames[0] = frame[wakeword_samples_to_remove * 2:] + samples_removed += wakeword_samples_to_remove + samples_to_remove = 0 + + wakeword_samples_to_remove = 0 + + if self.use_extended_logging: + logger.debug('Debug: Checking if stop_recording_on_voice_deactivity is True') + # Stop the recording if silence is detected after speech + if self.stop_recording_on_voice_deactivity: + if self.use_extended_logging: + logger.debug('Debug: Determining if speech is detected') + is_speech = ( + self._is_silero_speech(data) if self.silero_deactivity_detection + else self._is_webrtc_speech(data, True) + ) + + if self.use_extended_logging: + logger.debug('Debug: Formatting speech_end_silence_start') + if not self.speech_end_silence_start: + str_speech_end_silence_start = "0" + else: + str_speech_end_silence_start = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3] + if self.use_extended_logging: + logger.debug(f"is_speech: {is_speech}, str_speech_end_silence_start: {str_speech_end_silence_start}") + + if self.use_extended_logging: + logger.debug('Debug: Checking if speech is not detected') + if not is_speech: + if self.use_extended_logging: + logger.debug('Debug: Handling voice deactivity') + 
# Voice deactivity was detected, so we start + # measuring silence time before stopping recording + if self.speech_end_silence_start == 0 and \ + (time.time() - self.recording_start_time > self.min_length_of_recording): + + self.speech_end_silence_start = time.time() + self.awaiting_speech_end = True + if self.on_turn_detection_start: + if self.use_extended_logging: + logger.debug('Debug: Calling on_turn_detection_start') + + self._run_callback(self.on_turn_detection_start) + + if self.use_extended_logging: + logger.debug('Debug: Checking early transcription conditions') + if self.speech_end_silence_start and self.early_transcription_on_silence and len(self.frames) > 0 and \ + (time.time() - self.speech_end_silence_start > self.early_transcription_on_silence) and \ + self.allowed_to_early_transcribe: + if self.use_extended_logging: + logger.debug("Debug:Adding early transcription request") + self.transcribe_count += 1 + audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16) + audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE + + if self.use_extended_logging: + logger.debug("Debug: early transcription request pipe send") + self.parent_transcription_pipe.send((audio, self.language, True)) + if self.use_extended_logging: + logger.debug("Debug: early transcription request pipe send return") + self.allowed_to_early_transcribe = False + + else: + self.awaiting_speech_end = False + if self.use_extended_logging: + logger.debug('Debug: Handling speech detection') + if self.speech_end_silence_start: + if self.use_extended_logging: + logger.info("Resetting self.speech_end_silence_start") + + if self.speech_end_silence_start != 0: + self.speech_end_silence_start = 0 + if self.on_turn_detection_stop: + if self.use_extended_logging: + logger.debug('Debug: Calling on_turn_detection_stop') + self._run_callback(self.on_turn_detection_stop) + + self.allowed_to_early_transcribe = True + + if self.use_extended_logging: + logger.debug('Debug: Checking if silence duration exceeds threshold') + # Wait for silence to stop recording after speech + if self.speech_end_silence_start and time.time() - \ + self.speech_end_silence_start >= \ + self.post_speech_silence_duration: + + if self.on_vad_stop: + self._run_callback(self.on_vad_stop) + + if self.use_extended_logging: + logger.debug('Debug: Formatting silence start time') + # Get time in desired format (HH:MM:SS.nnn) + silence_start_time = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3] + + if self.use_extended_logging: + logger.debug('Debug: Calculating time difference') + # Calculate time difference + time_diff = time.time() - self.speech_end_silence_start + + if self.use_extended_logging: + logger.debug('Debug: Logging voice deactivity detection') + logger.info(f"voice deactivity detected at {silence_start_time}, " + f"time since silence start: {time_diff:.3f} seconds") + + logger.debug('Debug: Appending data to frames and stopping recording') + self.frames.append(data) + self.stop() + if not self.is_recording: + if self.speech_end_silence_start != 0: + self.speech_end_silence_start = 0 + if self.on_turn_detection_stop: + if self.use_extended_logging: + logger.debug('Debug: Calling on_turn_detection_stop') + self._run_callback(self.on_turn_detection_stop) + + if self.use_extended_logging: + logger.debug('Debug: Handling non-wake word scenario') + else: + if self.use_extended_logging: + logger.debug('Debug: Setting failed_stop_attempt to True') + failed_stop_attempt = True + + 
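# A compact sketch of the silence-timer logic above: note when silence begins
# after speech and stop once post_speech_silence_duration has elapsed. The
# helper name and the mutable state dict are illustrative assumptions.
import time

def should_stop(is_speech, state, post_speech_silence_duration=0.6):
    now = time.time()
    if is_speech:
        state['silence_start'] = 0          # still speaking: reset the timer
        return False
    if state['silence_start'] == 0:
        state['silence_start'] = now        # silence just began
    return now - state['silence_start'] >= post_speech_silence_duration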
self.awaiting_speech_end = False + + if self.use_extended_logging: + logger.debug('Debug: Checking if recording stopped') + if not self.is_recording and was_recording: + if self.use_extended_logging: + logger.debug('Debug: Resetting after stopping recording') + # Reset after stopping recording to ensure clean state + self.stop_recording_on_voice_deactivity = False + + if self.use_extended_logging: + logger.debug('Debug: Checking Silero time') + if time.time() - self.silero_check_time > 0.1: + self.silero_check_time = 0 + + if self.use_extended_logging: + logger.debug('Debug: Handling wake word timeout') + # Handle wake word timeout (waited to long initiating + # speech after wake word detection) + if self.wake_word_detect_time and time.time() - \ + self.wake_word_detect_time > self.wake_word_timeout: + + self.wake_word_detect_time = 0 + if self.wakeword_detected and self.on_wakeword_timeout: + if self.use_extended_logging: + logger.debug('Debug: Calling on_wakeword_timeout') + self._run_callback(self.on_wakeword_timeout) + self.wakeword_detected = False + + if self.use_extended_logging: + logger.debug('Debug: Updating was_recording') + was_recording = self.is_recording + + if self.use_extended_logging: + logger.debug('Debug: Checking if recording and not failed stop attempt') + if self.is_recording and not failed_stop_attempt: + if self.use_extended_logging: + logger.debug('Debug: Appending data to frames') + self.frames.append(data) + + if self.use_extended_logging: + logger.debug('Debug: Checking if not recording or speech end silence start') + if not self.is_recording or self.speech_end_silence_start: + if self.use_extended_logging: + logger.debug('Debug: Appending data to audio buffer') + self.audio_buffer.append(data) + + except Exception as e: + logger.debug('Debug: Caught exception in main try block') + if not self.interrupt_stop_event.is_set(): + logger.error(f"Unhandled exeption in _recording_worker: {e}", exc_info=True) + raise + + if self.use_extended_logging: + logger.debug('Debug: Exiting _recording_worker method') + + + + + def _realtime_worker(self): + """ + Performs real-time transcription if the feature is enabled. + + The method is responsible transcribing recorded audio frames + in real-time based on the specified resolution interval. + The transcribed text is stored in `self.realtime_transcription_text` + and a callback + function is invoked with this text if specified. + """ + + try: + + logger.debug('Starting realtime worker') + + # Return immediately if real-time transcription is not enabled + if not self.enable_realtime_transcription: + return + + # Track time of last transcription + last_transcription_time = time.time() + + while self.is_running: + + if self.is_recording: + + # MODIFIED SLEEP LOGIC: + # Wait until realtime_processing_pause has elapsed, + # but check often so we can respond to changes quickly. 
+ while ( + time.time() - last_transcription_time + ) < self.realtime_processing_pause: + time.sleep(0.001) + if not self.is_running or not self.is_recording: + break + + if self.awaiting_speech_end: + time.sleep(0.001) + continue + + # Update transcription time + last_transcription_time = time.time() + + # Convert the buffer frames to a NumPy array + audio_array = np.frombuffer( + b''.join(self.frames), + dtype=np.int16 + ) + + logger.debug(f"Current realtime buffer size: {len(audio_array)}") + + # Normalize the array to a [-1, 1] range + audio_array = audio_array.astype(np.float32) / \ + INT16_MAX_ABS_VALUE + + if self.use_main_model_for_realtime: + with self.transcription_lock: + try: + self.parent_transcription_pipe.send((audio_array, self.language, True)) + if self.parent_transcription_pipe.poll(timeout=5): # Wait for 5 seconds + logger.debug("Receive from realtime worker after transcription request to main model") + status, result = self.parent_transcription_pipe.recv() + if status == 'success': + segments, info = result + self.detected_realtime_language = info.language if info.language_probability > 0 else None + self.detected_realtime_language_probability = info.language_probability + realtime_text = segments + logger.debug(f"Realtime text detected with main model: {realtime_text}") + else: + logger.error(f"Realtime transcription error: {result}") + continue + else: + logger.warning("Realtime transcription timed out") + continue + except Exception as e: + logger.error(f"Error in realtime transcription: {str(e)}", exc_info=True) + continue + else: + # Perform transcription and assemble the text + if self.normalize_audio: + # normalize audio to -0.95 dBFS + if audio_array is not None and audio_array.size > 0: + peak = np.max(np.abs(audio_array)) + if peak > 0: + audio_array = (audio_array / peak) * 0.95 + + if self.realtime_batch_size > 0: + segments, info = self.realtime_model_type.transcribe( + audio_array, + language=self.language if self.language else None, + beam_size=self.beam_size_realtime, + initial_prompt=self.initial_prompt_realtime, + suppress_tokens=self.suppress_tokens, + batch_size=self.realtime_batch_size, + vad_filter=self.faster_whisper_vad_filter + ) + else: + segments, info = self.realtime_model_type.transcribe( + audio_array, + language=self.language if self.language else None, + beam_size=self.beam_size_realtime, + initial_prompt=self.initial_prompt_realtime, + suppress_tokens=self.suppress_tokens, + vad_filter=self.faster_whisper_vad_filter + ) + + self.detected_realtime_language = info.language if info.language_probability > 0 else None + self.detected_realtime_language_probability = info.language_probability + realtime_text = " ".join( + seg.text for seg in segments + ) + logger.debug(f"Realtime text detected: {realtime_text}") + + # double check recording state + # because it could have changed mid-transcription + if self.is_recording and time.time() - \ + self.recording_start_time > self.init_realtime_after_seconds: + + self.realtime_transcription_text = realtime_text + self.realtime_transcription_text = \ + self.realtime_transcription_text.strip() + + self.text_storage.append( + self.realtime_transcription_text + ) + + # Take the last two texts in storage, if they exist + if len(self.text_storage) >= 2: + last_two_texts = self.text_storage[-2:] + + # Find the longest common prefix + # between the two texts + prefix = os.path.commonprefix( + [last_two_texts[0], last_two_texts[1]] + ) + + # This prefix is the text that was transcripted + # two times in the 
same way + # Store as "safely detected text" + if len(prefix) >= \ + len(self.realtime_stabilized_safetext): + + # Only store when longer than the previous + # as additional security + self.realtime_stabilized_safetext = prefix + + # Find parts of the stabilized text + # in the freshly transcripted text + matching_pos = self._find_tail_match_in_text( + self.realtime_stabilized_safetext, + self.realtime_transcription_text + ) + + if matching_pos < 0: + # pick which text to send + text_to_send = ( + self.realtime_stabilized_safetext + if self.realtime_stabilized_safetext + else self.realtime_transcription_text + ) + # preprocess once + processed = self._preprocess_output(text_to_send, True) + # invoke on its own thread + self._run_callback(self._on_realtime_transcription_stabilized, processed) + + else: + # We found parts of the stabilized text + # in the transcripted text + # We now take the stabilized text + # and add only the freshly transcripted part to it + output_text = self.realtime_stabilized_safetext + \ + self.realtime_transcription_text[matching_pos:] + + # This yields us the "left" text part as stabilized + # AND at the same time delivers fresh detected + # parts on the first run without the need for + # two transcriptions + self._run_callback(self._on_realtime_transcription_stabilized, self._preprocess_output(output_text, True)) + + # Invoke the callback with the transcribed text + self._run_callback(self._on_realtime_transcription_update, self._preprocess_output(self.realtime_transcription_text,True)) + + # If not recording, sleep briefly before checking again + else: + time.sleep(TIME_SLEEP) + + except Exception as e: + logger.error(f"Unhandled exeption in _realtime_worker: {e}", exc_info=True) + raise + + def _is_silero_speech(self, chunk): + """ + Returns true if speech is detected in the provided audio data + + Args: + data (bytes): raw bytes of audio data (1024 raw bytes with + 16000 sample rate and 16 bits per sample) + """ + if self.sample_rate != 16000: + pcm_data = np.frombuffer(chunk, dtype=np.int16) + data_16000 = signal.resample_poly( + pcm_data, 16000, self.sample_rate) + chunk = data_16000.astype(np.int16).tobytes() + + self.silero_working = True + audio_chunk = np.frombuffer(chunk, dtype=np.int16) + audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE + vad_prob = self.silero_vad_model( + torch.from_numpy(audio_chunk), + SAMPLE_RATE).item() + is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity) + if is_silero_speech_active: + if not self.is_silero_speech_active and self.use_extended_logging: + logger.info(f"{bcolors.OKGREEN}Silero VAD detected speech{bcolors.ENDC}") + elif self.is_silero_speech_active and self.use_extended_logging: + logger.info(f"{bcolors.WARNING}Silero VAD detected silence{bcolors.ENDC}") + self.is_silero_speech_active = is_silero_speech_active + self.silero_working = False + return is_silero_speech_active + + def _is_webrtc_speech(self, chunk, all_frames_must_be_true=False): + """ + Returns true if speech is detected in the provided audio data + + Args: + data (bytes): raw bytes of audio data (1024 raw bytes with + 16000 sample rate and 16 bits per sample) + """ + speech_str = f"{bcolors.OKGREEN}WebRTC VAD detected speech{bcolors.ENDC}" + silence_str = f"{bcolors.WARNING}WebRTC VAD detected silence{bcolors.ENDC}" + if self.sample_rate != 16000: + pcm_data = np.frombuffer(chunk, dtype=np.int16) + data_16000 = signal.resample_poly( + pcm_data, 16000, self.sample_rate) + chunk = data_16000.astype(np.int16).tobytes() + 
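# A standalone version of the resample-to-16 kHz step right above, using
# scipy's polyphase resampler on 16-bit PCM bytes. The source rate is an
# illustrative parameter.
import numpy as np
from scipy import signal

def to_16k_bytes(chunk_bytes, original_rate):
    if original_rate == 16000:
        return chunk_bytes
    pcm = np.frombuffer(chunk_bytes, dtype=np.int16)
    pcm_16k = signal.resample_poly(pcm, 16000, original_rate)
    return pcm_16k.astype(np.int16).tobytes()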
+ # Number of audio frames per millisecond + frame_length = int(16000 * 0.01) # for 10ms frame + num_frames = int(len(chunk) / (2 * frame_length)) + speech_frames = 0 + + for i in range(num_frames): + start_byte = i * frame_length * 2 + end_byte = start_byte + frame_length * 2 + frame = chunk[start_byte:end_byte] + if self.webrtc_vad_model.is_speech(frame, 16000): + speech_frames += 1 + if not all_frames_must_be_true: + if self.debug_mode: + logger.info(f"Speech detected in frame {i + 1}" + f" of {num_frames}") + if not self.is_webrtc_speech_active and self.use_extended_logging: + logger.info(speech_str) + self.is_webrtc_speech_active = True + return True + if all_frames_must_be_true: + if self.debug_mode and speech_frames == num_frames: + logger.info(f"Speech detected in {speech_frames} of " + f"{num_frames} frames") + elif self.debug_mode: + logger.info(f"Speech not detected in all {num_frames} frames") + speech_detected = speech_frames == num_frames + if speech_detected and not self.is_webrtc_speech_active and self.use_extended_logging: + logger.info(speech_str) + elif not speech_detected and self.is_webrtc_speech_active and self.use_extended_logging: + logger.info(silence_str) + self.is_webrtc_speech_active = speech_detected + return speech_detected + else: + if self.debug_mode: + logger.info(f"Speech not detected in any of {num_frames} frames") + if self.is_webrtc_speech_active and self.use_extended_logging: + logger.info(silence_str) + self.is_webrtc_speech_active = False + return False + + def _check_voice_activity(self, data): + """ + Initiate check if voice is active based on the provided data. + + Args: + data: The audio data to be checked for voice activity. + """ + self._is_webrtc_speech(data) + + # First quick performing check for voice activity using WebRTC + if self.is_webrtc_speech_active: + + if not self.silero_working: + self.silero_working = True + + # Run the intensive check in a separate thread + threading.Thread( + target=self._is_silero_speech, + args=(data,)).start() + + def clear_audio_queue(self): + """ + Safely empties the audio queue to ensure no remaining audio + fragments get processed e.g. after waking up the recorder. + """ + self.audio_buffer.clear() + try: + while True: + self.audio_queue.get_nowait() + except: + # PyTorch's mp.Queue doesn't have a specific Empty exception + # so we catch any exception that might occur when the queue is empty + pass + + def _is_voice_active(self): + """ + Determine if voice is active. + + Returns: + bool: True if voice is active, False otherwise. + """ + return self.is_webrtc_speech_active and self.is_silero_speech_active + + def _set_state(self, new_state): + """ + Update the current state of the recorder and execute + corresponding state-change callbacks. + + Args: + new_state (str): The new state to set. 
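# A minimal standalone sketch of the 10 ms frame loop above, assuming the
# webrtcvad package; aggressiveness and sample rate are illustrative.
import webrtcvad

def any_frame_is_speech(chunk_bytes, sample_rate=16000, aggressiveness=3):
    vad = webrtcvad.Vad(aggressiveness)
    frame_samples = int(sample_rate * 0.01)      # 10 ms worth of samples
    frame_bytes = frame_samples * 2              # 16-bit mono PCM
    for start in range(0, len(chunk_bytes) - frame_bytes + 1, frame_bytes):
        if vad.is_speech(chunk_bytes[start:start + frame_bytes], sample_rate):
            return True
    return False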
+ + """ + # Check if the state has actually changed + if new_state == self.state: + return + + # Store the current state for later comparison + old_state = self.state + + # Update to the new state + self.state = new_state + + # Log the state change + logger.info(f"State changed from '{old_state}' to '{new_state}'") + + # Execute callbacks based on transitioning FROM a particular state + if old_state == "listening": + if self.on_vad_detect_stop: + self._run_callback(self.on_vad_detect_stop) + elif old_state == "wakeword": + if self.on_wakeword_detection_end: + self._run_callback(self.on_wakeword_detection_end) + + # Execute callbacks based on transitioning TO a particular state + if new_state == "listening": + if self.on_vad_detect_start: + self._run_callback(self.on_vad_detect_start) + self._set_spinner("speak now") + if self.spinner and self.halo: + self.halo._interval = 250 + elif new_state == "wakeword": + if self.on_wakeword_detection_start: + self._run_callback(self.on_wakeword_detection_start) + self._set_spinner(f"say {self.wake_words}") + if self.spinner and self.halo: + self.halo._interval = 500 + elif new_state == "transcribing": + self._set_spinner("transcribing") + if self.spinner and self.halo: + self.halo._interval = 50 + elif new_state == "recording": + self._set_spinner("recording") + if self.spinner and self.halo: + self.halo._interval = 100 + elif new_state == "inactive": + if self.spinner and self.halo: + self.halo.stop() + self.halo = None + + def _set_spinner(self, text): + """ + Update the spinner's text or create a new + spinner with the provided text. + + Args: + text (str): The text to be displayed alongside the spinner. + """ + if self.spinner: + # If the Halo spinner doesn't exist, create and start it + if self.halo is None: + self.halo = halo.Halo(text=text) + self.halo.start() + # If the Halo spinner already exists, just update the text + else: + self.halo.text = text + + def _preprocess_output(self, text, preview=False): + """ + Preprocesses the output text by removing any leading or trailing + whitespace, converting all whitespace sequences to a single space + character, and capitalizing the first character of the text. + + Args: + text (str): The text to be preprocessed. + + Returns: + str: The preprocessed text. + """ + text = re.sub(r'\s+', ' ', text.strip()) + + if self.ensure_sentence_starting_uppercase: + if text: + text = text[0].upper() + text[1:] + + # Ensure the text ends with a proper punctuation + # if it ends with an alphanumeric character + if not preview: + if self.ensure_sentence_ends_with_period: + if text and text[-1].isalnum(): + text += '.' + + return text + + def _find_tail_match_in_text(self, text1, text2, length_of_match=10): + """ + Find the position where the last 'n' characters of text1 + match with a substring in text2. + + This method takes two texts, extracts the last 'n' characters from + text1 (where 'n' is determined by the variable 'length_of_match'), and + searches for an occurrence of this substring in text2, starting from + the end of text2 and moving towards the beginning. + + Parameters: + - text1 (str): The text containing the substring that we want to find + in text2. + - text2 (str): The text in which we want to find the matching + substring. + - length_of_match(int): The length of the matching string that we are + looking for + + Returns: + int: The position (0-based index) in text2 where the matching + substring starts. If no match is found or either of the texts is + too short, returns -1. 
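# A standalone sketch of the clean-up performed by _preprocess_output above:
# collapse whitespace, capitalize the first character, and close with a period
# when the text ends alphanumerically.
import re

def tidy(text, preview=False):
    text = re.sub(r'\s+', ' ', text.strip())
    if text:
        text = text[0].upper() + text[1:]
    if not preview and text and text[-1].isalnum():
        text += '.'
    return text

print(tidy("  hello   world "))   # -> "Hello world."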
+ """ + + # Check if either of the texts is too short + if len(text1) < length_of_match or len(text2) < length_of_match: + return -1 + + # The end portion of the first text that we want to compare + target_substring = text1[-length_of_match:] + + # Loop through text2 from right to left + for i in range(len(text2) - length_of_match + 1): + # Extract the substring from text2 + # to compare with the target_substring + current_substring = text2[len(text2) - i - length_of_match: + len(text2) - i] + + # Compare the current_substring with the target_substring + if current_substring == target_substring: + # Position in text2 where the match starts + return len(text2) - i + + return -1 + + def _on_realtime_transcription_stabilized(self, text): + """ + Callback method invoked when the real-time transcription stabilizes. + + This method is called internally when the transcription text is + considered "stable" meaning it's less likely to change significantly + with additional audio input. It notifies any registered external + listener about the stabilized text if recording is still ongoing. + This is particularly useful for applications that need to display + live transcription results to users and want to highlight parts of the + transcription that are less likely to change. + + Args: + text (str): The stabilized transcription text. + """ + if self.on_realtime_transcription_stabilized: + if self.is_recording: + self._run_callback(self.on_realtime_transcription_stabilized, text) + + def _on_realtime_transcription_update(self, text): + """ + Callback method invoked when there's an update in the real-time + transcription. + + This method is called internally whenever there's a change in the + transcription text, notifying any registered external listener about + the update if recording is still ongoing. This provides a mechanism + for applications to receive and possibly display live transcription + updates, which could be partial and still subject to change. + + Args: + text (str): The updated transcription text. + """ + if self.on_realtime_transcription_update: + if self.is_recording: + self._run_callback(self.on_realtime_transcription_update, text) + + def __enter__(self): + """ + Method to setup the context manager protocol. + + This enables the instance to be used in a `with` statement, ensuring + proper resource management. When the `with` block is entered, this + method is automatically called. + + Returns: + self: The current instance of the class. + """ + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ + Method to define behavior when the context manager protocol exits. + + This is called when exiting the `with` block and ensures that any + necessary cleanup or resource release processes are executed, such as + shutting down the system properly. + + Args: + exc_type (Exception or None): The type of the exception that + caused the context to be exited, if any. + exc_value (Exception or None): The exception instance that caused + the context to be exited, if any. + traceback (Traceback or None): The traceback corresponding to the + exception, if any. 
+ """ + self.shutdown() \ No newline at end of file diff --git a/minimal_server/RealtimeSTT/audio_recorder_client.py b/minimal_server/RealtimeSTT/audio_recorder_client.py new file mode 100644 index 00000000..89478c82 --- /dev/null +++ b/minimal_server/RealtimeSTT/audio_recorder_client.py @@ -0,0 +1,881 @@ +log_outgoing_chunks = False +debug_mode = False + +from typing import Iterable, List, Optional, Union +from urllib.parse import urlparse +from datetime import datetime +from websocket import WebSocketApp +from websocket import ABNF +import numpy as np +import subprocess +import threading +import platform +import logging +import struct +import base64 +import wave +import json +import time +import sys +import os + +# Import the AudioInput class +from .audio_input import AudioInput + +DEFAULT_CONTROL_URL = "ws://127.0.0.1:8011" +DEFAULT_DATA_URL = "ws://127.0.0.1:8012" + +INIT_MODEL_TRANSCRIPTION = "tiny" +INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny" +INIT_REALTIME_PROCESSING_PAUSE = 0.2 +INIT_REALTIME_INITIAL_PAUSE = 0.2 +INIT_SILERO_SENSITIVITY = 0.4 +INIT_WEBRTC_SENSITIVITY = 3 +INIT_POST_SPEECH_SILENCE_DURATION = 0.6 +INIT_MIN_LENGTH_OF_RECORDING = 0.5 +INIT_MIN_GAP_BETWEEN_RECORDINGS = 0 +INIT_WAKE_WORDS_SENSITIVITY = 0.6 +INIT_PRE_RECORDING_BUFFER_DURATION = 1.0 +INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0 +INIT_WAKE_WORD_TIMEOUT = 5.0 +INIT_WAKE_WORD_BUFFER_DURATION = 0.1 +ALLOWED_LATENCY_LIMIT = 100 + +BUFFER_SIZE = 512 +SAMPLE_RATE = 16000 + +INIT_HANDLE_BUFFER_OVERFLOW = False +if platform.system() != 'Darwin': + INIT_HANDLE_BUFFER_OVERFLOW = True + +# Define ANSI color codes for terminal output +class bcolors: + HEADER = '\033[95m' # Magenta + OKBLUE = '\033[94m' # Blue + OKCYAN = '\033[96m' # Cyan + OKGREEN = '\033[92m' # Green + WARNING = '\033[93m' # Yellow + FAIL = '\033[91m' # Red + ENDC = '\033[0m' # Reset to default + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + +def format_timestamp_ns(timestamp_ns: int) -> str: + # Split into whole seconds and the nanosecond remainder + seconds = timestamp_ns // 1_000_000_000 + remainder_ns = timestamp_ns % 1_000_000_000 + + # Convert seconds part into a datetime object (local time) + dt = datetime.fromtimestamp(seconds) + + # Format the main time as HH:MM:SS + time_str = dt.strftime("%H:%M:%S") + + # For instance, if you want milliseconds, divide the remainder by 1e6 and format as 3-digit + milliseconds = remainder_ns // 1_000_000 + formatted_timestamp = f"{time_str}.{milliseconds:03d}" + + return formatted_timestamp + +class AudioToTextRecorderClient: + """ + A class responsible for capturing audio from the microphone, detecting + voice activity, and then transcribing the captured audio using the + `faster_whisper` model. 
+ """ + + def __init__(self, + model: str = INIT_MODEL_TRANSCRIPTION, + download_root: str = None, + language: str = "", + compute_type: str = "default", + input_device_index: int = None, + gpu_device_index: Union[int, List[int]] = 0, + device: str = "cuda", + on_recording_start=None, + on_recording_stop=None, + on_transcription_start=None, + ensure_sentence_starting_uppercase=True, + ensure_sentence_ends_with_period=True, + use_microphone=True, + spinner=True, + level=logging.WARNING, + batch_size: int = 16, + + # Realtime transcription parameters + enable_realtime_transcription=False, + use_main_model_for_realtime=False, + realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME, + realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE, + init_realtime_after_seconds=INIT_REALTIME_INITIAL_PAUSE, + on_realtime_transcription_update=None, + on_realtime_transcription_stabilized=None, + realtime_batch_size: int = 16, + + # Voice activation parameters + silero_sensitivity: float = INIT_SILERO_SENSITIVITY, + silero_use_onnx: bool = False, + silero_deactivity_detection: bool = False, + webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY, + post_speech_silence_duration: float = ( + INIT_POST_SPEECH_SILENCE_DURATION + ), + min_length_of_recording: float = ( + INIT_MIN_LENGTH_OF_RECORDING + ), + min_gap_between_recordings: float = ( + INIT_MIN_GAP_BETWEEN_RECORDINGS + ), + pre_recording_buffer_duration: float = ( + INIT_PRE_RECORDING_BUFFER_DURATION + ), + on_vad_start=None, + on_vad_stop=None, + on_vad_detect_start=None, + on_vad_detect_stop=None, + on_turn_detection_start=None, + on_turn_detection_stop=None, + + # Wake word parameters + wakeword_backend: str = "pvporcupine", + openwakeword_model_paths: str = None, + openwakeword_inference_framework: str = "onnx", + wake_words: str = "", + wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY, + wake_word_activation_delay: float = ( + INIT_WAKE_WORD_ACTIVATION_DELAY + ), + wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT, + wake_word_buffer_duration: float = INIT_WAKE_WORD_BUFFER_DURATION, + on_wakeword_detected=None, + on_wakeword_timeout=None, + on_wakeword_detection_start=None, + on_wakeword_detection_end=None, + on_recorded_chunk=None, + debug_mode=False, + handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW, + beam_size: int = 5, + beam_size_realtime: int = 3, + buffer_size: int = BUFFER_SIZE, + sample_rate: int = SAMPLE_RATE, + initial_prompt: Optional[Union[str, Iterable[int]]] = None, + initial_prompt_realtime: Optional[Union[str, Iterable[int]]] = None, + suppress_tokens: Optional[List[int]] = [-1], + print_transcription_time: bool = False, + early_transcription_on_silence: int = 0, + allowed_latency_limit: int = ALLOWED_LATENCY_LIMIT, + no_log_file: bool = False, + use_extended_logging: bool = False, + + # Server urls + control_url: str = DEFAULT_CONTROL_URL, + data_url: str = DEFAULT_DATA_URL, + autostart_server: bool = True, + output_wav_file: str = None, + faster_whisper_vad_filter: bool = False, + ): + + # Set instance variables from constructor parameters + self.model = model + self.language = language + self.compute_type = compute_type + self.input_device_index = input_device_index + self.gpu_device_index = gpu_device_index + self.device = device + self.on_recording_start = on_recording_start + self.on_recording_stop = on_recording_stop + self.on_transcription_start = on_transcription_start + self.ensure_sentence_starting_uppercase = ensure_sentence_starting_uppercase + self.ensure_sentence_ends_with_period = 
ensure_sentence_ends_with_period + self.use_microphone = use_microphone + self.spinner = spinner + self.level = level + self.batch_size = batch_size + self.init_realtime_after_seconds = init_realtime_after_seconds + self.realtime_batch_size = realtime_batch_size + + # Real-time transcription parameters + self.enable_realtime_transcription = enable_realtime_transcription + self.use_main_model_for_realtime = use_main_model_for_realtime + self.download_root = download_root + self.realtime_model_type = realtime_model_type + self.realtime_processing_pause = realtime_processing_pause + self.on_realtime_transcription_update = on_realtime_transcription_update + self.on_realtime_transcription_stabilized = on_realtime_transcription_stabilized + + # Voice activation parameters + self.silero_sensitivity = silero_sensitivity + self.silero_use_onnx = silero_use_onnx + self.silero_deactivity_detection = silero_deactivity_detection + self.webrtc_sensitivity = webrtc_sensitivity + self.post_speech_silence_duration = post_speech_silence_duration + self.min_length_of_recording = min_length_of_recording + self.min_gap_between_recordings = min_gap_between_recordings + self.pre_recording_buffer_duration = pre_recording_buffer_duration + + self.on_vad_start = on_vad_start + self.on_vad_stop = on_vad_stop + self.on_vad_detect_start = on_vad_detect_start + self.on_vad_detect_stop = on_vad_detect_stop + self.on_turn_detection_start = on_turn_detection_start + self.on_turn_detection_stop = on_turn_detection_stop + + # Wake word parameters + self.wakeword_backend = wakeword_backend + self.openwakeword_model_paths = openwakeword_model_paths + self.openwakeword_inference_framework = openwakeword_inference_framework + self.wake_words = wake_words + self.wake_words_sensitivity = wake_words_sensitivity + self.wake_word_activation_delay = wake_word_activation_delay + self.wake_word_timeout = wake_word_timeout + self.wake_word_buffer_duration = wake_word_buffer_duration + self.on_wakeword_detected = on_wakeword_detected + self.on_wakeword_timeout = on_wakeword_timeout + self.on_wakeword_detection_start = on_wakeword_detection_start + self.on_wakeword_detection_end = on_wakeword_detection_end + self.on_recorded_chunk = on_recorded_chunk + self.debug_mode = debug_mode + self.handle_buffer_overflow = handle_buffer_overflow + self.beam_size = beam_size + self.beam_size_realtime = beam_size_realtime + self.buffer_size = buffer_size + self.sample_rate = sample_rate + self.initial_prompt = initial_prompt + self.initial_prompt_realtime = initial_prompt_realtime + self.suppress_tokens = suppress_tokens + self.print_transcription_time = print_transcription_time + self.early_transcription_on_silence = early_transcription_on_silence + self.allowed_latency_limit = allowed_latency_limit + self.no_log_file = no_log_file + self.use_extended_logging = use_extended_logging + self.faster_whisper_vad_filter = faster_whisper_vad_filter + + # Server URLs + self.control_url = control_url + self.data_url = data_url + self.autostart_server = autostart_server + self.output_wav_file = output_wav_file + + # Instance variables + self.muted = False + self.recording_thread = None + self.is_running = True + self.connection_established = threading.Event() + self.recording_start = threading.Event() + self.final_text_ready = threading.Event() + self.realtime_text = "" + self.final_text = "" + self._recording = False + self.server_already_running = False + self.wav_file = None + + self.request_counter = 0 + self.pending_requests = {} # Map from request_id to 
threading.Event and value + + if self.debug_mode: + print("Checking STT server") + if not self.connect(): + print("Failed to connect to the server.", file=sys.stderr) + else: + if self.debug_mode: + print("STT server is running and connected.") + + if self.use_microphone: + self.start_recording() + + + if self.server_already_running: + if not self.connection_established.wait(timeout=10): + print("Server connection not established within 10 seconds.") + else: + self.set_parameter("language", self.language) + print(f"Language set to {self.language}") + self.set_parameter("wake_word_activation_delay", self.wake_word_activation_delay) + print(f"Wake word activation delay set to {self.wake_word_activation_delay}") + + def text(self, on_transcription_finished=None): + self.realtime_text = "" + self.submitted_realtime_text = "" + self.final_text = "" + self.final_text_ready.clear() + + self.recording_start.set() + + try: + total_wait_time = 0 + wait_interval = 0.02 # Wait in small intervals, e.g., 100ms + max_wait_time = 60 # Timeout after 60 seconds + + while total_wait_time < max_wait_time and self.is_running and self._recording: + if self.final_text_ready.wait(timeout=wait_interval): + break # Break if transcription is ready + + if not self.is_running or not self._recording: + break + + total_wait_time += wait_interval + + # Check if a manual interrupt has occurred + if total_wait_time >= max_wait_time: + if self.debug_mode: + print("Timeout while waiting for text from the server.") + self.recording_start.clear() + if on_transcription_finished: + threading.Thread(target=on_transcription_finished, args=("",)).start() + return "" + + self.recording_start.clear() + + if not self.is_running or not self._recording: + return "" + + if on_transcription_finished: + threading.Thread(target=on_transcription_finished, args=(self.final_text,)).start() + + return self.final_text + + except KeyboardInterrupt: + if self.debug_mode: + print("KeyboardInterrupt in text(), exiting...") + raise KeyboardInterrupt + + except Exception as e: + print(f"Error in AudioToTextRecorderClient.text(): {e}") + return "" + + def feed_audio(self, chunk, audio_meta_data, original_sample_rate=16000): + # Start with the base metadata + metadata = {"sampleRate": original_sample_rate} + + # Merge additional metadata if provided + if audio_meta_data: + server_sent_to_stt_ns = time.time_ns() + audio_meta_data["server_sent_to_stt"] = server_sent_to_stt_ns + metadata["server_sent_to_stt_formatted"] = format_timestamp_ns(server_sent_to_stt_ns) + + metadata.update(audio_meta_data) + + # Convert metadata to JSON and prepare the message + metadata_json = json.dumps(metadata) + metadata_length = len(metadata_json) + message = struct.pack(' %s", self.name, data) + self._pipe.send(data) + request["result_queue"].put(None) + + elif request["type"] == "RECV": + logger.debug("[%s] Worker: receiving...", self.name) + data = self._pipe.recv() + request["result_queue"].put(data) + + elif request["type"] == "POLL": + timeout = request.get("timeout", 0.0) + logger.debug("[%s] Worker: poll() with timeout: %s", self.name, timeout) + result = self._pipe.poll(timeout) + request["result_queue"].put(result) + + except (EOFError, BrokenPipeError, OSError) as e: + # When the other end has closed or an error occurs, + # log and notify the waiting thread. + logger.debug("[%s] Worker: pipe closed or error occurred (%s). 
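Putting the constructor and the polling `text()` loop above together, a caller might drive the client roughly like this. A sketch under stated assumptions: an STT server is already reachable on the default ports, and `AudioToTextRecorderClient` is importable from the `RealtimeSTT` package.

```python
from RealtimeSTT import AudioToTextRecorderClient

client = AudioToTextRecorderClient(
    language="es",
    control_url="ws://127.0.0.1:8011",
    data_url="ws://127.0.0.1:8012",
    autostart_server=False,  # assume the server is managed separately
    enable_realtime_transcription=True,
    on_realtime_transcription_update=lambda text: print("partial:", text),
)

try:
    while True:
        # text() blocks (up to its internal 60 s timeout) until the server
        # reports a final transcription for the current utterance.
        sentence = client.text()
        if sentence:
            print("final:", sentence)
except KeyboardInterrupt:
    pass
```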
Shutting down.", self.name, e) + request["result_queue"].put(None) + break + + except Exception as e: + logger.exception("[%s] Worker: unexpected error.", self.name) + request["result_queue"].put(e) + break + + logger.debug("[%s] Worker: stopping.", self.name) + try: + self._pipe.close() + except Exception as e: + logger.debug("[%s] Worker: error during pipe close: %s", self.name, e) + + def send(self, data): + """ + Synchronously asks the worker thread to perform .send(). + """ + if self._closed: + logger.debug("[%s] send() called but pipe is already closed", self.name) + return + logger.debug("[%s] send() requested with: %s", self.name, data) + result_queue = queue.Queue() + request = { + "type": "SEND", + "data": data, + "result_queue": result_queue + } + self._request_queue.put(request) + result_queue.get() # Wait until sending completes. + logger.debug("[%s] send() completed", self.name) + + def recv(self): + """ + Synchronously asks the worker to perform .recv() and returns the data. + """ + if self._closed: + logger.debug("[%s] recv() called but pipe is already closed", self.name) + return None + logger.debug("[%s] recv() requested", self.name) + result_queue = queue.Queue() + request = { + "type": "RECV", + "result_queue": result_queue + } + self._request_queue.put(request) + data = result_queue.get() + + # Log a preview for huge byte blobs. + if isinstance(data, tuple) and len(data) == 2 and isinstance(data[1], bytes): + data_preview = (data[0], f"<{len(data[1])} bytes>") + else: + data_preview = data + logger.debug("[%s] recv() returning => %s", self.name, data_preview) + return data + + def poll(self, timeout=0.0): + """ + Synchronously checks whether data is available. + Returns True if data is ready, or False otherwise. + """ + if self._closed: + return False + logger.debug("[%s] poll() requested with timeout: %s", self.name, timeout) + result_queue = queue.Queue() + request = { + "type": "POLL", + "timeout": timeout, + "result_queue": result_queue + } + self._request_queue.put(request) + try: + # Use a slightly longer timeout to give the worker a chance. + result = result_queue.get(timeout=timeout + 0.1) + except queue.Empty: + result = False + logger.debug("[%s] poll() returning => %s", self.name, result) + return result + + def close(self): + """ + Closes the pipe and stops the worker thread. The _closed flag makes + sure no further operations are attempted. + """ + if self._closed: + return + logger.debug("[%s] close() called", self.name) + self._closed = True + stop_request = {"type": "CLOSE", "result_queue": queue.Queue()} + self._request_queue.put(stop_request) + self._stop_event.set() + self._worker_thread.join() + logger.debug("[%s] closed", self.name) + + +def SafePipe(debug=False): + """ + Returns a pair: (thread-safe parent pipe, raw child pipe). + """ + parent_synthesize_pipe, child_synthesize_pipe = mp.Pipe() + parent_pipe = ParentPipe(parent_synthesize_pipe) + return parent_pipe, child_synthesize_pipe + + +def child_process_code(child_end): + """ + Example child process code that receives messages, logs them, + sends acknowledgements, and then closes. + """ + for i in range(3): + msg = child_end.recv() + logger.debug("[Child] got: %s", msg) + child_end.send(f"ACK: {msg}") + child_end.close() + + +if __name__ == "__main__": + parent_pipe, child_pipe = SafePipe() + + # Create child process with the child_process_code function. + p = mp.Process(target=child_process_code, args=(child_pipe,)) + p.start() + + # Event to signal sender threads to stop if needed. 
+ stop_polling_event = threading.Event() + + def sender_thread(n): + try: + parent_pipe.send(f"hello_from_thread_{n}") + except Exception as e: + logger.debug("[sender_thread_%s] send exception: %s", n, e) + return + + # Use a poll loop with error handling. + for _ in range(10): + try: + if parent_pipe.poll(0.1): + reply = parent_pipe.recv() + logger.debug("[sender_thread_%s] got: %s", n, reply) + break + else: + logger.debug("[sender_thread_%s] no data yet...", n) + except (OSError, EOFError, BrokenPipeError) as e: + logger.debug("[sender_thread_%s] poll/recv exception: %s. Exiting thread.", n, e) + break + + # Allow exit if a shutdown is signaled. + if stop_polling_event.is_set(): + logger.debug("[sender_thread_%s] stop event set. Exiting thread.", n) + break + + threads = [] + for i in range(3): + t = threading.Thread(target=sender_thread, args=(i,)) + t.start() + threads.append(t) + + for t in threads: + t.join() + + # Signal shutdown to any polling threads, then close the pipe. + stop_polling_event.set() + parent_pipe.close() + p.join() diff --git a/minimal_server/RealtimeSTT/server.py b/minimal_server/RealtimeSTT/server.py new file mode 100644 index 00000000..516e10c4 --- /dev/null +++ b/minimal_server/RealtimeSTT/server.py @@ -0,0 +1,23 @@ +from fastapi import FastAPI, WebSocket +from RealtimeSTT.audio_recorder import AudioToTextRecorder +import numpy as np + +app = FastAPI() + +recorder = AudioToTextRecorder( + model="tiny", + device="cuda", + compute_type="float16", + use_microphone=False, +) + +@app.websocket("/ws/transcribe") +async def websocket_endpoint(websocket: WebSocket): + await websocket.accept() + while True: + data = await websocket.receive_bytes() + # Convert the incoming bytes to a numpy array (adjust to your audio format) + audio = np.frombuffer(data, dtype=np.float32) + recorder.feed_audio(audio) + text = recorder.text() + await websocket.send_text(text) \ No newline at end of file diff --git a/minimal_server/RealtimeSTT/warmup_audio.wav b/minimal_server/RealtimeSTT/warmup_audio.wav new file mode 100644 index 00000000..31458f5e Binary files /dev/null and b/minimal_server/RealtimeSTT/warmup_audio.wav differ diff --git a/minimal_server/__pycache__/install_packages.cpython-311.pyc b/minimal_server/__pycache__/install_packages.cpython-311.pyc new file mode 100644 index 00000000..5402f01e Binary files /dev/null and b/minimal_server/__pycache__/install_packages.cpython-311.pyc differ diff --git a/minimal_server/__pycache__/stt_server.cpython-311.pyc b/minimal_server/__pycache__/stt_server.cpython-311.pyc new file mode 100644 index 00000000..a6359bdc Binary files /dev/null and b/minimal_server/__pycache__/stt_server.cpython-311.pyc differ diff --git a/minimal_server/server/__pycache__/install_packages.cpython-311.pyc b/minimal_server/server/__pycache__/install_packages.cpython-311.pyc new file mode 100644 index 00000000..23155347 Binary files /dev/null and b/minimal_server/server/__pycache__/install_packages.cpython-311.pyc differ diff --git a/minimal_server/server/__pycache__/stt_server.cpython-311.pyc b/minimal_server/server/__pycache__/stt_server.cpython-311.pyc new file mode 100644 index 00000000..af756425 Binary files /dev/null and b/minimal_server/server/__pycache__/stt_server.cpython-311.pyc differ diff --git a/minimal_server/server/install_packages.py b/minimal_server/server/install_packages.py new file mode 100644 index 00000000..9b5a9b16 --- /dev/null +++ b/minimal_server/server/install_packages.py @@ -0,0 +1,55 @@ +import subprocess +import sys +import importlib + +def 
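server.py above exposes a single FastAPI WebSocket endpoint that treats each binary frame as float32 PCM and replies with the transcribed text. A hypothetical client sketch for it; the port (uvicorn's default 8000), the silent placeholder audio, and the use of the `websockets` package are assumptions:

```python
import asyncio

import numpy as np
import websockets

async def transcribe_once(samples: np.ndarray,
                          url: str = "ws://localhost:8000/ws/transcribe") -> str:
    # The endpoint decodes the payload with np.frombuffer(..., dtype=np.float32),
    # so the client must send raw float32 bytes.
    payload = samples.astype(np.float32).tobytes()
    async with websockets.connect(url) as ws:
        await ws.send(payload)
        return await ws.recv()

if __name__ == "__main__":
    silence = np.zeros(16000, dtype=np.float32)  # one second of placeholder audio
    print(asyncio.run(transcribe_once(silence)))
```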
check_and_install_packages(packages): + """ + Checks if the specified packages are installed, and if not, prompts the user + to install them. + + Parameters: + - packages: A list of dictionaries, each containing: + - 'module_name': The module or package name to import. + - 'attribute': (Optional) The attribute or class to check within the module. + - 'install_name': The name used in the pip install command. + - 'version': (Optional) Version constraint for the package. + """ + for package in packages: + module_name = package['module_name'] + attribute = package.get('attribute') + install_name = package.get('install_name', module_name) + version = package.get('version', '') + + try: + # Attempt to import the module + module = importlib.import_module(module_name) + # If an attribute is specified, check if it exists + if attribute: + getattr(module, attribute) + except (ImportError, AttributeError): + user_input = input( + f"This program requires '{module_name}'" + f"{'' if not attribute else ' with attribute ' + attribute}, which is not installed or missing.\n" + f"Do you want to install '{install_name}' now? (y/n): " + ) + if user_input.strip().lower() == 'y': + try: + # Build the pip install command + install_command = [sys.executable, "-m", "pip", "install"] + if version: + install_command.append(f"{install_name}{version}") + else: + install_command.append(install_name) + + subprocess.check_call(install_command) + # Try to import again after installation + module = importlib.import_module(module_name) + if attribute: + getattr(module, attribute) + print(f"Successfully installed '{install_name}'.") + except Exception as e: + print(f"An error occurred while installing '{install_name}': {e}") + sys.exit(1) + else: + print(f"The program requires '{install_name}' to run. Exiting...") + sys.exit(1) diff --git a/minimal_server/server/stt_server.py b/minimal_server/server/stt_server.py new file mode 100644 index 00000000..880ccac4 --- /dev/null +++ b/minimal_server/server/stt_server.py @@ -0,0 +1,913 @@ +""" +Speech-to-Text (STT) Server with Real-Time Transcription and WebSocket Interface + +This server provides real-time speech-to-text (STT) transcription using the RealtimeSTT library. It allows clients to connect via WebSocket to send audio data and receive real-time transcription updates. The server supports configurable audio recording parameters, voice activity detection (VAD), and wake word detection. It is designed to handle continuous transcription as well as post-recording processing, enabling real-time feedback with the option to improve final transcription quality after the complete sentence is recognized. + +### Features: +- Real-time transcription using pre-configured or user-defined STT models. +- WebSocket-based communication for control and data handling. +- Flexible recording and transcription options, including configurable pauses for sentence detection. +- Supports Silero and WebRTC VAD for robust voice activity detection. + +### Starting the Server: +You can start the server using the command-line interface (CLI) command `stt-server`, passing the desired configuration options. + +```bash +stt-server [OPTIONS] +``` + +### Available Parameters: + - `-m, --model`: Model path or size; default 'large-v2'. + - `-r, --rt-model, --realtime_model_type`: Real-time model size; default 'tiny'. + - `-l, --lang, --language`: Language code for transcription; default 'es'. + - `-i, --input-device, --input_device_index`: Audio input device index; default 1. 
+ - `-c, --control, --control_port`: WebSocket control port; default 8011. + - `-d, --data, --data_port`: WebSocket data port; default 8012. + - `-w, --wake_words`: Wake word(s) to trigger listening; default "". + - `-D, --debug`: Enable debug logging. + - `-W, --write`: Save audio to WAV file. + - `-s, --silence_timing`: Enable dynamic silence duration for sentence detection; default True. + - `-b, --batch, --batch_size`: Batch size for inference; default 16. + - `--root, --download_root`: Specifies the root path were the Whisper models are downloaded to. + - `--silero_sensitivity`: Silero VAD sensitivity (0-1); default 0.05. + - `--silero_use_onnx`: Use Silero ONNX model; default False. + - `--webrtc_sensitivity`: WebRTC VAD sensitivity (0-3); default 3. + - `--min_length_of_recording`: Minimum recording duration in seconds; default 1.1. + - `--min_gap_between_recordings`: Min time between recordings in seconds; default 0. + - `--enable_realtime_transcription`: Enable real-time transcription; default True. + - `--realtime_processing_pause`: Pause between audio chunk processing; default 0.02. + - `--silero_deactivity_detection`: Use Silero for end-of-speech detection; default True. + - `--early_transcription_on_silence`: Start transcription after silence in seconds; default 0.2. + - `--beam_size`: Beam size for main model; default 5. + - `--beam_size_realtime`: Beam size for real-time model; default 3. + - `--init_realtime_after_seconds`: Initial waiting time for realtime transcription; default 0.2. + - `--realtime_batch_size`: Batch size for the real-time transcription model; default 16. + - `--initial_prompt`: Initial main transcription guidance prompt. + - `--initial_prompt_realtime`: Initial realtime transcription guidance prompt. + - `--end_of_sentence_detection_pause`: Silence duration for sentence end detection; default 0.5. + - `--unknown_sentence_detection_pause`: Pause duration for incomplete sentence detection; default 0.5. + - `--mid_sentence_detection_pause`: Pause for mid-sentence break; default 0.5. + - `--wake_words_sensitivity`: Wake word detection sensitivity (0-1); default 0.5. + - `--wake_word_timeout`: Wake word timeout in seconds; default 5.0. + - `--wake_word_activation_delay`: Delay before wake word activation; default 20. + - `--wakeword_backend`: Backend for wake word detection; default 'none'. + - `--openwakeword_model_paths`: Paths to OpenWakeWord models. + - `--openwakeword_inference_framework`: OpenWakeWord inference framework; default 'tensorflow'. + - `--wake_word_buffer_duration`: Wake word buffer duration in seconds; default 1.0. + - `--use_main_model_for_realtime`: Use main model for real-time transcription. + - `--use_extended_logging`: Enable extensive log messages. + - `--logchunks`: Log incoming audio chunks. + - `--compute_type`: Type of computation to use. + - `--input_device_index`: Index of the audio input device. + - `--gpu_device_index`: Index of the GPU device. + - `--device`: Device to use for computation. + - `--handle_buffer_overflow`: Handle buffer overflow during transcription. + - `--suppress_tokens`: Suppress tokens during transcription. + - `--allowed_latency_limit`: Allowed latency limit for real-time transcription. + - `--faster_whisper_vad_filter`: Enable VAD filter for Faster Whisper; default False. + + +### WebSocket Interface: +The server supports two WebSocket connections: +1. **Control WebSocket**: Used to send and receive commands, such as setting parameters or calling recorder methods. +2. 
**Data WebSocket**: Used to send audio data for transcription and receive real-time transcription updates. + +The server will broadcast real-time transcription updates to all connected clients on the data WebSocket. +""" + +from .install_packages import check_and_install_packages +from difflib import SequenceMatcher +from collections import deque +from datetime import datetime +import logging +import asyncio +import pyaudio +import base64 +import sys + + +debug_logging = False +extended_logging = False +send_recorded_chunk = False +log_incoming_chunks = False +silence_timing = False +writechunks = False +wav_file = None + +hard_break_even_on_background_noise = 3.0 +hard_break_even_on_background_noise_min_texts = 3 +hard_break_even_on_background_noise_min_similarity = 0.99 +hard_break_even_on_background_noise_min_chars = 15 + + +text_time_deque = deque() +loglevel = logging.WARNING + +FORMAT = pyaudio.paInt16 +CHANNELS = 1 + + +if sys.platform == 'win32': + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + +check_and_install_packages([ + { + 'module_name': 'RealtimeSTT', # Import module + 'attribute': 'AudioToTextRecorder', # Specific class to check + 'install_name': 'RealtimeSTT', # Package name for pip install + }, + { + 'module_name': 'websockets', # Import module + 'install_name': 'websockets', # Package name for pip install + }, + { + 'module_name': 'numpy', # Import module + 'install_name': 'numpy', # Package name for pip install + }, + { + 'module_name': 'scipy.signal', # Submodule of scipy + 'attribute': 'resample', # Specific function to check + 'install_name': 'scipy', # Package name for pip install + } +]) + +# Define ANSI color codes for terminal output +class bcolors: + HEADER = '\033[95m' # Magenta + OKBLUE = '\033[94m' # Blue + OKCYAN = '\033[96m' # Cyan + OKGREEN = '\033[92m' # Green + WARNING = '\033[93m' # Yellow + FAIL = '\033[91m' # Red + ENDC = '\033[0m' # Reset to default + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + +print(f"{bcolors.BOLD}{bcolors.OKCYAN}Starting server, please wait...{bcolors.ENDC}") + +# Initialize colorama +from colorama import init, Fore, Style +init() + +from RealtimeSTT import AudioToTextRecorder +from scipy.signal import resample +import numpy as np +import websockets +import threading +import logging +import wave +import json +import time + +global_args = None +recorder = None +recorder_config = {} +recorder_ready = threading.Event() +recorder_thread = None +stop_recorder = False +prev_text = "" + +# Define allowed methods and parameters for security +allowed_methods = [ + 'set_microphone', + 'abort', + 'stop', + 'clear_audio_queue', + 'wakeup', + 'shutdown', + 'text', +] +allowed_parameters = [ + 'language', + 'silero_sensitivity', + 'wake_word_activation_delay', + 'post_speech_silence_duration', + 'listen_start', + 'recording_stop_time', + 'last_transcription_bytes', + 'last_transcription_bytes_b64', + 'speech_end_silence_start', + 'is_recording', + 'use_wake_words', +] + +# Queues and connections for control and data +control_connections = set() +data_connections = set() +control_queue = asyncio.Queue() +audio_queue = asyncio.Queue() + +def preprocess_text(text): + # Remove leading whitespaces + text = text.lstrip() + + # Remove starting ellipses if present + if text.startswith("..."): + text = text[3:] + + if text.endswith("...'."): + text = text[:-1] + + if text.endswith("...'"): + text = text[:-1] + + # Remove any leading whitespaces again after ellipses removal + text = text.lstrip() + + # Uppercase the first 
letter + if text: + text = text[0].upper() + text[1:] + + return text + +def debug_print(message): + if debug_logging: + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + thread_name = threading.current_thread().name + print(f"{Fore.CYAN}[DEBUG][{timestamp}][{thread_name}] {message}{Style.RESET_ALL}", file=sys.stderr) + +def format_timestamp_ns(timestamp_ns: int) -> str: + # Split into whole seconds and the nanosecond remainder + seconds = timestamp_ns // 1_000_000_000 + remainder_ns = timestamp_ns % 1_000_000_000 + + # Convert seconds part into a datetime object (local time) + dt = datetime.fromtimestamp(seconds) + + # Format the main time as HH:MM:SS + time_str = dt.strftime("%H:%M:%S") + + # For instance, if you want milliseconds, divide the remainder by 1e6 and format as 3-digit + milliseconds = remainder_ns // 1_000_000 + formatted_timestamp = f"{time_str}.{milliseconds:03d}" + + return formatted_timestamp + +def text_detected(text, loop): + global prev_text + + text = preprocess_text(text) + + # if silence_timing: + # def ends_with_ellipsis(text: str): + # if text.endswith("..."): + # return True + # if len(text) > 1 and text[:-1].endswith("..."): + # return True + # return False + + # def sentence_end(text: str): + # sentence_end_marks = ['.', '!', '?', '。'] + # if text and text[-1] in sentence_end_marks: + # return True + # return False + + + # if ends_with_ellipsis(text): + # recorder.post_speech_silence_duration = global_args.mid_sentence_detection_pause + # elif sentence_end(text) and sentence_end(prev_text) and not ends_with_ellipsis(prev_text): + # recorder.post_speech_silence_duration = global_args.end_of_sentence_detection_pause + # else: + # recorder.post_speech_silence_duration = global_args.unknown_sentence_detection_pause + + + # # Append the new text with its timestamp + # current_time = time.time() + # text_time_deque.append((current_time, text)) + + # # Remove texts older than hard_break_even_on_background_noise seconds + # while text_time_deque and text_time_deque[0][0] < current_time - hard_break_even_on_background_noise: + # text_time_deque.popleft() + + # # Check if at least hard_break_even_on_background_noise_min_texts texts have arrived within the last hard_break_even_on_background_noise seconds + # if len(text_time_deque) >= hard_break_even_on_background_noise_min_texts: + # texts = [t[1] for t in text_time_deque] + # first_text = texts[0] + # last_text = texts[-1] + + # # Compute the similarity ratio between the first and last texts + # similarity = SequenceMatcher(None, first_text, last_text).ratio() + + # if similarity > hard_break_even_on_background_noise_min_similarity and len(first_text) > hard_break_even_on_background_noise_min_chars: + # recorder.stop() + # recorder.clear_audio_queue() + # prev_text = "" + + prev_text = text + + # Put the message in the audio queue to be sent to clients + message = json.dumps({ + 'type': 'realtime', + 'text': text + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + + # Get current timestamp in HH:MM:SS.nnn format + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + + if extended_logging: + print(f" [{timestamp}] Realtime text: {bcolors.OKCYAN}{text}{bcolors.ENDC}\n", flush=True, end="") + else: + print(f"\r[{timestamp}] {bcolors.OKCYAN}{text}{bcolors.ENDC}", flush=True, end='') + +def on_recording_start(loop): + message = json.dumps({ + 'type': 'recording_start' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_recording_stop(loop): + message 
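`preprocess_text` above trims leading whitespace, strips leading ellipses and trailing `...'.` / `...'` artifacts, and uppercases the first character. A few illustrative calls (expected results in comments), assuming stt_server.py is importable as a module:

```python
from stt_server import preprocess_text  # hypothetical import path for the file above

print(preprocess_text("...and then we left"))   # "And then we left"
print(preprocess_text("   hello world"))        # "Hello world"
print(preprocess_text("it was 'fine...'"))      # "It was 'fine..."
```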
= json.dumps({ + 'type': 'recording_stop' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_vad_detect_start(loop): + message = json.dumps({ + 'type': 'vad_detect_start' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_vad_detect_stop(loop): + message = json.dumps({ + 'type': 'vad_detect_stop' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_wakeword_detected(loop): + message = json.dumps({ + 'type': 'wakeword_detected' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_wakeword_detection_start(loop): + message = json.dumps({ + 'type': 'wakeword_detection_start' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_wakeword_detection_end(loop): + message = json.dumps({ + 'type': 'wakeword_detection_end' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_transcription_start(_audio_bytes, loop): + bytes_b64 = base64.b64encode(_audio_bytes.tobytes()).decode('utf-8') + message = json.dumps({ + 'type': 'transcription_start', + 'audio_bytes_base64': bytes_b64 + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_turn_detection_start(loop): + print("&&& stt_server on_turn_detection_start") + message = json.dumps({ + 'type': 'start_turn_detection' + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +def on_turn_detection_stop(loop): + # print("&&& stt_server on_turn_detection_stop") + # message = json.dumps({ + # 'type': 'stop_turn_detection' + # }) + # asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + pass + + +# def on_realtime_transcription_update(text, loop): +# # Send real-time transcription updates to the client +# text = preprocess_text(text) +# message = json.dumps({ +# 'type': 'realtime_update', +# 'text': text +# }) +# asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +# def on_recorded_chunk(chunk, loop): +# if send_recorded_chunk: +# bytes_b64 = base64.b64encode(chunk.tobytes()).decode('utf-8') +# message = json.dumps({ +# 'type': 'recorded_chunk', +# 'bytes': bytes_b64 +# }) +# asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + +# Define the server's arguments +def parse_arguments(): + global debug_logging, extended_logging, loglevel, writechunks, log_incoming_chunks, dynamic_silence_timing + + import argparse + parser = argparse.ArgumentParser(description='Start the Speech-to-Text (STT) server with various configuration options.') + + parser.add_argument('-m', '--model', type=str, default='large-v2', + help='Path to the STT model or model size. Options include: tiny, tiny.en, base, base.en, small, small.en, medium, medium.en, large-v1, large-v2, or any huggingface CTranslate2 STT model such as deepdml/faster-whisper-large-v3-turbo-ct2. Default is large-v2.') + + parser.add_argument('-r', '--rt-model', '--realtime_model_type', type=str, default='tiny', + help='Model size for real-time transcription. Options same as --model. This is used only if real-time transcription is enabled (enable_realtime_transcription). Default is tiny.en.') + + parser.add_argument('-l', '--lang', '--language', type=str, default='es', + help='Language code for the STT model to transcribe in a specific language. Leave this empty for auto-detection based on input audio. Default is en. 
List of supported language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L11-L110') + + parser.add_argument('-i', '--input-device', '--input-device-index', type=int, default=1, + help='Index of the audio input device to use. Use this option to specify a particular microphone or audio input device based on your system. Default is 1.') + + parser.add_argument('-c', '--control', '--control_port', type=int, default=8011, + help='The port number used for the control WebSocket connection. Control connections are used to send and receive commands to the server. Default is port 8011.') + + parser.add_argument('-d', '--data', '--data_port', type=int, default=8012, + help='The port number used for the data WebSocket connection. Data connections are used to send audio data and receive transcription updates in real time. Default is port 8012.') + + parser.add_argument('-w', '--wake_words', type=str, default="", + help='Specify the wake word(s) that will trigger the server to start listening. For example, setting this to "Jarvis" will make the system start transcribing when it detects the wake word "Jarvis". Default is "Jarvis".') + + parser.add_argument('-D', '--debug', action='store_true', help='Enable debug logging for detailed server operations') + + parser.add_argument('--debug_websockets', action='store_true', help='Enable debug logging for detailed server websocket operations') + + parser.add_argument('-W', '--write', metavar='FILE', help='Save received audio to a WAV file') + + parser.add_argument('-b', '--batch', '--batch_size', type=int, default=16, help='Batch size for inference. This parameter controls the number of audio chunks processed in parallel during transcription. Default is 16.') + + parser.add_argument('--root', '--download_root', type=str,default=None, help='Specifies the root path where the Whisper models are downloaded to. Default is None.') + + parser.add_argument('-s', '--silence_timing', action='store_true', default=True, + help='Enable dynamic adjustment of silence duration for sentence detection. Adjusts post-speech silence duration based on detected sentence structure and punctuation. Default is False.') + + parser.add_argument('--init_realtime_after_seconds', type=float, default=0.2, + help='The initial waiting time in seconds before real-time transcription starts. This delay helps prevent false positives at the beginning of a session. Default is 0.2 seconds.') + + parser.add_argument('--realtime_batch_size', type=int, default=16, + help='Batch size for the real-time transcription model. This parameter controls the number of audio chunks processed in parallel during real-time transcription. Default is 16.') + + parser.add_argument('--initial_prompt_realtime', type=str, default="", help='Initial prompt that guides the real-time transcription model to produce transcriptions in a particular style or format.') + + parser.add_argument('--silero_sensitivity', type=float, default=0.05, + help='Sensitivity level for Silero Voice Activity Detection (VAD), with a range from 0 to 1. Lower values make the model less sensitive, useful for noisy environments. Default is 0.05.') + + parser.add_argument('--silero_use_onnx', action='store_true', default=False, + help='Enable ONNX version of Silero model for faster performance with lower resource usage. Default is False.') + + parser.add_argument('--webrtc_sensitivity', type=int, default=3, + help='Sensitivity level for WebRTC Voice Activity Detection (VAD), with a range from 0 to 3. 
Higher values make the model less sensitive, useful for cleaner environments. Default is 3.') + + parser.add_argument('--min_length_of_recording', type=float, default=1.1, + help='Minimum duration of valid recordings in seconds. This prevents very short recordings from being processed, which could be caused by noise or accidental sounds. Default is 1.1 seconds.') + + parser.add_argument('--min_gap_between_recordings', type=float, default=0, + help='Minimum time (in seconds) between consecutive recordings. Setting this helps avoid overlapping recordings when there’s a brief silence between them. Default is 0 seconds.') + + parser.add_argument('--enable_realtime_transcription', action='store_true', default=True, + help='Enable continuous real-time transcription of audio as it is received. When enabled, transcriptions are sent in near real-time. Default is True.') + + parser.add_argument('--realtime_processing_pause', type=float, default=0.02, + help='Time interval (in seconds) between processing audio chunks for real-time transcription. Lower values increase responsiveness but may put more load on the CPU. Default is 0.02 seconds.') + + parser.add_argument('--silero_deactivity_detection', action='store_true', default=True, + help='Use the Silero model for end-of-speech detection. This option can provide more robust silence detection in noisy environments, though it consumes more GPU resources. Default is True.') + + parser.add_argument('--early_transcription_on_silence', type=float, default=0.2, + help='Start transcription after the specified seconds of silence. This is useful when you want to trigger transcription mid-speech when there is a brief pause. Should be lower than post_speech_silence_duration. Set to 0 to disable. Default is 0.2 seconds.') + + parser.add_argument('--beam_size', type=int, default=5, + help='Beam size for the main transcription model. Larger values may improve transcription accuracy but increase the processing time. Default is 5.') + + parser.add_argument('--beam_size_realtime', type=int, default=3, + help='Beam size for the real-time transcription model. A smaller beam size allows for faster real-time processing but may reduce accuracy. Default is 3.') + + parser.add_argument('--initial_prompt', type=str, + default="Incomplete thoughts should end with '...'. Examples of complete thoughts: 'The sky is blue.' 'She walked home.' Examples of incomplete thoughts: 'When the sky...' 'Because he...'", + help='Initial prompt that guides the transcription model to produce transcriptions in a particular style or format. The default provides instructions for handling sentence completions and ellipsis usage.') + + parser.add_argument('--end_of_sentence_detection_pause', type=float, default=5.0, + help='The duration of silence (in seconds) that the model should interpret as the end of a sentence. This helps the system detect when to finalize the transcription of a sentence. Default is 0.45 seconds.') + + parser.add_argument('--unknown_sentence_detection_pause', type=float, default=5.0, + help='The duration of pause (in seconds) that the model should interpret as an incomplete or unknown sentence. This is useful for identifying when a sentence is trailing off or unfinished. Default is 0.7 seconds.') + + parser.add_argument('--mid_sentence_detection_pause', type=float, default=5.0, + help='The duration of pause (in seconds) that the model should interpret as a mid-sentence break. Longer pauses can indicate a pause in speech but not necessarily the end of a sentence. 
Default is 2.0 seconds.') + + parser.add_argument('--wake_words_sensitivity', type=float, default=0.5, + help='Sensitivity level for wake word detection, with a range from 0 (most sensitive) to 1 (least sensitive). Adjust this value based on your environment to ensure reliable wake word detection. Default is 0.5.') + + parser.add_argument('--wake_word_timeout', type=float, default=5.0, + help='Maximum time in seconds that the system will wait for a wake word before timing out. After this timeout, the system stops listening for wake words until reactivated. Default is 5.0 seconds.') + + parser.add_argument('--wake_word_activation_delay', type=float, default=0, + help='The delay in seconds before the wake word detection is activated after the system starts listening. This prevents false positives during the start of a session. Default is 0 seconds.') + + parser.add_argument('--wakeword_backend', type=str, default='none', + help='The backend used for wake word detection. You can specify different backends such as "default" or any custom implementations depending on your setup. Default is "pvporcupine".') + + parser.add_argument('--openwakeword_model_paths', type=str, nargs='*', + help='A list of file paths to OpenWakeWord models. This is useful if you are using OpenWakeWord for wake word detection and need to specify custom models.') + + parser.add_argument('--openwakeword_inference_framework', type=str, default='tensorflow', + help='The inference framework to use for OpenWakeWord models. Supported frameworks could include "tensorflow", "pytorch", etc. Default is "tensorflow".') + + parser.add_argument('--wake_word_buffer_duration', type=float, default=1.0, + help='Duration of the buffer in seconds for wake word detection. This sets how long the system will store the audio before and after detecting the wake word. Default is 1.0 seconds.') + + parser.add_argument('--use_main_model_for_realtime', action='store_true', + help='Enable this option if you want to use the main model for real-time transcription, instead of the smaller, faster real-time model. Using the main model may provide better accuracy but at the cost of higher processing time.') + + parser.add_argument('--use_extended_logging', action='store_true', + help='Writes extensive log messages for the recording worker, that processes the audio chunks.') + + parser.add_argument('--compute_type', type=str, default='default', + help='Type of computation to use. See https://opennmt.net/CTranslate2/quantization.html') + + parser.add_argument('--gpu_device_index', type=int, default=0, + help='Index of the GPU device to use. Default is None.') + + parser.add_argument('--device', type=str, default='cuda', + help='Device for model to use. Can either be "cuda" or "cpu". Default is cuda.') + + parser.add_argument('--handle_buffer_overflow', action='store_true', + help='Handle buffer overflow during transcription. Default is False.') + + parser.add_argument('--suppress_tokens', type=int, default=[-1], nargs='*', help='Suppress tokens during transcription. Default is [-1].') + + parser.add_argument('--allowed_latency_limit', type=int, default=100, + help='Maximal amount of chunks that can be unprocessed in queue before discarding chunks.. Default is 100.') + + parser.add_argument('--faster_whisper_vad_filter', action='store_true', + help='Enable VAD filter for Faster Whisper. 
Default is False.') + + parser.add_argument('--logchunks', action='store_true', help='Enable logging of incoming audio chunks (periods)') + + # Parse arguments + args = parser.parse_args() + + debug_logging = args.debug + extended_logging = args.use_extended_logging + writechunks = args.write + log_incoming_chunks = args.logchunks + dynamic_silence_timing = args.silence_timing + + + ws_logger = logging.getLogger('websockets') + if args.debug_websockets: + # If app debug is on, let websockets be verbose too + ws_logger.setLevel(logging.DEBUG) + # Ensure it uses the handler configured by basicConfig + ws_logger.propagate = False # Prevent duplicate messages if it also propagates to root + else: + # If app debug is off, silence websockets below WARNING + ws_logger.setLevel(logging.WARNING) + ws_logger.propagate = True # Allow WARNING/ERROR messages to reach root logger's handler + + # Replace escaped newlines with actual newlines in initial_prompt + if args.initial_prompt: + args.initial_prompt = args.initial_prompt.replace("\\n", "\n") + + if args.initial_prompt_realtime: + args.initial_prompt_realtime = args.initial_prompt_realtime.replace("\\n", "\n") + + return args + +def _recorder_thread(loop): + global recorder, stop_recorder + print(f"{bcolors.OKGREEN}Initializing RealtimeSTT server with parameters:{bcolors.ENDC}") + for key, value in recorder_config.items(): + print(f" {bcolors.OKBLUE}{key}{bcolors.ENDC}: {value}") + recorder = AudioToTextRecorder(**recorder_config) + print(f"{bcolors.OKGREEN}{bcolors.BOLD}RealtimeSTT initialized{bcolors.ENDC}") + recorder_ready.set() + + def process_text(full_sentence): + global prev_text + prev_text = "" + full_sentence = preprocess_text(full_sentence) + message = json.dumps({ + 'type': 'fullSentence', # <- final, accurate message + 'text': full_sentence + }) + asyncio.run_coroutine_threadsafe(audio_queue.put(message), loop) + + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + if extended_logging: + print(f" [{timestamp}] Full text: {bcolors.BOLD}Sentence:{bcolors.ENDC} {bcolors.OKGREEN}{full_sentence}{bcolors.ENDC}\n", flush=True, end="") + else: + print(f"\r[{timestamp}] {bcolors.BOLD}Sentence:{bcolors.ENDC} {bcolors.OKGREEN}{full_sentence}{bcolors.ENDC}\n") + try: + while not stop_recorder: + recorder.text(process_text) # <- this calls the main (large) model once the sentence is finished + except KeyboardInterrupt: + print(f"{bcolors.WARNING}Exiting application due to keyboard interrupt{bcolors.ENDC}") + +def decode_and_resample( + audio_data, + original_sample_rate, + target_sample_rate): + + # Decode 16-bit PCM data to numpy array + if original_sample_rate == target_sample_rate: + return audio_data + + audio_np = np.frombuffer(audio_data, dtype=np.int16) + + # Calculate the number of samples after resampling + num_original_samples = len(audio_np) + num_target_samples = int(num_original_samples * target_sample_rate / + original_sample_rate) + + # Resample the audio + resampled_audio = resample(audio_np, num_target_samples) + + return resampled_audio.astype(np.int16).tobytes() + +async def control_handler(websocket): + debug_print(f"New control connection from {websocket.remote_address}") + print(f"{bcolors.OKGREEN}Control client connected{bcolors.ENDC}") + global recorder + control_connections.add(websocket) + try: + async for message in websocket: + debug_print(f"Received control message: {message[:200]}...") + if not recorder_ready.is_set(): + print(f"{bcolors.WARNING}Recorder not ready{bcolors.ENDC}") + continue + if isinstance(message, str): + #
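`decode_and_resample` above converts 16-bit PCM bytes to the target rate with `scipy.signal.resample` and hands back bytes again. A quick usage sketch, assuming the function can be imported from stt_server.py:

```python
import numpy as np

from stt_server import decode_and_resample  # hypothetical import path

# One second of a 440 Hz tone captured at 48 kHz, as int16 PCM bytes.
t = np.linspace(0, 1, 48000, endpoint=False)
tone = (0.3 * np.iinfo(np.int16).max * np.sin(2 * np.pi * 440 * t)).astype(np.int16)

resampled = decode_and_resample(tone.tobytes(), 48000, 16000)
print(len(tone.tobytes()), "->", len(resampled))  # 96000 -> 32000 bytes (16000 samples)
```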
Handle text message (command) + try: + command_data = json.loads(message) + command = command_data.get("command") + if command == "set_parameter": + parameter = command_data.get("parameter") + value = command_data.get("value") + if parameter in allowed_parameters and hasattr(recorder, parameter): + setattr(recorder, parameter, value) + # Format the value for output + if isinstance(value, float): + value_formatted = f"{value:.2f}" + else: + value_formatted = value + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + if extended_logging: + print(f" [{timestamp}] {bcolors.OKGREEN}Set recorder.{parameter} to: {bcolors.OKBLUE}{value_formatted}{bcolors.ENDC}") + # Optionally send a response back to the client + await websocket.send(json.dumps({"status": "success", "message": f"Parameter {parameter} set to {value}"})) + else: + if not parameter in allowed_parameters: + print(f"{bcolors.WARNING}Parameter {parameter} is not allowed (set_parameter){bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Parameter {parameter} is not allowed (set_parameter)"})) + else: + print(f"{bcolors.WARNING}Parameter {parameter} does not exist (set_parameter){bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Parameter {parameter} does not exist (set_parameter)"})) + + elif command == "get_parameter": + parameter = command_data.get("parameter") + request_id = command_data.get("request_id") # Get the request_id from the command data + if parameter in allowed_parameters and hasattr(recorder, parameter): + value = getattr(recorder, parameter) + if isinstance(value, float): + value_formatted = f"{value:.2f}" + else: + value_formatted = f"{value}" + + value_truncated = value_formatted[:39] + "…" if len(value_formatted) > 40 else value_formatted + + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + if extended_logging: + print(f" [{timestamp}] {bcolors.OKGREEN}Get recorder.{parameter}: {bcolors.OKBLUE}{value_truncated}{bcolors.ENDC}") + response = {"status": "success", "parameter": parameter, "value": value} + if request_id is not None: + response["request_id"] = request_id + await websocket.send(json.dumps(response)) + else: + if not parameter in allowed_parameters: + print(f"{bcolors.WARNING}Parameter {parameter} is not allowed (get_parameter){bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Parameter {parameter} is not allowed (get_parameter)"})) + else: + print(f"{bcolors.WARNING}Parameter {parameter} does not exist (get_parameter){bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Parameter {parameter} does not exist (get_parameter)"})) + elif command == "call_method": + method_name = command_data.get("method") + if method_name in allowed_methods: + method = getattr(recorder, method_name, None) + if method and callable(method): + args = command_data.get("args", []) + kwargs = command_data.get("kwargs", {}) + method(*args, **kwargs) + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + print(f" [{timestamp}] {bcolors.OKGREEN}Called method recorder.{bcolors.OKBLUE}{method_name}{bcolors.ENDC}") + await websocket.send(json.dumps({"status": "success", "message": f"Method {method_name} called"})) + else: + print(f"{bcolors.WARNING}Recorder does not have method {method_name}{bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Recorder does not have method {method_name}"})) + else: + print(f"{bcolors.WARNING}Method {method_name} is not 
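The control handler above accepts JSON commands of the shape `set_parameter`, `get_parameter` (optionally with a `request_id` to correlate the reply) and `call_method`, restricted to the allow-lists defined earlier. A hedged client sketch against the default control port, using the `websockets` package:

```python
import asyncio
import json

import websockets

async def demo_control_channel(url: str = "ws://localhost:8011"):
    async with websockets.connect(url) as ws:
        # Set an allow-listed recorder parameter.
        await ws.send(json.dumps({
            "command": "set_parameter",
            "parameter": "post_speech_silence_duration",
            "value": 0.8,
        }))
        print(await ws.recv())

        # Read a parameter back; request_id lets the caller match the reply.
        await ws.send(json.dumps({
            "command": "get_parameter",
            "parameter": "language",
            "request_id": 1,
        }))
        print(await ws.recv())

        # Invoke an allow-listed recorder method.
        await ws.send(json.dumps({"command": "call_method", "method": "clear_audio_queue"}))
        print(await ws.recv())

if __name__ == "__main__":
    asyncio.run(demo_control_channel())
```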
allowed{bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Method {method_name} is not allowed"})) + else: + print(f"{bcolors.WARNING}Unknown command: {command}{bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": f"Unknown command {command}"})) + except json.JSONDecodeError: + print(f"{bcolors.WARNING}Received invalid JSON command{bcolors.ENDC}") + await websocket.send(json.dumps({"status": "error", "message": "Invalid JSON command"})) + else: + print(f"{bcolors.WARNING}Received unknown message type on control connection{bcolors.ENDC}") + except websockets.exceptions.ConnectionClosed as e: + print(f"{bcolors.WARNING}Control client disconnected: {e}{bcolors.ENDC}") + finally: + control_connections.remove(websocket) + +async def data_handler(websocket): + global writechunks, wav_file + print(f"{bcolors.OKGREEN}Data client connected{bcolors.ENDC}") + data_connections.add(websocket) + try: + while True: + message = await websocket.recv() + if isinstance(message, bytes): + if extended_logging: + debug_print(f"Received audio chunk (size: {len(message)} bytes)") + elif log_incoming_chunks: + print(".", end='', flush=True) + # Handle binary message (audio data) + metadata_length = int.from_bytes(message[:4], byteorder='little') + metadata_json = message[4:4+metadata_length].decode('utf-8') + metadata = json.loads(metadata_json) + sample_rate = metadata['sampleRate'] + + if 'server_sent_to_stt' in metadata: + stt_received_ns = time.time_ns() + metadata["stt_received"] = stt_received_ns + metadata["stt_received_formatted"] = format_timestamp_ns(stt_received_ns) + print(f"Server received audio chunk of length {len(message)} bytes, metadata: {metadata}") + + if extended_logging: + debug_print(f"Processing audio chunk with sample rate {sample_rate}") + chunk = message[4+metadata_length:] + + if writechunks: + if not wav_file: + wav_file = wave.open(writechunks, 'wb') + wav_file.setnchannels(CHANNELS) + wav_file.setsampwidth(pyaudio.get_sample_size(FORMAT)) + wav_file.setframerate(sample_rate) + + wav_file.writeframes(chunk) + + if sample_rate != 16000: + resampled_chunk = decode_and_resample(chunk, sample_rate, 16000) + if extended_logging: + debug_print(f"Resampled chunk size: {len(resampled_chunk)} bytes") + recorder.feed_audio(resampled_chunk) + else: + recorder.feed_audio(chunk) + else: + print(f"{bcolors.WARNING}Received non-binary message on data connection{bcolors.ENDC}") + except websockets.exceptions.ConnectionClosed as e: + print(f"{bcolors.WARNING}Data client disconnected: {e}{bcolors.ENDC}") + finally: + data_connections.remove(websocket) + # recorder.clear_audio_queue() # Ensure audio queue is cleared if client disconnects + +async def broadcast_audio_messages(): + while True: + message = await audio_queue.get() + for conn in list(data_connections): + try: + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + + if extended_logging: + print(f" [{timestamp}] Sending message: {bcolors.OKBLUE}{message}{bcolors.ENDC}\n", flush=True, end="") + await conn.send(message) + except websockets.exceptions.ConnectionClosed: + data_connections.remove(conn) + +# Helper function to create event loop bound closures for callbacks +def make_callback(loop, callback): + def inner_callback(*args, **kwargs): + callback(*args, **kwargs, loop=loop) + return inner_callback + +async def main_async(): + global stop_recorder, recorder_config, global_args + args = parse_arguments() + global_args = args + + # Get the event loop here and pass it to the 
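`data_handler` above expects every binary frame to start with a 4-byte little-endian metadata length, followed by UTF-8 JSON metadata (at least `sampleRate`) and then the raw 16-bit PCM chunk. A sketch of building such a frame on the sending side; `build_data_message` is a hypothetical helper derived from that parsing logic:

```python
import json
import struct

import numpy as np

def build_data_message(pcm_chunk: bytes, sample_rate: int) -> bytes:
    """[4-byte little-endian metadata length][JSON metadata][raw int16 PCM]."""
    metadata = json.dumps({"sampleRate": sample_rate}).encode("utf-8")
    return struct.pack("<I", len(metadata)) + metadata + pcm_chunk

# Example: 20 ms of silence at 16 kHz, ready to be sent as one binary
# frame on the data WebSocket (default port 8012).
chunk = np.zeros(320, dtype=np.int16).tobytes()
message = build_data_message(chunk, sample_rate=16000)
print(len(message))  # 4 + len(metadata) + 640 bytes
```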
recorder thread + loop = asyncio.get_event_loop() + + recorder_config = { + 'model': args.model, + 'download_root': args.root, + 'realtime_model_type': args.rt_model, + 'language': args.lang, + 'batch_size': args.batch, + 'init_realtime_after_seconds': args.init_realtime_after_seconds, + 'realtime_batch_size': args.realtime_batch_size, + 'initial_prompt_realtime': args.initial_prompt_realtime, + 'input_device_index': args.input_device, + 'silero_sensitivity': args.silero_sensitivity, + 'silero_use_onnx': args.silero_use_onnx, + 'webrtc_sensitivity': args.webrtc_sensitivity, + 'post_speech_silence_duration': args.unknown_sentence_detection_pause, + 'min_length_of_recording': args.min_length_of_recording, + 'min_gap_between_recordings': args.min_gap_between_recordings, + 'enable_realtime_transcription': args.enable_realtime_transcription, + 'realtime_processing_pause': args.realtime_processing_pause, + 'silero_deactivity_detection': args.silero_deactivity_detection, + 'early_transcription_on_silence': args.early_transcription_on_silence, + 'beam_size': args.beam_size, + 'beam_size_realtime': args.beam_size_realtime, + 'initial_prompt': args.initial_prompt, + 'wake_words': args.wake_words, + 'wake_words_sensitivity': args.wake_words_sensitivity, + 'wake_word_timeout': args.wake_word_timeout, + 'wake_word_activation_delay': args.wake_word_activation_delay, + 'wakeword_backend': args.wakeword_backend, + 'openwakeword_model_paths': args.openwakeword_model_paths, + 'openwakeword_inference_framework': args.openwakeword_inference_framework, + 'wake_word_buffer_duration': args.wake_word_buffer_duration, + 'use_main_model_for_realtime': args.use_main_model_for_realtime, + 'spinner': False, + 'use_microphone': False, + + 'on_realtime_transcription_update': make_callback(loop, text_detected), + 'on_recording_start': make_callback(loop, on_recording_start), + 'on_recording_stop': make_callback(loop, on_recording_stop), + 'on_vad_detect_start': make_callback(loop, on_vad_detect_start), + 'on_vad_detect_stop': make_callback(loop, on_vad_detect_stop), + 'on_wakeword_detected': make_callback(loop, on_wakeword_detected), + 'on_wakeword_detection_start': make_callback(loop, on_wakeword_detection_start), + 'on_wakeword_detection_end': make_callback(loop, on_wakeword_detection_end), + 'on_transcription_start': make_callback(loop, on_transcription_start), + 'on_turn_detection_start': make_callback(loop, on_turn_detection_start), + 'on_turn_detection_stop': make_callback(loop, on_turn_detection_stop), + + # 'on_recorded_chunk': make_callback(loop, on_recorded_chunk), + 'no_log_file': True, # Disable logging to file + 'use_extended_logging': args.use_extended_logging, + 'level': loglevel, + 'compute_type': args.compute_type, + 'gpu_device_index': args.gpu_device_index, + 'device': args.device, + 'handle_buffer_overflow': args.handle_buffer_overflow, + 'suppress_tokens': args.suppress_tokens, + 'allowed_latency_limit': args.allowed_latency_limit, + 'faster_whisper_vad_filter': args.faster_whisper_vad_filter, + } + + try: + # Attempt to start control and data servers + control_server = await websockets.serve(control_handler, "localhost", args.control) + data_server = await websockets.serve(data_handler, "localhost", args.data) + print(f"{bcolors.OKGREEN}Control server started on {bcolors.OKBLUE}ws://localhost:{args.control}{bcolors.ENDC}") + print(f"{bcolors.OKGREEN}Data server started on {bcolors.OKBLUE}ws://localhost:{args.data}{bcolors.ENDC}") + + # Start the broadcast and recorder threads + broadcast_task = 
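The recorder callbacks run on a plain worker thread, so `make_callback` binds the asyncio loop into each of them and the callbacks hand their JSON messages to the loop with `asyncio.run_coroutine_threadsafe`. A minimal sketch of that thread-to-loop bridge in isolation:

```python
import asyncio
import threading

async def main():
    loop = asyncio.get_running_loop()
    messages = asyncio.Queue()

    def make_callback(loop, callback):
        def inner_callback(*args, **kwargs):
            callback(*args, **kwargs, loop=loop)
        return inner_callback

    def on_event(text, loop):
        # Safe to call from any thread; the put() itself runs on the event loop.
        asyncio.run_coroutine_threadsafe(messages.put(text), loop)

    callback = make_callback(loop, on_event)

    # Simulate the recorder thread firing a callback.
    threading.Thread(target=callback, args=("hello from worker",)).start()

    print(await messages.get())  # "hello from worker"

asyncio.run(main())
```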
asyncio.create_task(broadcast_audio_messages()) + + recorder_thread = threading.Thread(target=_recorder_thread, args=(loop,)) + recorder_thread.start() + recorder_ready.wait() + + print(f"{bcolors.OKGREEN}Server started. Press Ctrl+C to stop the server.{bcolors.ENDC}") + + # Run server tasks + await asyncio.gather(control_server.wait_closed(), data_server.wait_closed(), broadcast_task) + except OSError as e: + print(f"{bcolors.FAIL}Error: Could not start server on specified ports. It’s possible another instance of the server is already running, or the ports are being used by another application.{bcolors.ENDC}") + except KeyboardInterrupt: + print(f"{bcolors.WARNING}Server interrupted by user, shutting down...{bcolors.ENDC}") + finally: + # Shutdown procedures for recorder and server threads + await shutdown_procedure() + print(f"{bcolors.OKGREEN}Server shutdown complete.{bcolors.ENDC}") + +async def shutdown_procedure(): + global stop_recorder, recorder_thread + if recorder: + stop_recorder = True + recorder.abort() + # recorder.stop() + recorder.shutdown() + print(f"{bcolors.OKGREEN}Recorder shut down{bcolors.ENDC}") + + if recorder_thread: + recorder_thread.join() + print(f"{bcolors.OKGREEN}Recorder thread finished{bcolors.ENDC}") + + tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] + for task in tasks: + task.cancel() + await asyncio.gather(*tasks, return_exceptions=True) + + print(f"{bcolors.OKGREEN}All tasks cancelled, closing event loop now.{bcolors.ENDC}") + +def main(): + try: + asyncio.run(main_async()) + except KeyboardInterrupt: + # Capture any final KeyboardInterrupt to prevent it from showing up in logs + print(f"{bcolors.WARNING}Server interrupted by user.{bcolors.ENDC}") + exit(0) + +if __name__ == '__main__': + main() diff --git a/stt_recorder/README.md b/stt_recorder/README.md new file mode 100644 index 00000000..f9edd992 --- /dev/null +++ b/stt_recorder/README.md @@ -0,0 +1,18 @@ +# SttRecorder + +To start your Phoenix server: + + * Run `mix setup` to install and setup dependencies + * Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server` + +Now you can visit [`localhost:4000`](http://localhost:4000) from your browser. + +Ready to run in production? Please [check our deployment guides](https://hexdocs.pm/phoenix/deployment.html). 
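+
+## Audio chunk format (development note)
+
+The browser hook in `assets/js/stt_recorder.js` frames every audio chunk it sends to the
+STT data socket as a 4-byte little-endian metadata length, a UTF-8 JSON metadata blob
+(currently just `sampleRate`), and raw 16-bit PCM samples; the Python server's
+`data_handler` unpacks the same layout and resamples to 16 kHz when needed. The sketch
+below is an illustrative smoke test only, not part of this commit: it assumes the
+`websockets` package is installed and that the data socket is reachable on
+`ws://127.0.0.1:8012`, the URL hard-coded in the hook.
+
+```python
+# Illustrative test client -- mirrors sendAudioChunk() in assets/js/stt_recorder.js.
+import asyncio
+import json
+import struct
+
+import websockets
+
+DATA_URL = "ws://127.0.0.1:8012"  # assumed default, matches the browser hook
+
+
+def frame_chunk(pcm16: bytes, sample_rate: int) -> bytes:
+    """Pack [4-byte little-endian metadata length][metadata JSON][16-bit PCM]."""
+    metadata = json.dumps({"sampleRate": sample_rate}).encode("utf-8")
+    return struct.pack("<I", len(metadata)) + metadata + pcm16
+
+
+async def main() -> None:
+    async with websockets.connect(DATA_URL) as ws:
+        # One second of silence: 16 000 mono samples, 16-bit little-endian.
+        await ws.send(frame_chunk(b"\x00\x00" * 16000, 16000))
+        # The server broadcasts JSON transcription messages on this same socket;
+        # recv() blocks until the recorder emits one.
+        print(await ws.recv())
+
+
+asyncio.run(main())
+```
+
+Silence will normally not trigger a transcription, so substitute real speech samples when
+testing end to end.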
+ +## Learn more + + * Official website: https://www.phoenixframework.org/ + * Guides: https://hexdocs.pm/phoenix/overview.html + * Docs: https://hexdocs.pm/phoenix + * Forum: https://elixirforum.com/c/phoenix-forum + * Source: https://github.com/phoenixframework/phoenix diff --git a/stt_recorder/assets/css/app.css b/stt_recorder/assets/css/app.css new file mode 100644 index 00000000..d67ce14d --- /dev/null +++ b/stt_recorder/assets/css/app.css @@ -0,0 +1,86 @@ +@import "tailwindcss/base"; +@import "tailwindcss/components"; +@import "tailwindcss/utilities"; + +/* This file is for your main application CSS */ + body { + background-color: #f4f4f9; + color: #333; + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + display: flex; + align-items: center; + justify-content: center; + height: 100vh; + margin: 0; + } + #container { + display: flex; + flex-direction: column; + align-items: center; + width: 100%; + max-width: 700px; + padding: 20px; + box-sizing: border-box; + gap: 20px; /* Add more vertical space between items */ + height: 90%; /* Fixed height to prevent layout shift */ + } + #status { + color: #0056b3; + font-size: 20px; + text-align: center; + } + #transcriptionContainer { + height: 90px; /* Fixed height for approximately 3 lines of text */ + overflow-y: auto; + width: 100%; + padding: 10px; + box-sizing: border-box; + background-color: #f9f9f9; + border: 1px solid #ddd; + border-radius: 5px; + } + #transcription { + font-size: 18px; + line-height: 1.6; + color: #333; + word-wrap: break-word; + } + #fullTextContainer { + height: 150px; /* Fixed height to prevent layout shift */ + overflow-y: auto; + width: 100%; + padding: 10px; + box-sizing: border-box; + background-color: #f9f9f9; + border: 1px solid #ddd; + border-radius: 5px; + } + #fullText { + color: #4CAF50; + font-size: 18px; + font-weight: 600; + word-wrap: break-word; + } + .last-word { + color: #007bff; + font-weight: 600; + } + button { + padding: 12px 24px; + font-size: 16px; + cursor: pointer; + border: none; + border-radius: 5px; + margin: 5px; + transition: background-color 0.3s ease; + color: #fff; + background-color: #0056b3; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); + } + button:hover { + background-color: #007bff; + } + button:disabled { + background-color: #cccccc; + cursor: not-allowed; + } \ No newline at end of file diff --git a/stt_recorder/assets/js/app.js b/stt_recorder/assets/js/app.js new file mode 100644 index 00000000..6f6037fe --- /dev/null +++ b/stt_recorder/assets/js/app.js @@ -0,0 +1,55 @@ +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. +// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "../vendor/some-package.js" +// +// Alternatively, you can `npm install some-package --prefix assets` and import +// them using a path starting with the package name: +// +// import "some-package" +// + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. 
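+// The SttRecorder hook only mounts on LiveView elements that declare
+// phx-hook="SttRecorder" together with a DOM id.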
+import {Socket} from "phoenix"
+import {LiveSocket} from "phoenix_live_view"
+import topbar from "../vendor/topbar"
+import SttRecorder from "./stt_recorder.js";
+
+// CSRF token rendered into the root layout's <meta name="csrf-token"> tag.
+let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content")
+
+let liveSocket = new LiveSocket("/live", Socket, {
+  hooks: { SttRecorder },
+  params: { _csrf_token: csrfToken }
+});
+
+liveSocket.connect();
+
+window.liveSocket = liveSocket;
+
+
+// let liveSocket = new LiveSocket("/live", Socket, {
+//   longPollFallbackMs: 2500,
+//   params: {_csrf_token: csrfToken},
+//   hooks: Hooks
+// })
+
+// Show progress bar on live navigation and form submits
+topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"})
+window.addEventListener("phx:page-loading-start", _info => topbar.show(300))
+window.addEventListener("phx:page-loading-stop", _info => topbar.hide())
+
+// connect if there are any LiveViews on the page
+liveSocket.connect()
+
+// expose liveSocket on window for web console debug logs and latency simulation:
+// >> liveSocket.enableDebug()
+// >> liveSocket.enableLatencySim(1000)  // enabled for duration of browser session
+// >> liveSocket.disableLatencySim()
+window.liveSocket = liveSocket
+
diff --git a/stt_recorder/assets/js/stt_recorder.js b/stt_recorder/assets/js/stt_recorder.js
new file mode 100644
index 00000000..4f3a3ba5
--- /dev/null
+++ b/stt_recorder/assets/js/stt_recorder.js
@@ -0,0 +1,119 @@
+// assets/js/stt_recorder.js
+
+let SttRecorder = {
+  mounted() {
+    const statusDiv = document.getElementById("status");
+    const transcriptionDiv = document.getElementById("transcription");
+    const fullTextDiv = document.getElementById("fullText");
+    const startButton = document.getElementById("startButton");
+    const stopButton = document.getElementById("stopButton");
+
+    const controlURL = "ws://127.0.0.1:8011";
+    const dataURL = "ws://127.0.0.1:8012";
+    let dataSocket;
+    let audioContext;
+    let mediaStream;
+    let mediaProcessor;
+
+    // define startRecording and stopRecording here, or attach to this
+    window.startRecording = async function () {
+      try {
+        startButton.disabled = true;
+        stopButton.disabled = false;
+        statusDiv.textContent = "Recording...";
+        transcriptionDiv.textContent = "";
+        fullTextDiv.textContent = "";
+
+        audioContext = new AudioContext();
+        mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+        const input = audioContext.createMediaStreamSource(mediaStream);
+
+        mediaProcessor = audioContext.createScriptProcessor(1024, 1, 1);
+        mediaProcessor.onaudioprocess = (event) => {
+          const audioData = event.inputBuffer.getChannelData(0);
+          sendAudioChunk(audioData, audioContext.sampleRate);
+        };
+
+        input.connect(mediaProcessor);
+        mediaProcessor.connect(audioContext.destination);
+
+        connectToDataSocket();
+      } catch (error) {
+        console.error("Error accessing microphone:", error);
+        statusDiv.textContent = "Error accessing microphone.";
+        stopRecording();
+      }
+    };
+
+    window.stopRecording = function () {
+      if (mediaProcessor && audioContext) {
+        mediaProcessor.disconnect();
+        audioContext.close();
+      }
+
+      if (mediaStream) {
+        mediaStream.getTracks().forEach((track) => track.stop());
+      }
+
+      if (dataSocket) {
+        dataSocket.close();
+      }
+
+      startButton.disabled = false;
+      stopButton.disabled = true;
+      statusDiv.textContent = "Stopped recording.";
+    };
+
+    function connectToDataSocket() {
+      dataSocket = new WebSocket(dataURL);
+
+      dataSocket.onopen = () => {
+        statusDiv.textContent = "Connected to STT server.";
+      };
+
+      dataSocket.onmessage = (event) => {
+        try {
+          const message = JSON.parse(event.data);
+          if (message.type === "realtime") 
{ + let words = message.text.split(" "); + let lastWord = words.pop(); + transcriptionDiv.innerHTML = `${words.join(" ")} ${lastWord}`; + } else if (message.type === "fullSentence") { + fullTextDiv.innerHTML += message.text + " "; + transcriptionDiv.innerHTML = message.text; + } + } catch (e) { + console.error("Error parsing message:", e); + } + }; + + dataSocket.onerror = (error) => { + console.error("WebSocket error:", error); + statusDiv.textContent = "Error connecting to the STT server."; + }; + } + + function sendAudioChunk(audioData, sampleRate) { + if (dataSocket && dataSocket.readyState === WebSocket.OPEN) { + const float32Array = new Float32Array(audioData); + const pcm16Data = new Int16Array(float32Array.length); + for (let i = 0; i < float32Array.length; i++) { + pcm16Data[i] = Math.max(-1, Math.min(1, float32Array[i])) * 0x7fff; + } + + const metadata = JSON.stringify({ sampleRate }); + const metadataLength = new Uint32Array([metadata.length]); + const metadataBuffer = new TextEncoder().encode(metadata); + + const message = new Uint8Array(metadataLength.byteLength + metadataBuffer.byteLength + pcm16Data.byteLength); + message.set(new Uint8Array(metadataLength.buffer), 0); + message.set(metadataBuffer, metadataLength.byteLength); + message.set(new Uint8Array(pcm16Data.buffer), metadataLength.byteLength + metadataBuffer.byteLength); + + dataSocket.send(message); + } + } + }, +}; + +export default SttRecorder; diff --git a/stt_recorder/assets/tailwind.config.js b/stt_recorder/assets/tailwind.config.js new file mode 100644 index 00000000..b054b85a --- /dev/null +++ b/stt_recorder/assets/tailwind.config.js @@ -0,0 +1,74 @@ +// See the Tailwind configuration guide for advanced usage +// https://tailwindcss.com/docs/configuration + +const plugin = require("tailwindcss/plugin") +const fs = require("fs") +const path = require("path") + +module.exports = { + content: [ + "./js/**/*.js", + "../lib/stt_recorder_web.ex", + "../lib/stt_recorder_web/**/*.*ex" + ], + theme: { + extend: { + colors: { + brand: "#FD4F00", + } + }, + }, + plugins: [ + require("@tailwindcss/forms"), + // Allows prefixing tailwind classes with LiveView classes to add rules + // only when LiveView classes are applied, for example: + // + //
+ // + plugin(({addVariant}) => addVariant("phx-click-loading", [".phx-click-loading&", ".phx-click-loading &"])), + plugin(({addVariant}) => addVariant("phx-submit-loading", [".phx-submit-loading&", ".phx-submit-loading &"])), + plugin(({addVariant}) => addVariant("phx-change-loading", [".phx-change-loading&", ".phx-change-loading &"])), + + // Embeds Heroicons (https://heroicons.com) into your app.css bundle + // See your `CoreComponents.icon/1` for more information. + // + plugin(function({matchComponents, theme}) { + let iconsDir = path.join(__dirname, "../deps/heroicons/optimized") + let values = {} + let icons = [ + ["", "/24/outline"], + ["-solid", "/24/solid"], + ["-mini", "/20/solid"], + ["-micro", "/16/solid"] + ] + icons.forEach(([suffix, dir]) => { + fs.readdirSync(path.join(iconsDir, dir)).forEach(file => { + let name = path.basename(file, ".svg") + suffix + values[name] = {name, fullPath: path.join(iconsDir, dir, file)} + }) + }) + matchComponents({ + "hero": ({name, fullPath}) => { + let content = fs.readFileSync(fullPath).toString().replace(/\r?\n|\r/g, "") + let size = theme("spacing.6") + if (name.endsWith("-mini")) { + size = theme("spacing.5") + } else if (name.endsWith("-micro")) { + size = theme("spacing.4") + } + return { + [`--hero-${name}`]: `url('data:image/svg+xml;utf8,${content}')`, + "-webkit-mask": `var(--hero-${name})`, + "mask": `var(--hero-${name})`, + "mask-repeat": "no-repeat", + "background-color": "currentColor", + "vertical-align": "middle", + "display": "inline-block", + "width": size, + "height": size + } + } + }, {values}) + }) + ] +} diff --git a/stt_recorder/assets/vendor/topbar.js b/stt_recorder/assets/vendor/topbar.js new file mode 100644 index 00000000..41957274 --- /dev/null +++ b/stt_recorder/assets/vendor/topbar.js @@ -0,0 +1,165 @@ +/** + * @license MIT + * topbar 2.0.0, 2023-02-04 + * https://buunguyen.github.io/topbar + * Copyright (c) 2021 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + // https://gist.github.com/paulirish/1579671 + (function () { + var lastTime = 0; + var vendors = ["ms", "moz", "webkit", "o"]; + for (var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { + window.requestAnimationFrame = + window[vendors[x] + "RequestAnimationFrame"]; + window.cancelAnimationFrame = + window[vendors[x] + "CancelAnimationFrame"] || + window[vendors[x] + "CancelRequestAnimationFrame"]; + } + if (!window.requestAnimationFrame) + window.requestAnimationFrame = function (callback, element) { + var currTime = new Date().getTime(); + var timeToCall = Math.max(0, 16 - (currTime - lastTime)); + var id = window.setTimeout(function () { + callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + if (!window.cancelAnimationFrame) + window.cancelAnimationFrame = function (id) { + clearTimeout(id); + }; + })(); + + var canvas, + currentProgress, + showing, + progressTimerId = null, + fadeTimerId = null, + delayTimerId = null, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": "rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + shadowBlur: 10, + shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + 
repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + document.body.appendChild(canvas); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function (delay) { + if (showing) return; + if (delay) { + if (delayTimerId) return; + delayTimerId = setTimeout(() => topbar.show(), delay); + } else { + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + clearTimeout(delayTimerId); + delayTimerId = null; + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/stt_recorder/config/config.exs b/stt_recorder/config/config.exs new file mode 100644 index 00000000..9f4bc9b6 --- /dev/null +++ b/stt_recorder/config/config.exs @@ -0,0 +1,65 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. +# +# This configuration file is loaded before any dependency and +# is restricted to this project. 
+ +# General application configuration +import Config + +config :stt_recorder, + generators: [timestamp_type: :utc_datetime] + +# Configures the endpoint +config :stt_recorder, SttRecorderWeb.Endpoint, + url: [host: "localhost"], + adapter: Bandit.PhoenixAdapter, + render_errors: [ + formats: [html: SttRecorderWeb.ErrorHTML, json: SttRecorderWeb.ErrorJSON], + layout: false + ], + pubsub_server: SttRecorder.PubSub, + live_view: [signing_salt: "4qUMg2FH"] + +# Configures the mailer +# +# By default it uses the "Local" adapter which stores the emails +# locally. You can see the emails in your browser, at "/dev/mailbox". +# +# For production it's recommended to configure a different adapter +# at the `config/runtime.exs`. +config :stt_recorder, SttRecorder.Mailer, adapter: Swoosh.Adapters.Local + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.17.11", + stt_recorder: [ + args: + ~w(js/app.js --bundle --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + +# Configure tailwind (the version is required) +config :tailwind, + version: "3.4.3", + stt_recorder: [ + args: ~w( + --config=tailwind.config.js + --input=css/app.css + --output=../priv/static/assets/app.css + ), + cd: Path.expand("../assets", __DIR__) + ] + +# Configures Elixir's Logger +config :logger, :console, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. +import_config "#{config_env()}.exs" diff --git a/stt_recorder/config/dev.exs b/stt_recorder/config/dev.exs new file mode 100644 index 00000000..efbe2f89 --- /dev/null +++ b/stt_recorder/config/dev.exs @@ -0,0 +1,75 @@ +import Config + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. For example, we can use it +# to bundle .js and .css sources. +config :stt_recorder, SttRecorderWeb.Endpoint, + # Binding to loopback ipv4 address prevents access from other machines. + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. + http: [ip: {127, 0, 0, 1}, port: 4000], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: "Cndrc+8xBjEgDUgDJfPsBZv0RcxwRezDvNUxV06+etB09JXsY7JgHSeYdtKCtgXR", + watchers: [ + esbuild: {Esbuild, :install_and_run, [:stt_recorder, ~w(--sourcemap=inline --watch)]}, + tailwind: {Tailwind, :install_and_run, [:stt_recorder, ~w(--watch)]} + ] + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Run `mix help phx.gen.cert` for more information. +# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Watch static and templates for browser reloading. 
+config :stt_recorder, SttRecorderWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/(?!uploads/).*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"priv/gettext/.*(po)$", + ~r"lib/stt_recorder_web/(controllers|live|components)/.*(ex|heex)$" + ] + ] + +# Enable dev routes for dashboard and mailbox +config :stt_recorder, dev_routes: true + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. +config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime + +config :phoenix_live_view, + # Include HEEx debug annotations as HTML comments in rendered markup + debug_heex_annotations: true, + # Enable helpful, but potentially expensive runtime checks + enable_expensive_runtime_checks: true + +# Disable swoosh api client as it is only required for production adapters. +config :swoosh, :api_client, false diff --git a/stt_recorder/config/prod.exs b/stt_recorder/config/prod.exs new file mode 100644 index 00000000..3def4b03 --- /dev/null +++ b/stt_recorder/config/prod.exs @@ -0,0 +1,21 @@ +import Config + +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix assets.deploy` task, +# which you should run after static files are built and +# before starting your production server. +config :stt_recorder, SttRecorderWeb.Endpoint, + cache_static_manifest: "priv/static/cache_manifest.json" + +# Configures Swoosh API Client +config :swoosh, api_client: Swoosh.ApiClient.Finch, finch_name: SttRecorder.Finch + +# Disable Swoosh Local Memory Storage +config :swoosh, local: false + +# Do not print debug messages in production +config :logger, level: :info + +# Runtime production configuration, including reading +# of environment variables, is done on config/runtime.exs. diff --git a/stt_recorder/config/runtime.exs b/stt_recorder/config/runtime.exs new file mode 100644 index 00000000..7d4f5f22 --- /dev/null +++ b/stt_recorder/config/runtime.exs @@ -0,0 +1,24 @@ +import Config + +if System.get_env("PHX_SERVER") do + config :stt_recorder, SttRecorderWeb.Endpoint, server: true +end + +if config_env() == :prod do + config :stt_recorder, SttRecorderWeb.Endpoint, + check_origin: false + + secret_key_base = System.get_env("SECRET_KEY_BASE") || "VGUXen8QezXuamrCHG6pTLtjJQVompq/BQK8ihF6jP1PG77G1Y8Ho6aFHKZQG07Z" + host = System.get_env("PHX_HOST") || "localhost" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :stt_recorder, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") + + config :stt_recorder, SttRecorderWeb.Endpoint, + url: [host: host, port: port, scheme: "http"], + http: [ + ip: {0, 0, 0, 0}, + port: port + ], + secret_key_base: secret_key_base +end diff --git a/stt_recorder/config/test.exs b/stt_recorder/config/test.exs new file mode 100644 index 00000000..b6088bcf --- /dev/null +++ b/stt_recorder/config/test.exs @@ -0,0 +1,24 @@ +import Config + +# We don't run a server during test. If one is required, +# you can enable the server option below. 
+config :stt_recorder, SttRecorderWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "STk/67ZfxBsaykDyR6DCl1ZRZY2V/7DLR4gzKWi+mmmZNa1JkHLAlasIUV/SWO8B", + server: false + +# In test we don't send emails +config :stt_recorder, SttRecorder.Mailer, adapter: Swoosh.Adapters.Test + +# Disable swoosh api client as it is only required for production adapters +config :swoosh, :api_client, false + +# Print only warnings and errors during test +config :logger, level: :warning + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime + +# Enable helpful, but potentially expensive runtime checks +config :phoenix_live_view, + enable_expensive_runtime_checks: true diff --git a/stt_recorder/lib/stt_recorder.ex b/stt_recorder/lib/stt_recorder.ex new file mode 100644 index 00000000..e73b4d03 --- /dev/null +++ b/stt_recorder/lib/stt_recorder.ex @@ -0,0 +1,9 @@ +defmodule SttRecorder do + @moduledoc """ + SttRecorder keeps the contexts that define your domain + and business logic. + + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ +end diff --git a/stt_recorder/lib/stt_recorder/application.ex b/stt_recorder/lib/stt_recorder/application.ex new file mode 100644 index 00000000..6ea50ed4 --- /dev/null +++ b/stt_recorder/lib/stt_recorder/application.ex @@ -0,0 +1,35 @@ +defmodule SttRecorder.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + children = [ + SttRecorderWeb.Telemetry, + {DNSCluster, query: Application.get_env(:stt_recorder, :dns_cluster_query) || :ignore}, + {Phoenix.PubSub, name: SttRecorder.PubSub}, + # Start the Finch HTTP client for sending emails + {Finch, name: SttRecorder.Finch}, + # Start a worker by calling: SttRecorder.Worker.start_link(arg) + # {SttRecorder.Worker, arg}, + # Start to serve requests, typically the last entry + SttRecorderWeb.Endpoint + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: SttRecorder.Supervisor] + Supervisor.start_link(children, opts) + end + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. + @impl true + def config_change(changed, _new, removed) do + SttRecorderWeb.Endpoint.config_change(changed, removed) + :ok + end +end diff --git a/stt_recorder/lib/stt_recorder/mailer.ex b/stt_recorder/lib/stt_recorder/mailer.ex new file mode 100644 index 00000000..2307a2fd --- /dev/null +++ b/stt_recorder/lib/stt_recorder/mailer.ex @@ -0,0 +1,3 @@ +defmodule SttRecorder.Mailer do + use Swoosh.Mailer, otp_app: :stt_recorder +end diff --git a/stt_recorder/lib/stt_recorder_web.ex b/stt_recorder/lib/stt_recorder_web.ex new file mode 100644 index 00000000..473782b4 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web.ex @@ -0,0 +1,116 @@ +defmodule SttRecorderWeb do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, components, channels, and so on. + + This can be used in your application as: + + use SttRecorderWeb, :controller + use SttRecorderWeb, :html + + The definitions below will be executed for every controller, + component, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. 
Instead, define additional modules and import + those modules here. + """ + + def static_paths, do: ~w(assets fonts images favicon.ico robots.txt) + + def router do + quote do + use Phoenix.Router, helpers: false + + # Import common connection and controller functions to use in pipelines + import Plug.Conn + import Phoenix.Controller + import Phoenix.LiveView.Router + end + end + + def channel do + quote do + use Phoenix.Channel + end + end + + def controller do + quote do + use Phoenix.Controller, + formats: [:html, :json], + layouts: [html: SttRecorderWeb.Layouts] + + use Gettext, backend: SttRecorderWeb.Gettext + + import Plug.Conn + + unquote(verified_routes()) + end + end + + def live_view do + quote do + use Phoenix.LiveView, + layout: {SttRecorderWeb.Layouts, :app} + + unquote(html_helpers()) + end + end + + def live_component do + quote do + use Phoenix.LiveComponent + + unquote(html_helpers()) + end + end + + def html do + quote do + use Phoenix.Component + + # Import convenience functions from controllers + import Phoenix.Controller, + only: [get_csrf_token: 0, view_module: 1, view_template: 1] + + # Include general helpers for rendering HTML + unquote(html_helpers()) + end + end + + defp html_helpers do + quote do + # Translation + use Gettext, backend: SttRecorderWeb.Gettext + + # HTML escaping functionality + import Phoenix.HTML + # Core UI components + import SttRecorderWeb.CoreComponents + + # Shortcut for generating JS commands + alias Phoenix.LiveView.JS + + # Routes generation with the ~p sigil + unquote(verified_routes()) + end + end + + def verified_routes do + quote do + use Phoenix.VerifiedRoutes, + endpoint: SttRecorderWeb.Endpoint, + router: SttRecorderWeb.Router, + statics: SttRecorderWeb.static_paths() + end + end + + @doc """ + When used, dispatch to the appropriate controller/live_view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/stt_recorder/lib/stt_recorder_web/components/core_components.ex b/stt_recorder/lib/stt_recorder_web/components/core_components.ex new file mode 100644 index 00000000..df968283 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/components/core_components.ex @@ -0,0 +1,676 @@ +defmodule SttRecorderWeb.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as modals, tables, and + forms. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The default components use Tailwind CSS, a utility-first CSS framework. + See the [Tailwind CSS documentation](https://tailwindcss.com) to learn + how to customize them or feel free to swap in another framework altogether. + + Icons are provided by [heroicons](https://heroicons.com). See `icon/1` for usage. + """ + use Phoenix.Component + use Gettext, backend: SttRecorderWeb.Gettext + + alias Phoenix.LiveView.JS + + @doc """ + Renders a modal. + + ## Examples + + <.modal id="confirm-modal"> + This is a modal. + + + JS commands may be passed to the `:on_cancel` to configure + the closing/cancel event, for example: + + <.modal id="confirm" on_cancel={JS.navigate(~p"/posts")}> + This is another modal. 
+ + + """ + attr :id, :string, required: true + attr :show, :boolean, default: false + attr :on_cancel, JS, default: %JS{} + slot :inner_block, required: true + + def modal(assigns) do + ~H""" + + """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + @doc """ + Renders a label. + """ + attr :for, :string, default: nil + slot :inner_block, required: true + + def label(assigns) do + ~H""" + + """ + end + + @doc """ + Generates a generic error message. + """ + slot :inner_block, required: true + + def error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle-mini" class="mt-0.5 h-5 w-5 flex-none" /> + {render_slot(@inner_block)} +

+ """ + end + + @doc """ + Renders a header with title. + """ + attr :class, :string, default: nil + + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ {render_slot(@inner_block)} +

+

+ {render_slot(@subtitle)} +

+
+
{render_slot(@actions)}
+
+ """ + end + + @doc ~S""" + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id">{user.id} + <:col :let={user} label="username">{user.username} + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" +
+ + + + + + + + + + + + + +
{col[:label]} + {gettext("Actions")} +
+
+ + + {render_slot(col, @row_item.(row))} + +
+
+
+ + + {render_slot(action, @row_item.(row))} + +
+
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title">{@post.title} + <:item title="Views">{@post.views} + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
+
+
+
{item.title}
+
{render_slot(item)}
+
+
+
+ """ + end + + @doc """ + Renders a back navigation link. + + ## Examples + + <.back navigate={~p"/posts"}>Back to posts + """ + attr :navigate, :any, required: true + slot :inner_block, required: true + + def back(assigns) do + ~H""" +
+ <.link + navigate={@navigate} + class="text-sm font-semibold leading-6 text-zinc-900 hover:text-zinc-700" + > + <.icon name="hero-arrow-left-solid" class="h-3 w-3" /> + {render_slot(@inner_block)} + +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in your `assets/tailwind.config.js`. + + ## Examples + + <.icon name="hero-x-mark-solid" /> + <.icon name="hero-arrow-path" class="ml-1 w-3 h-3 animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :string, default: nil + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all transform ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all transform ease-in duration-200", + "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end + + def show_modal(js \\ %JS{}, id) when is_binary(id) do + js + |> JS.show(to: "##{id}") + |> JS.show( + to: "##{id}-bg", + time: 300, + transition: {"transition-all transform ease-out duration-300", "opacity-0", "opacity-100"} + ) + |> show("##{id}-container") + |> JS.add_class("overflow-hidden", to: "body") + |> JS.focus_first(to: "##{id}-content") + end + + def hide_modal(js \\ %JS{}, id) do + js + |> JS.hide( + to: "##{id}-bg", + transition: {"transition-all transform ease-in duration-200", "opacity-100", "opacity-0"} + ) + |> hide("##{id}-container") + |> JS.hide(to: "##{id}", transition: {"block", "block", "hidden"}) + |> JS.remove_class("overflow-hidden", to: "body") + |> JS.pop_focus() + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). + if count = opts[:count] do + Gettext.dngettext(SttRecorderWeb.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(SttRecorderWeb.Gettext, "errors", msg, opts) + end + end + + @doc """ + Translates the errors for a field from a keyword list of errors. + """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/stt_recorder/lib/stt_recorder_web/components/layouts.ex b/stt_recorder/lib/stt_recorder_web/components/layouts.ex new file mode 100644 index 00000000..b305c589 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/components/layouts.ex @@ -0,0 +1,14 @@ +defmodule SttRecorderWeb.Layouts do + @moduledoc """ + This module holds different layouts used by your application. + + See the `layouts` directory for all templates available. 
+ The "root" layout is a skeleton rendered as part of the + application router. The "app" layout is set as the default + layout on both `use SttRecorderWeb, :controller` and + `use SttRecorderWeb, :live_view`. + """ + use SttRecorderWeb, :html + + embed_templates "layouts/*" +end diff --git a/stt_recorder/lib/stt_recorder_web/components/layouts/app.html.heex b/stt_recorder/lib/stt_recorder_web/components/layouts/app.html.heex new file mode 100644 index 00000000..617ebdf3 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/components/layouts/app.html.heex @@ -0,0 +1,6 @@ + +
+
+ {@inner_content} +
+
diff --git a/stt_recorder/lib/stt_recorder_web/components/layouts/root.html.heex b/stt_recorder/lib/stt_recorder_web/components/layouts/root.html.heex new file mode 100644 index 00000000..8072740a --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/components/layouts/root.html.heex @@ -0,0 +1,18 @@ + + + + + + + <.live_title default="SttRecorder" suffix=" · Phoenix Framework"> + {assigns[:page_title]} + + + + + + + {@inner_content} + + diff --git a/stt_recorder/lib/stt_recorder_web/controllers/error_html.ex b/stt_recorder/lib/stt_recorder_web/controllers/error_html.ex new file mode 100644 index 00000000..9bb1cc2a --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/controllers/error_html.ex @@ -0,0 +1,24 @@ +defmodule SttRecorderWeb.ErrorHTML do + @moduledoc """ + This module is invoked by your endpoint in case of errors on HTML requests. + + See config/config.exs. + """ + use SttRecorderWeb, :html + + # If you want to customize your error pages, + # uncomment the embed_templates/1 call below + # and add pages to the error directory: + # + # * lib/stt_recorder_web/controllers/error_html/404.html.heex + # * lib/stt_recorder_web/controllers/error_html/500.html.heex + # + # embed_templates "error_html/*" + + # The default is to render a plain text page based on + # the template name. For example, "404.html" becomes + # "Not Found". + def render(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/stt_recorder/lib/stt_recorder_web/controllers/error_json.ex b/stt_recorder/lib/stt_recorder_web/controllers/error_json.ex new file mode 100644 index 00000000..ba79179f --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/controllers/error_json.ex @@ -0,0 +1,21 @@ +defmodule SttRecorderWeb.ErrorJSON do + @moduledoc """ + This module is invoked by your endpoint in case of errors on JSON requests. + + See config/config.exs. + """ + + # If you want to customize a particular status code, + # you may add your own clauses, such as: + # + # def render("500.json", _assigns) do + # %{errors: %{detail: "Internal Server Error"}} + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.json" becomes + # "Not Found". + def render(template, _assigns) do + %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}} + end +end diff --git a/stt_recorder/lib/stt_recorder_web/controllers/page_controller.ex b/stt_recorder/lib/stt_recorder_web/controllers/page_controller.ex new file mode 100644 index 00000000..e1b596ad --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/controllers/page_controller.ex @@ -0,0 +1,9 @@ +defmodule SttRecorderWeb.PageController do + use SttRecorderWeb, :controller + + def home(conn, _params) do + # The home page is often custom made, + # so skip the default app layout. + render(conn, :home, layout: false) + end +end diff --git a/stt_recorder/lib/stt_recorder_web/controllers/page_html.ex b/stt_recorder/lib/stt_recorder_web/controllers/page_html.ex new file mode 100644 index 00000000..a4112847 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/controllers/page_html.ex @@ -0,0 +1,10 @@ +defmodule SttRecorderWeb.PageHTML do + @moduledoc """ + This module contains pages rendered by PageController. + + See the `page_html` directory for all templates available. 
+ """ + use SttRecorderWeb, :html + + embed_templates "page_html/*" +end diff --git a/stt_recorder/lib/stt_recorder_web/controllers/page_html/home.html.heex b/stt_recorder/lib/stt_recorder_web/controllers/page_html/home.html.heex new file mode 100644 index 00000000..d72b03c2 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/controllers/page_html/home.html.heex @@ -0,0 +1,222 @@ +<.flash_group flash={@flash} /> + +
+
+ +

+ Phoenix Framework + + v{Application.spec(:phoenix, :vsn)} + +

+

+ Peace of mind from prototype to production. +

+

+ Build rich, interactive web applications quickly, with less code and fewer moving parts. Join our growing community of developers using Phoenix to craft APIs, HTML5 apps and more, for fun or at scale. +

+ +
+
diff --git a/stt_recorder/lib/stt_recorder_web/endpoint.ex b/stt_recorder/lib/stt_recorder_web/endpoint.ex new file mode 100644 index 00000000..9a289bef --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/endpoint.ex @@ -0,0 +1,52 @@ +defmodule SttRecorderWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :stt_recorder + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_stt_recorder_key", + signing_salt: "Xz3KHfj3", + same_site: "Lax" + ] + + socket "/live", Phoenix.LiveView.Socket, + websocket: [connect_info: [session: @session_options]], + longpoll: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # You should set gzip to true if you are running phx.digest + # when deploying your static files in production. + plug Plug.Static, + at: "/", + from: :stt_recorder, + gzip: false, + only: SttRecorderWeb.static_paths() + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + end + + plug Phoenix.LiveDashboard.RequestLogger, + param_key: "request_logger", + cookie_key: "request_logger" + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug SttRecorderWeb.Router +end diff --git a/stt_recorder/lib/stt_recorder_web/gettext.ex b/stt_recorder/lib/stt_recorder_web/gettext.ex new file mode 100644 index 00000000..f691fda6 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/gettext.ex @@ -0,0 +1,25 @@ +defmodule SttRecorderWeb.Gettext do + @moduledoc """ + A module providing Internationalization with a gettext-based API. + + By using [Gettext](https://hexdocs.pm/gettext), your module compiles translations + that you can use in your application. To use this Gettext backend module, + call `use Gettext` and pass it as an option: + + use Gettext, backend: SttRecorderWeb.Gettext + + # Simple translation + gettext("Here is the string to translate") + + # Plural translation + ngettext("Here is the string to translate", + "Here are the strings to translate", + 3) + + # Domain-based translation + dgettext("errors", "Here is the error message to translate") + + See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. + """ + use Gettext.Backend, otp_app: :stt_recorder +end diff --git a/stt_recorder/lib/stt_recorder_web/live/stt/test_recorder.ex b/stt_recorder/lib/stt_recorder_web/live/stt/test_recorder.ex new file mode 100644 index 00000000..195268b1 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/live/stt/test_recorder.ex @@ -0,0 +1,160 @@ +defmodule SttRecorderWeb.Stt.TestRecorder do + use SttRecorderWeb, :live_view + + def mount(_params, _session, socket) do + {:ok, socket} + end + + def render(assigns) do + ~H""" +
+
Press "Start Recording"...
+ + +
+
+
+
+
+
+ +
+ """ + end +end diff --git a/stt_recorder/lib/stt_recorder_web/router.ex b/stt_recorder/lib/stt_recorder_web/router.ex new file mode 100644 index 00000000..ff72c50e --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/router.ex @@ -0,0 +1,47 @@ +defmodule SttRecorderWeb.Router do + use SttRecorderWeb, :router + + pipeline :browser do + plug :accepts, ["html"] + plug :fetch_session + plug :fetch_live_flash + plug :put_root_layout, html: {SttRecorderWeb.Layouts, :root} + plug :protect_from_forgery + plug :put_secure_browser_headers + end + + pipeline :api do + plug :accepts, ["json"] + end + + scope "/", SttRecorderWeb do + pipe_through :browser + + get "/", PageController, :home + live "/sttrecorder", Stt.SttLive + live "/test", Stt.TestRecorder + + end + + # Other scopes may use custom stacks. + # scope "/api", SttRecorderWeb do + # pipe_through :api + # end + + # Enable LiveDashboard and Swoosh mailbox preview in development + if Application.compile_env(:stt_recorder, :dev_routes) do + # If you want to use the LiveDashboard in production, you should put + # it behind authentication and allow only admins to access it. + # If your application does not have an admins-only section yet, + # you can use Plug.BasicAuth to set up some basic authentication + # as long as you are also using SSL (which you should anyway). + import Phoenix.LiveDashboard.Router + + scope "/dev" do + pipe_through :browser + + live_dashboard "/dashboard", metrics: SttRecorderWeb.Telemetry + forward "/mailbox", Plug.Swoosh.MailboxPreview + end + end +end diff --git a/stt_recorder/lib/stt_recorder_web/telemetry.ex b/stt_recorder/lib/stt_recorder_web/telemetry.ex new file mode 100644 index 00000000..31acde58 --- /dev/null +++ b/stt_recorder/lib/stt_recorder_web/telemetry.ex @@ -0,0 +1,70 @@ +defmodule SttRecorderWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. 
+ # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.start.system_time", + unit: {:native, :millisecond} + ), + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.start.system_time", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.exception.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.socket_connected.duration", + unit: {:native, :millisecond} + ), + sum("phoenix.socket_drain.count"), + summary("phoenix.channel_joined.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_handled_in.duration", + tags: [:event], + unit: {:native, :millisecond} + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. + # {SttRecorderWeb, :count_users, []} + ] + end +end diff --git a/stt_recorder/mix.exs b/stt_recorder/mix.exs new file mode 100644 index 00000000..d4b0df13 --- /dev/null +++ b/stt_recorder/mix.exs @@ -0,0 +1,79 @@ +defmodule SttRecorder.MixProject do + use Mix.Project + + def project do + [ + app: :stt_recorder, + version: "0.1.0", + elixir: "~> 1.14", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + aliases: aliases(), + deps: deps() + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {SttRecorder.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. + defp deps do + [ + {:phoenix, "~> 1.7.21"}, + {:phoenix_html, "~> 4.1"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 1.0"}, + {:floki, ">= 0.30.0", only: :test}, + {:phoenix_live_dashboard, "~> 0.8.3"}, + {:esbuild, "~> 0.8", runtime: Mix.env() == :dev}, + {:tailwind, "~> 0.2.0", runtime: Mix.env() == :dev}, + {:heroicons, + github: "tailwindlabs/heroicons", + tag: "v2.1.1", + sparse: "optimized", + app: false, + compile: false, + depth: 1}, + {:swoosh, "~> 1.5"}, + {:finch, "~> 0.13"}, + {:telemetry_metrics, "~> 1.0"}, + {:telemetry_poller, "~> 1.0"}, + {:gettext, "~> 0.26"}, + {:jason, "~> 1.2"}, + {:dns_cluster, "~> 0.1.1"}, + {:bandit, "~> 1.5"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. 
+  defp aliases do
+    [
+      setup: ["deps.get", "assets.setup", "assets.build"],
+      "assets.setup": ["tailwind.install --if-missing", "esbuild.install --if-missing"],
+      "assets.build": ["tailwind stt_recorder", "esbuild stt_recorder"],
+      "assets.deploy": [
+        "tailwind stt_recorder --minify",
+        "esbuild stt_recorder --minify",
+        "phx.digest"
+      ]
+    ]
+  end
+end
diff --git a/stt_recorder/mix.lock b/stt_recorder/mix.lock
new file mode 100644
index 00000000..42e5afc2
--- /dev/null
+++ b/stt_recorder/mix.lock
@@ -0,0 +1,35 @@
+%{
+  "bandit": {:hex, :bandit, "1.7.0", "d1564f30553c97d3e25f9623144bb8df11f3787a26733f00b21699a128105c0c", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "3e2f7a98c7a11f48d9d8c037f7177cd39778e74d55c7af06fe6227c742a8168a"},
+  "castore": {:hex, :castore, "1.0.14", "4582dd7d630b48cf5e1ca8d3d42494db51e406b7ba704e81fbd401866366896a", [:mix], [], "hexpm", "7bc1b65249d31701393edaaac18ec8398d8974d52c647b7904d01b964137b9f4"},
+  "dns_cluster": {:hex, :dns_cluster, "0.1.3", "0bc20a2c88ed6cc494f2964075c359f8c2d00e1bf25518a6a6c7fd277c9b0c66", [:mix], [], "hexpm", "46cb7c4a1b3e52c7ad4cbe33ca5079fbde4840dedeafca2baf77996c2da1bc33"},
+  "esbuild": {:hex, :esbuild, "0.10.0", "b0aa3388a1c23e727c5a3e7427c932d89ee791746b0081bbe56103e9ef3d291f", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "468489cda427b974a7cc9f03ace55368a83e1a7be12fba7e30969af78e5f8c70"},
+  "expo": {:hex, :expo, "1.1.0", "f7b9ed7fb5745ebe1eeedf3d6f29226c5dd52897ac67c0f8af62a07e661e5c75", [:mix], [], "hexpm", "fbadf93f4700fb44c331362177bdca9eeb8097e8b0ef525c9cc501cb9917c960"},
+  "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"},
+  "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"},
+  "floki": {:hex, :floki, "0.37.1", "d7aaee758c8a5b4a7495799a4260754fec5530d95b9c383c03b27359dea117cf", [:mix], [], "hexpm", "673d040cb594d31318d514590246b6dd587ed341d3b67e17c1c0eb8ce7ca6f04"},
+  "gettext": {:hex, :gettext, "0.26.2", "5978aa7b21fada6deabf1f6341ddba50bc69c999e812211903b169799208f2a8", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "aa978504bcf76511efdc22d580ba08e2279caab1066b76bb9aa81c4a1e0a32a5"},
+  "heroicons": {:git, "https://github.com/tailwindlabs/heroicons.git", "88ab3a0d790e6a47404cba02800a6b25d2afae50", [tag: "v2.1.1", sparse: "optimized", depth: 1]},
+  "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
+  "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
+  "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
+  "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
+  "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
+  "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
+  "phoenix": {:hex, :phoenix, "1.7.21", "14ca4f1071a5f65121217d6b57ac5712d1857e40a0833aff7a691b7870fc9a3b", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "336dce4f86cba56fed312a7d280bf2282c720abb6074bdb1b61ec8095bdd0bc9"},
+  "phoenix_html": {:hex, :phoenix_html, "4.2.1", "35279e2a39140068fc03f8874408d58eef734e488fc142153f055c5454fd1c08", [:mix], [], "hexpm", "cff108100ae2715dd959ae8f2a8cef8e20b593f8dfd031c9cba92702cf23e053"},
+  "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.7", "405880012cb4b706f26dd1c6349125bfc903fb9e44d1ea668adaf4e04d4884b7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "3a8625cab39ec261d48a13b7468dc619c0ede099601b084e343968309bd4d7d7"},
+  "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.6.0", "2791fac0e2776b640192308cc90c0dbcf67843ad51387ed4ecae2038263d708d", [:mix], [{:file_system, "~> 0.2.10 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b3a1fa036d7eb2f956774eda7a7638cf5123f8f2175aca6d6420a7f95e598e1c"},
+  "phoenix_live_view": {:hex, :phoenix_live_view, "1.0.14", "621f075577e286ff1e67d6de085ddf6f364f934d229c1c5564be1ef4c77908b9", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b6dcb3f236044cd9d1c0d0996331bef72716b1991bbd8e0725a617c0d95a9483"},
+  "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"},
+  "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"},
+  "plug": {:hex, :plug, "1.18.0", "d78df36c41f7e798f2edf1f33e1727eae438e9dd5d809a9997c463a108244042", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "819f9e176d51e44dc38132e132fe0accaf6767eab7f0303431e404da8476cfa2"},
+  "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
+  "swoosh": {:hex, :swoosh, "1.19.1", "77e839b27fc7af0704788e5854934c77d4dea7b437270c924a717513d598b8a4", [:mix], [{:bandit, ">= 1.0.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:cowboy, "~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:ex_aws, "~> 2.1", [hex: :ex_aws, repo: "hexpm", optional: true]}, {:finch, "~> 0.6", [hex: :finch, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13 or ~> 1.0", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mua, "~> 0.2.3", [hex: :mua, repo: "hexpm", optional: true]}, {:multipart, "~> 0.4", [hex: :multipart, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:req, "~> 0.5.10 or ~> 0.6 or ~> 1.0", [hex: :req, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "eab57462d41a3330e82cb93a9d7640f5c79a85951f3457db25c1eb28fda193a6"},
+  "tailwind": {:hex, :tailwind, "0.2.4", "5706ec47182d4e7045901302bf3a333e80f3d1af65c442ba9a9eed152fb26c2e", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c6e4a82b8727bab593700c998a4d98cf3d8025678bfde059aed71d0000c3e463"},
+  "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
+  "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"},
+  "telemetry_poller": {:hex, :telemetry_poller, "1.2.0", "ba82e333215aed9dd2096f93bd1d13ae89d249f82760fcada0850ba33bac154b", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7216e21a6c326eb9aa44328028c34e9fd348fb53667ca837be59d0aa2a0156e8"},
+  "thousand_island": {:hex, :thousand_island, "1.3.14", "ad45ebed2577b5437582bcc79c5eccd1e2a8c326abf6a3464ab6c06e2055a34a", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "d0d24a929d31cdd1d7903a4fe7f2409afeedff092d277be604966cd6aa4307ef"},
+  "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
+  "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"},
+}
diff --git a/stt_recorder/priv/gettext/en/LC_MESSAGES/errors.po b/stt_recorder/priv/gettext/en/LC_MESSAGES/errors.po
new file mode 100644
index 00000000..cdec3a11
--- /dev/null
+++ b/stt_recorder/priv/gettext/en/LC_MESSAGES/errors.po
@@ -0,0 +1,11 @@
+## `msgid`s in this file come from POT (.pot) files.
+##
+## Do not add, change, or remove `msgid`s manually here as
+## they're tied to the ones in the corresponding POT file
+## (with the same domain).
+##
+## Use `mix gettext.extract --merge` or `mix gettext.merge`
+## to merge POT files into PO files.
+msgid ""
+msgstr ""
+"Language: en\n"
diff --git a/stt_recorder/priv/gettext/errors.pot b/stt_recorder/priv/gettext/errors.pot
new file mode 100644
index 00000000..d6f47fa8
--- /dev/null
+++ b/stt_recorder/priv/gettext/errors.pot
@@ -0,0 +1,10 @@
+## This is a PO Template file.
+##
+## `msgid`s here are often extracted from source code.
+## Add new translations manually only if they're dynamic
+## translations that can't be statically extracted.
+##
+## Run `mix gettext.extract` to bring this file up to
+## date. Leave `msgstr`s empty as changing them here has no
+## effect: edit them in PO (`.po`) files instead.
+
diff --git a/stt_recorder/priv/static/favicon-91f37b602a111216f1eef3aa337ad763.ico b/stt_recorder/priv/static/favicon-91f37b602a111216f1eef3aa337ad763.ico
new file mode 100644
index 00000000..7f372bfc
Binary files /dev/null and b/stt_recorder/priv/static/favicon-91f37b602a111216f1eef3aa337ad763.ico differ
diff --git a/stt_recorder/priv/static/favicon.ico b/stt_recorder/priv/static/favicon.ico
new file mode 100644
index 00000000..7f372bfc
Binary files /dev/null and b/stt_recorder/priv/static/favicon.ico differ
diff --git a/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg b/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg
new file mode 100644
index 00000000..9f26baba
--- /dev/null
+++ b/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg
@@ -0,0 +1,6 @@
+
diff --git a/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz b/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz
new file mode 100644
index 00000000..2929d3b1
Binary files /dev/null and b/stt_recorder/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz differ
diff --git a/stt_recorder/priv/static/images/logo.svg b/stt_recorder/priv/static/images/logo.svg
new file mode 100644
index 00000000..9f26baba
--- /dev/null
+++ b/stt_recorder/priv/static/images/logo.svg
@@ -0,0 +1,6 @@
+
diff --git a/stt_recorder/priv/static/images/logo.svg.gz b/stt_recorder/priv/static/images/logo.svg.gz
new file mode 100644
index 00000000..2929d3b1
Binary files /dev/null and b/stt_recorder/priv/static/images/logo.svg.gz differ
diff --git a/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt b/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt
new file mode 100644
index 00000000..26e06b5f
--- /dev/null
+++ b/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt
@@ -0,0 +1,5 @@
+# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file
+#
+# To ban all spiders from the entire site uncomment the next two lines:
+# User-agent: *
+# Disallow: /
diff --git a/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz b/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz
new file mode 100644
index 00000000..24de2e14
Binary files /dev/null and b/stt_recorder/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz differ
diff --git a/stt_recorder/priv/static/robots.txt b/stt_recorder/priv/static/robots.txt
new file mode 100644
index 00000000..26e06b5f
--- /dev/null
+++ b/stt_recorder/priv/static/robots.txt
@@ -0,0 +1,5 @@
+# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file
+#
+# To ban all spiders from the entire site uncomment the next two lines:
+# User-agent: *
+# Disallow: /
diff --git a/stt_recorder/priv/static/robots.txt.gz b/stt_recorder/priv/static/robots.txt.gz
new file mode 100644
index 00000000..24de2e14
Binary files /dev/null and b/stt_recorder/priv/static/robots.txt.gz differ
diff --git a/stt_recorder/test/stt_recorder_web/controllers/error_html_test.exs b/stt_recorder/test/stt_recorder_web/controllers/error_html_test.exs
new file mode 100644
index 00000000..1c8128e8
--- /dev/null
+++ b/stt_recorder/test/stt_recorder_web/controllers/error_html_test.exs
@@ -0,0 +1,14 @@
+defmodule SttRecorderWeb.ErrorHTMLTest do
+  use SttRecorderWeb.ConnCase, async: true
+
+  # Bring render_to_string/4 for testing custom views
+  import Phoenix.Template
+
+  test "renders 404.html" do
+    assert render_to_string(SttRecorderWeb.ErrorHTML, "404", "html", []) == "Not Found"
+  end
+
+  test "renders 500.html" do
+    assert render_to_string(SttRecorderWeb.ErrorHTML, "500", "html", []) == "Internal Server Error"
+  end
+end
diff --git a/stt_recorder/test/stt_recorder_web/controllers/error_json_test.exs b/stt_recorder/test/stt_recorder_web/controllers/error_json_test.exs
new file mode 100644
index 00000000..9ae0051f
--- /dev/null
+++ b/stt_recorder/test/stt_recorder_web/controllers/error_json_test.exs
@@ -0,0 +1,12 @@
+defmodule SttRecorderWeb.ErrorJSONTest do
+  use SttRecorderWeb.ConnCase, async: true
+
+  test "renders 404" do
+    assert SttRecorderWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{detail: "Not Found"}}
+  end
+
+  test "renders 500" do
+    assert SttRecorderWeb.ErrorJSON.render("500.json", %{}) ==
+             %{errors: %{detail: "Internal Server Error"}}
+  end
+end
diff --git a/stt_recorder/test/stt_recorder_web/controllers/page_controller_test.exs b/stt_recorder/test/stt_recorder_web/controllers/page_controller_test.exs
new file mode 100644
index 00000000..43ddb342
--- /dev/null
+++ b/stt_recorder/test/stt_recorder_web/controllers/page_controller_test.exs
@@ -0,0 +1,8 @@
+defmodule SttRecorderWeb.PageControllerTest do
+  use SttRecorderWeb.ConnCase
+
+  test "GET /", %{conn: conn} do
+    conn = get(conn, ~p"/")
+    assert html_response(conn, 200) =~ "Peace of mind from prototype to production"
+  end
+end
diff --git a/stt_recorder/test/support/conn_case.ex b/stt_recorder/test/support/conn_case.ex
new file mode 100644
index 00000000..efe683af
--- /dev/null
+++ b/stt_recorder/test/support/conn_case.ex
@@ -0,0 +1,37 @@
+defmodule SttRecorderWeb.ConnCase do
+  @moduledoc """
+  This module defines the test case to be used by
+  tests that require setting up a connection.
+
+  Such tests rely on `Phoenix.ConnTest` and also
+  import other functionality to make it easier
+  to build common data structures and query the data layer.
+
+  Finally, if the test case interacts with the database,
+  we enable the SQL sandbox, so changes done to the database
+  are reverted at the end of every test. If you are using
+  PostgreSQL, you can even run database tests asynchronously
+  by setting `use SttRecorderWeb.ConnCase, async: true`, although
+  this option is not recommended for other databases.
+  """
+
+  use ExUnit.CaseTemplate
+
+  using do
+    quote do
+      # The default endpoint for testing
+      @endpoint SttRecorderWeb.Endpoint
+
+      use SttRecorderWeb, :verified_routes
+
+      # Import conveniences for testing with connections
+      import Plug.Conn
+      import Phoenix.ConnTest
+      import SttRecorderWeb.ConnCase
+    end
+  end
+
+  setup _tags do
+    {:ok, conn: Phoenix.ConnTest.build_conn()}
+  end
+end
diff --git a/stt_recorder/test/test_helper.exs b/stt_recorder/test/test_helper.exs
new file mode 100644
index 00000000..869559e7
--- /dev/null
+++ b/stt_recorder/test/test_helper.exs
@@ -0,0 +1 @@
+ExUnit.start()