"""
Whisper Speech-to-Text Module

Provides transcription using OpenAI's Whisper model with hallucination detection.
"""

import os
import tempfile
from typing import Optional

import whisper

from ..config import (
    WHISPER_MODEL_NAME,
    WHISPER_LANGUAGE,
    WHISPER_FP16,
    SAMPLE_RATE_OUT,
    BYTES_PER_SAMPLE,
    log_stt,
    log_call,
)
from ..audio.processing import save_wav


class WhisperTranscriber:
    """
    Whisper-based speech transcription.

    Handles model loading, transcription, and prompt leak detection.
    The underlying model is loaded lazily on first use.
    """

    def __init__(
        self,
        model_name: str = WHISPER_MODEL_NAME,
        language: str = WHISPER_LANGUAGE,
        fp16: bool = WHISPER_FP16,
    ):
        """
        Initialize Whisper transcriber.

        Args:
            model_name: Whisper model size (tiny, base, small, medium, large)
            language: Target language code (e.g., 'en')
            fp16: Use FP16 inference (requires CUDA)
        """
        self.model_name = model_name
        self.language = language
        self.fp16 = fp16
        # Model handle; None until load_model() is called (lazy loading).
        self._model = None

    def load_model(self):
        """Load the Whisper model into memory (idempotent; no-op if already loaded)."""
        if self._model is None:
            log_call.info("Loading Whisper model=%s", self.model_name)
            self._model = whisper.load_model(self.model_name)
            log_call.info("Whisper model loaded successfully")

    @property
    def model(self):
        """Get loaded model, loading it on first access if necessary."""
        if self._model is None:
            self.load_model()
        return self._model

    def transcribe(self, pcm_16k: bytes) -> str:
        """
        Transcribe 16kHz PCM audio to text.

        Args:
            pcm_16k: 16kHz 16-bit PCM audio data

        Returns:
            Transcribed text, or empty string on failure / too-short input /
            detected prompt leak.
        """
        # Skip very short audio (<250ms): too little signal to transcribe,
        # and short clips are the most hallucination-prone.
        min_bytes = int(SAMPLE_RATE_OUT * BYTES_PER_SAMPLE * 0.25)
        if not pcm_16k or len(pcm_16k) < min_bytes:
            return ""

        # BUGFIX: NamedTemporaryFile(delete=True) keeps the handle open, and on
        # Windows an open temp file cannot be reopened by path — save_wav and
        # whisper both open tmp.name. Use mkstemp (close the fd immediately)
        # and delete the file ourselves in `finally` so it is never leaked.
        tmp_path = None
        try:
            fd, tmp_path = tempfile.mkstemp(suffix=".wav")
            os.close(fd)

            save_wav(pcm_16k, tmp_path, SAMPLE_RATE_OUT)

            result = self.model.transcribe(
                tmp_path,
                language=self.language,
                fp16=self.fp16,
                verbose=False,
                # Disable context carry-over and prompting to reduce
                # hallucination; deterministic decoding via temperature=0.
                condition_on_previous_text=False,
                initial_prompt="",
                temperature=0.0,
                beam_size=5,
                no_speech_threshold=0.6,
                logprob_threshold=-1.0,
            )

            text = (result.get("text") or "").strip()

            # Drop utterances that look like Whisper echoing its own prompt.
            if is_prompt_leak(text):
                log_stt.warning("Dropped prompt-leak utterance")
                return ""

            return text

        except RuntimeError as e:
            # Whisper raises RuntimeError on inference failures; degrade to
            # an empty transcript rather than crashing the caller.
            log_stt.error("Whisper failed: %r", e)
            return ""

        finally:
            # Always remove the temp WAV, even on failure.
            if tmp_path is not None:
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

    def get_model_info(self) -> dict:
        """Get information about the configured (and possibly loaded) model."""
        return {
            "model_name": self.model_name,
            "language": self.language,
            "fp16": self.fp16,
            "loaded": self._model is not None,
        }


def is_prompt_leak(text: str) -> bool:
    """
    Detect when Whisper hallucinates its own prompt instead of transcribing real speech.

    This is a conservative check - only drops obvious prompt leaks, not real user speech.

    Args:
        text: Transcribed text to check

    Returns:
        True if text appears to be a prompt leak
    """
    lowered = text.lower()

    # Case 1: both signature phrases of the initial prompt appear verbatim.
    phrase_one = "customer support phone call"
    phrase_two = "technical or service-related issue"
    if phrase_one in lowered and phrase_two in lowered:
        return True

    # Case 2: a long utterance (>150 chars) containing every prompt keyword.
    keywords = ("customer", "support", "technical", "service")
    return len(lowered) > 150 and all(kw in lowered for kw in keywords)


# Process-wide singleton; created on first get_transcriber() call.
_transcriber: Optional[WhisperTranscriber] = None


def get_transcriber() -> WhisperTranscriber:
    """Return the shared WhisperTranscriber, constructing it lazily on first use."""
    global _transcriber
    if _transcriber is not None:
        return _transcriber
    _transcriber = WhisperTranscriber()
    return _transcriber
