"""LLM 客户端 - DeepSeek 集成"""
import json
import re
from dataclasses import dataclass
from typing import Any, AsyncGenerator, Dict, List, Optional

import httpx

from src.config import get_settings


@dataclass
class LLMResponse:
    """LLM 响应"""
    content: str
    model: str
    usage: Dict[str, int]
    cost: float


@dataclass
class LLMChoice:
    """LLM 选择"""
    index: int
    message: Dict[str, str]
    finish_reason: str


class DeepSeekClient:
    """DeepSeek LLM 客户端"""

    def __init__(self):
        settings = get_settings()
        self.api_key = getattr(settings, 'DEEPSEEK_API_KEY', '')
        self.base_url = getattr(settings, 'DEEPSEEK_BASE_URL', 'https://api.deepseek.com/v1')
        self.model = getattr(settings, 'DEEPSEEK_MODEL', 'deepseek-chat')
        self.enabled = bool(self.api_key)

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
    ) -> LLMResponse:
        """
        发送聊天完成请求

        Args:
            messages: 消息列表，格式 [{"role": "user", "content": "..."}]
            temperature: 采样温度
            max_tokens: 最大生成token数
            stream: 是否流式返回

        Returns:
            LLMResponse 对象
        """
        if not self.enabled:
            raise ValueError("DeepSeek API key not configured")

        url = f"{self.base_url}/chat/completions"
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": self.model,
            "messages": messages,
            "temperature": temperature,
            "stream": False,
        }
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens

        async with httpx.AsyncClient() as client:
            response = await client.post(url, headers=headers, json=payload, timeout=60.0)
            response.raise_for_status()
            data = response.json()

        # Estimate cost from token usage (DeepSeek pricing).
        usage = data.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)
        # deepseek-chat: input ¥0.001 / 1K tokens, output ¥0.002 / 1K tokens
        cost = (prompt_tokens / 1000) * 0.001 + (completion_tokens / 1000) * 0.002
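        # e.g. 1,500 prompt + 500 completion tokens:
        # 1.5 * ¥0.001 + 0.5 * ¥0.002 = ¥0.0025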

        return LLMResponse(
            content=data["choices"][0]["message"]["content"],
            model=data.get("model", self.model),
            usage=usage,
            cost=cost,
        )
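
    # Example (a sketch; assumes DEEPSEEK_API_KEY is set in settings):
    #     resp = await client.chat_completion([{"role": "user", "content": "Hi"}])
    #     resp.content  # assistant reply text
    #     resp.cost     # estimated cost in CNY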

    async def chat_completion_stream(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[str, None]:
        """
        流式聊天完成

        Args:
            messages: 消息列表
            temperature: 采样温度
            max_tokens: 最大生成token数

        Yields:
            生成的文本片段
        """
        if not self.enabled:
            raise ValueError("DeepSeek API key not configured")

        url = f"{self.base_url}/chat/completions"
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": self.model,
            "messages": messages,
            "temperature": temperature,
            "stream": True,
        }
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens

        async with httpx.AsyncClient() as client:
            async with client.stream(
                "POST", url, headers=headers, json=payload, timeout=60.0
            ) as response:
                response.raise_for_status()
                # The API streams SSE lines of the form "data: {json}",
                # terminated by a "data: [DONE]" sentinel.
                async for line in response.aiter_lines():
                    if line.startswith("data: "):
                        data = line[len("data: "):]
                        if data == "[DONE]":
                            break
                        try:
                            chunk = json.loads(data)
                            delta = chunk["choices"][0].get("delta", {})
                            content = delta.get("content")
                            if content:
                                yield content
                        except (json.JSONDecodeError, KeyError, IndexError):
                            # Skip malformed or non-content chunks.
                            continue
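
    # Example (a sketch): consume fragments as they arrive.
    #     async for piece in client.chat_completion_stream(messages):
    #         print(piece, end="", flush=True)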

    async def analyze_query(
        self,
        query: str,
        context: Optional[Dict] = None,
    ) -> Dict[str, Any]:
        """
        分析用户查询意图

        Args:
            query: 用户查询
            context: 上下文信息

        Returns:
            分析结果：intent, entities, required_skills, confidence
        """
        messages = [
            {
                "role": "system",
                "content": """You are an enterprise assistant responsible for analyzing the intent of user queries.
Analyze the following query and return the result as JSON:
{
    "intent": "query intent (query_employee/query_department/analyze_data/other)",
    "entities": ["extracted entities"],
    "required_skills": ["required skills"],
    "needs_reasoning": true/false,
    "confidence": 0.0-1.0
}"""
            },
            {
                "role": "user",
                "content": f"Query: {query}\nContext: {context or {}}"
            }
        ]

        response = await self.chat_completion(messages, temperature=0.3)
        content = response.content

        # Try to extract and parse a JSON object from the model's reply.
        json_match = re.search(r'\{.*\}', content, re.DOTALL)
        if json_match:
            try:
                return json.loads(json_match.group())
            except json.JSONDecodeError:
                pass

        # Fall back to a default structure when parsing fails.
        return {
            "intent": "other",
            "entities": [],
            "required_skills": ["data_query"],
            # Heuristic: Chinese keywords for "analyze" / "recommend".
            "needs_reasoning": "分析" in query or "建议" in query,
            "confidence": 0.5,
        }

    async def generate_response(
        self,
        query: str,
        data: Dict[str, Any],
        context: Optional[Dict] = None,
    ) -> str:
        """
        基于查询和数据生成回复

        Args:
            query: 原始查询
            data: 查询到的数据
            context: 上下文信息

        Returns:
            生成的回复文本
        """
        messages = [
            {
                "role": "system",
                "content": """You are a professional enterprise assistant. Answer the user's question based on the provided data.
Requirements:
1. Keep the answer concise and clear.
2. For data analysis, surface the key insights.
3. Cite the data source.
4. Mark any inference explicitly as [inferred from data]."""
            },
            {
                "role": "user",
                "content": f"Question: {query}\nData: {data}\nContext: {context or {}}"
            }
        ]

        response = await self.chat_completion(messages, temperature=0.7)
        return response.content
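
    # Example (a sketch; the "data" payload here is hypothetical):
    #     answer = await client.generate_response(
    #         "How many people are in R&D?",
    #         data={"department": "R&D", "headcount": 42},
    #     )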


# Global client instance (lazy singleton)
_llm_client: Optional[DeepSeekClient] = None


def get_llm_client() -> DeepSeekClient:
    """获取全局 LLM 客户端（单例）"""
    global _llm_client
    if _llm_client is None:
        _llm_client = DeepSeekClient()
    return _llm_client
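

if __name__ == "__main__":
    # Smoke-test sketch. Assumes DEEPSEEK_API_KEY (and optionally
    # DEEPSEEK_BASE_URL / DEEPSEEK_MODEL) are provided via the settings
    # object returned by get_settings(); the queries below are illustrative.
    import asyncio

    async def _demo() -> None:
        client = get_llm_client()
        if not client.enabled:
            print("DEEPSEEK_API_KEY not configured; skipping demo")
            return

        analysis = await client.analyze_query("How many people are in the R&D department?")
        print("analysis:", analysis)

        response = await client.chat_completion(
            [{"role": "user", "content": "Hello"}],
            temperature=0.7,
        )
        print(f"[{response.model}] {response.content} (cost: ¥{response.cost:.6f})")

    asyncio.run(_demo())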
