#!/usr/bin/env python3
"""
AI Researcher - LLM integration test script.

Used to verify that the DeepSeek LLM is configured correctly.
"""
import asyncio
import sys
import os

# Add the project root to sys.path so the `src` package resolves.
# NOTE(review): hard-coded absolute path — assumes this specific machine's
# layout; confirm or replace with a path derived from __file__.
sys.path.insert(0, '/home/jang/Projects/ai-researcher')

from src.integrations.llm_client import get_llm_client


async def test_llm_status():
    """Print the LLM client's configuration and report whether it is usable.

    Returns:
        True when the client reports itself enabled, False otherwise.
    """
    banner = "=" * 50
    print(banner)
    print("🧪 测试 LLM 集成")
    print(banner)

    client = get_llm_client()

    print("\n📊 配置状态:")
    print(f"   已启用: {client.enabled}")
    print(f"   模型: {client.model}")
    print(f"   API URL: {client.base_url}")

    # Early success path; fall through only when configuration is missing.
    if client.enabled:
        return True

    print("\n❌ 错误: LLM 未配置")
    print("   请检查 .env 文件中的 DEEPSEEK_API_KEY")
    return False


async def test_chat_completion():
    """Send a single chat request and print the reply.

    Returns:
        True when the request succeeds, False if any exception is raised.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("💬 测试聊天功能")
    print(banner)

    client = get_llm_client()

    conversation = [
        {"role": "system", "content": "你是一个企业智能助手，简洁回答问题。"},
        {"role": "user", "content": "你好，请用一句话介绍自己"},
    ]

    try:
        print("\n📝 发送请求...")
        reply = await client.chat_completion(
            messages=conversation,
            temperature=0.7,
        )

        # Surface the metadata alongside the content for manual inspection.
        print("\n✅ 响应成功!")
        print(f"   模型: {reply.model}")
        print(f"   Token 使用: {reply.usage}")
        print(f"   成本: ${reply.cost:.6f}")
        print("\n🤖 AI 回复:")
        print(f"   {reply.content}")
        return True

    except Exception as exc:
        print(f"\n❌ 错误: {exc}")
        return False


async def test_query_analysis():
    """Run intent analysis over a few sample queries, printing each result.

    Failures for individual queries are reported but do not abort the loop.

    Returns:
        Always True; per-query errors are printed rather than propagated.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("🔍 测试查询意图分析")
    print(banner)

    client = get_llm_client()

    sample_queries = (
        "技术部有多少员工？",
        "对比销售部和市场部的业绩",
        "为什么最近员工离职率上升？",
    )

    for question in sample_queries:
        print(f"\n📝 查询: {question}")
        try:
            analysis = await client.analyze_query(question)
            print(f"   意图: {analysis['intent']}")
            print(f"   实体: {analysis['entities']}")
            print(f"   需要推理: {analysis['needs_reasoning']}")
            print(f"   置信度: {analysis['confidence']}")
        except Exception as exc:
            print(f"   ❌ 错误: {exc}")

    return True


async def test_stream():
    """Exercise the streaming chat API, echoing chunks as they arrive.

    Returns:
        True when the stream completes, False if any exception is raised.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("🌊 测试流式响应")
    print(banner)

    client = get_llm_client()

    prompt = [
        {"role": "user", "content": "用20个字描述春天"},
    ]

    try:
        print("\n📝 流式输出:")
        print("   ", end="", flush=True)

        # Flush every chunk immediately so the stream is visible live.
        async for piece in client.chat_completion_stream(messages=prompt):
            print(piece, end="", flush=True)

        print("\n\n✅ 流式响应完成!")
        return True

    except Exception as exc:
        print(f"\n❌ 错误: {exc}")
        return False


async def main():
    """Run the integration tests in order, aborting on fatal failures.

    Exits with status 1 when the client is unconfigured or the basic chat
    test fails; later tests are best-effort and do not affect the exit code.
    """
    rockets = "🚀" * 25
    print("\n" + rockets)
    print("   AI研究员 - LLM 集成测试")
    print(rockets + "\n")

    # Configuration check — nothing else can work without it.
    configured = await test_llm_status()
    if not configured:
        sys.exit(1)

    # Basic chat is a prerequisite for the remaining tests.
    chat_ok = await test_chat_completion()
    if not chat_ok:
        print("\n⚠️ 聊天测试失败，跳过后续测试")
        sys.exit(1)

    # Best-effort tests: analysis, then streaming.
    await test_query_analysis()
    await test_stream()

    banner = "=" * 50
    print("\n" + banner)
    print("✅ 所有测试完成!")
    print(banner)
    print("\n🎉 LLM 集成工作正常!")
    print("   你可以启动服务器进行更多测试:")
    print("   uv run python -m uvicorn src.main:app --reload")


if __name__ == "__main__":
    asyncio.run(main())
