
import json
import logging
from typing import Dict, Any

from .llm import chat_completion
import base64
from .prompts import system_prompt
from .tools import build_tool_registry   # imports the full toolbox
from .interfaces import CLIAdapter, UserInterfaceAdapter
from . import ui_state
import copy
import datetime

# -----------------------------------------------------------------
# Helper: parse possibly‑markdown‑wrapped JSON
# -----------------------------------------------------------------
def parse_json_response(text: str) -> Dict[str, Any]:
    """Extract raw JSON from optional markdown fences and parse it.

    Args:
        text: Model output that is either bare JSON, JSON wrapped in
            ```-fences (optionally tagged ``json``), or free text that
            embeds a JSON object.

    Returns:
        The parsed JSON object.

    Raises:
        json.JSONDecodeError: if no parseable JSON object can be found.
    """
    if text.startswith("```"):
        parts = text.split("```")
        if len(parts) >= 3:
            text = parts[1].strip()
            if text.lower().startswith("json"):
                # Drop the "json" language tag that follows the opening fence.
                text = text[4:].strip()
    # First try direct parse
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        # Fallback: attempt to extract the first balanced JSON object in the text.
        start = text.find("{")
        if start == -1:
            raise
        depth = 0
        in_string = False   # inside a JSON string literal
        escaped = False     # previous char was a backslash inside a string
        for i in range(start, len(text)):
            ch = text[i]
            if in_string:
                # Braces inside string literals must NOT affect the depth
                # count (e.g. {"a": "}"}); track escapes so \" does not
                # terminate the string early.
                if escaped:
                    escaped = False
                elif ch == "\\":
                    escaped = True
                elif ch == '"':
                    in_string = False
                continue
            if ch == '"':
                in_string = True
            elif ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
                if depth == 0:
                    candidate = text[start:i + 1]
                    try:
                        return json.loads(candidate)
                    except json.JSONDecodeError:
                        break
        # If we couldn't recover, re-raise the original error for upstream handling
        raise

# -----------------------------------------------------------------
# The Agent class
# -----------------------------------------------------------------
class Agent:
    """LLM-driven agent that loops: ask the model, parse a JSON action,
    run the named tool, feed the result back — until a final answer,
    an awaiting-user pause, or the step cap is reached."""

    def __init__(self, model: str = None, max_steps: int = 100, ui_adapter: UserInterfaceAdapter = None):
        """
        Args:
            model: LLM model name passed through to ``chat_completion``
                (defaults to "qwen/qwen-2.5-coder-32b-instruct").
            max_steps: safety cap – maximum number of tool‑call cycles.
            ui_adapter: adapter used by interactive tools (e.g. ask_user);
                defaults to a ``CLIAdapter``.
        """
        self.model = model or "qwen/qwen-2.5-coder-32b-instruct"
        self.max_steps = max_steps
        self.ui_adapter = ui_adapter or CLIAdapter()
        self.tools = build_tool_registry(self.ui_adapter)               # includes file ops + ask_user
        # System prompt is built from the tool objects so the model knows
        # which tools exist and how to call them.
        self.system_msg = system_prompt(list(self.tools.values()))
    def _run_loop(self, messages, start_step: int = 1):
        """Core think/act loop.

        Args:
            messages: mutable chat history (list of role/content dicts);
                mutated in place as the loop progresses.
            start_step: step number to begin at (used by ``resume`` so the
                step budget carries over across an awaiting-user pause).

        Returns:
            The final answer payload if the model emitted ``final_answer``,
            a ``{"awaiting": token, "prompt": ...}`` dict if a tool paused
            for user input, or ``None`` on parse failure / bad tool request /
            step-cap exhaustion.
        """
        logger = logging.getLogger(__name__)
        logger.info("Agent run loop starting at step %s", start_step)
        for step in range(start_step, self.max_steps + 1):
            assistant_reply = chat_completion(messages, model=self.model)
            logger.info("Step %s – Model reply: %s", step, assistant_reply)
            print(f"\n🧠 Step {step} – Model reply:\n{assistant_reply}\n")

            # Always record the assistant reply immediately so the conversation
            # history reflects the model output before any tool runs or awaiting.
            messages.append({"role": "assistant", "content": assistant_reply})

            try:
                payload = parse_json_response(assistant_reply)
            except (json.JSONDecodeError, ValueError) as e:
                # Last-resort salvage: regex-extract a tool name (and an
                # "expression" argument, quoted or bare) from malformed JSON.
                # NOTE(review): only the "expression" arg is recovered here —
                # presumably tuned for a calculator-style tool; other args
                # are silently dropped.
                import re
                m_tool = re.search(r'"tool"\s*:\s*"([^\"]+)"', assistant_reply)
                if m_tool:
                    tool_name = m_tool.group(1)
                    payload = {"tool": tool_name}
                    m_expr = re.search(r'"expression"\s*:\s*"([^\"]+)"', assistant_reply, re.DOTALL)
                    if m_expr:
                        payload["args"] = {"expression": m_expr.group(1)}
                    else:
                        # Unquoted value: grab up to the next comma/newline/brace.
                        m_expr2 = re.search(r'"expression"\s*:\s*([^,\n\}]+)', assistant_reply)
                        if m_expr2:
                            expr = m_expr2.group(1).strip().strip('"').strip()
                            payload["args"] = {"expression": expr}
                        else:
                            payload["args"] = {}
                else:
                    logger.error("Failed to parse JSON from model: %s", e)
                    print(f"❌ Failed to parse JSON from model: {e}")
                    break

            # Terminal case: model declared it is done.
            if "final_answer" in payload:
                logger.info("Final answer found: %s", payload["final_answer"])
                print("\n✅ Task solved!\n")
                print("🗣️ Final answer:\n")
                print(payload["final_answer"])
                return payload["final_answer"]

            tool_name = payload.get("tool")
            tool_args = payload.get("args", {})

            # Unknown or missing tool: abort the loop (falls through to the
            # "max steps" warning below, which is slightly misleading here).
            if not tool_name or tool_name not in self.tools:
                print(f"❌ Invalid tool request: {payload}")
                break

            tool = self.tools[tool_name]
            logger.info("Calling tool %s with args: %s", tool_name, tool_args)
            print(f"🔧 Calling tool **{tool_name}** with args: {tool_args}")

            tool_result = tool.run(tool_args)

            # If tool_result signals awaiting user response, save pending state and return token
            if isinstance(tool_result, dict) and tool_result.get("awaiting"):
                token = tool_result["awaiting"]
                prompt = tool_result.get("prompt")
                # Append only a placeholder tool-result (assistant reply already recorded)
                messages.append({"role": "assistant", "content": f"Tool result for **{tool_name}**:\n(Awaiting user response token {token})"})
                # Save pending state for resume (deepcopy to freeze snapshot)
                ui_state.PENDING[token] = {
                    "messages": copy.deepcopy(messages),
                    "step": step,
                    "tool_name": tool_name,
                    "tool_args": tool_args,
                    "model": self.model,
                    "max_steps": self.max_steps,
                    "prompt": prompt,
                    "created_at": datetime.datetime.utcnow().isoformat() + 'Z',
                }
                logger.info("Saved pending token %s for tool %s prompt=%s", token, tool_name, (prompt or '')[:200])
                return {"awaiting": token, "prompt": prompt}

            # append tool result (assistant reply already recorded above)
            messages.append({"role": "assistant", "content": f"Tool result for **{tool_name}**:\n{tool_result}"})
            logger.info("Appended tool result for %s", tool_name)

        logger.warning("Reached maximum steps without a final answer")
        print("\n⚠️ Reached maximum steps without a final answer.")
        return None

    def run(self, user_instruction: str):
        """Start a fresh conversation for *user_instruction* and run the loop.

        Returns whatever ``_run_loop`` returns (final answer, awaiting dict,
        or None).
        """
        messages = [
            {"role": "system", "content": self.system_msg},
            {"role": "user", "content": user_instruction},
        ]
        return self._run_loop(messages, start_step=1)

    def resume(self, token: str, user_answer: str):
        """Resume a run that paused with an awaiting-user token.

        Args:
            token: the token previously returned in ``{"awaiting": token}``
                and stored in ``ui_state.PENDING``.
            user_answer: the user's reply to the pending prompt.

        Returns:
            ``{"error": "unknown token"}`` if the token is not pending;
            otherwise the result of continuing ``_run_loop``.
        """
        logger = logging.getLogger(__name__)
        pending = ui_state.PENDING.get(token)
        if not pending:
            logger.error("Resume called with unknown token %s", token)
            return {"error": "unknown token"}

        # Continue from the frozen snapshot, not the live message list.
        messages = pending["messages"]
        tool_name = pending["tool_name"]
        # append tool result as assistant message
        messages.append({"role": "assistant", "content": f"Tool result for **{tool_name}**:\n🗣️  User answered: {user_answer}"})
        logger.info("Resuming token %s; appended user answer", token)
        # remove pending
        del ui_state.PENDING[token]
        # continue loop from next step
        start_step = pending.get("step", 1) + 1
        return self._run_loop(messages, start_step=start_step)

# -----------------------------------------------------------------
# CLI entry point
# -----------------------------------------------------------------
if __name__ == "__main__":
    import argparse

    # CLI wrapper: collect an instruction (from argv or interactively) and
    # run a single Agent session.
    parser = argparse.ArgumentParser(
        description="Demo AI agent that can call simple tools via an LLM."
    )
    parser.add_argument(
        "instruction",
        nargs="*",
        help="User instruction (if omitted you will be prompted interactively).",
    )
    parser.add_argument(
        "--model",
        default=None,
        # Fixed: the default is the Qwen coder model, not gpt-3.5-turbo.
        help="Explicit model name (default = qwen/qwen-2.5-coder-32b-instruct).",
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=100,
        help="Maximum tool‑call loops before giving up.",
    )
    args = parser.parse_args()

    if args.instruction:
        # Positional words are re-joined into a single instruction string.
        user_instr = " ".join(args.instruction)
    else:
        user_instr = input("🗨️  Enter your request: ").strip()

    agent = Agent(model=args.model, max_steps=args.steps)
    agent.run(user_instr)