Added Application HolidayViewer

Kodjo Sossouvi
2025-06-27 07:26:58 +02:00
parent 66ea45f501
commit 9f4b8ab4d0
87 changed files with 3756 additions and 212 deletions

src/ai/__init__.py (new file, 0 lines)

src/ai/debug_lmm.py (new file, 51 lines)

@@ -0,0 +1,51 @@
import dataclasses
from dataclasses import field
from typing import Any


@dataclasses.dataclass
class DebugTool:
    name: str
    parameters: dict[str, Any]
    result: Any = None
    error: str | None = None


@dataclasses.dataclass
class DebugResponse:
    extended_prompt: str
    response: str | None = None
    error_response: str | None = None
    tools_requested: list[str] | None = None
    tools_called: list[DebugTool] | None = None


@dataclasses.dataclass
class DebugRequestMetadata:
    id: str
    user_id: str
    start_time: int
    end_time: int
    lmm_model: str
    llm_mod: str
    use_tools: bool
    available_tools: list | None = None


@dataclasses.dataclass
class DebugRequest:
    prompt: str
    responses: list[DebugResponse] = field(default_factory=list)
    metadata: DebugRequestMetadata | None = None


@dataclasses.dataclass
class DebugConversation:
    id: str
    start_time: int
    initial_prompt: str
    requests: list[DebugRequest] = field(default_factory=list)

    def to_dict(self):
        return dataclasses.asdict(self)
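These dataclasses nest into a single serializable trace: a DebugConversation holds DebugRequests, each of which records the DebugResponses and DebugTool calls it produced, and to_dict() flattens the whole tree at once. A minimal sketch of assembling and dumping a trace (identifiers and values below are illustrative only, not taken from this commit):

import json
import time

from ai.debug_lmm import DebugConversation, DebugRequest, DebugResponse, DebugTool

# Hypothetical trace: one request that triggered a single tool call
conv = DebugConversation(id="conv-1", start_time=int(time.time()),
                         initial_prompt="List the repositories")
request = DebugRequest(prompt="List the repositories")
response = DebugResponse(extended_prompt="List the repositories\n<tool instructions>")
response.tools_called = [DebugTool("list_repositories", {}, result=["main"])]
request.responses.append(response)
conv.requests.append(request)

# dataclasses.asdict() recurses into nested dataclasses, so the whole
# conversation serializes to JSON in one call
print(json.dumps(conv.to_dict(), indent=2))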

src/ai/mcp_client.py (new file, 410 lines)

@@ -0,0 +1,410 @@
import json
import logging
from typing import Dict, Any, List

import httpx

from ai.debug_lmm import DebugRequest, DebugResponse, DebugTool
from ai.mcp_server import DummyMCPServer

logger = logging.getLogger("MCPClient")


class InProcessMCPClientNativeTools:
    """MCP client that uses the MCP server directly in the same process, via native function calls"""

    ID = "Native Tools"

    def __init__(self, session, settings_manager, ollama_host: str):
        self.mcp_server = DummyMCPServer(session, settings_manager)
        self.ollama_host = ollama_host
        self.available_tools = self.mcp_server.list_tools()
        self.system_prompt_1 = (
            "You are an intelligent assistant that simplifies the use of the 'My Managing Tools' application. "
            "You will receive queries about the current state of the application, such as the list of "
            "'repositories' or the list of 'tables'. You must reply with a simple, concise answer, "
            "in a single sentence if possible. You must not explain how to obtain the answer; "
            "you must just give the answer.")
        self.system_prompt_2 = ("You are a helpful AI assistant that provides accurate and concise answers"
                                " based on the provided context. Do not respond in a long form. Do not hallucinate. "
                                "If you don't know the answer, just say that you don't know.")
        # Note: the history is recorded here but not yet replayed in the Ollama requests below
        self.conversation_history = [
            {"role": "system", "content": self.system_prompt_1}
        ]

    async def generate_with_mcp_context(self, prompt: str, use_tools: bool = True) -> str:
        """Generate a response using native function calls"""
        logger.debug(f"Calling LLM with prompt: {prompt}")
        self.conversation_history.append({"role": "user", "content": prompt})
        if use_tools and self.available_tools:
            logger.debug(f" Using tools: {self.available_tools.keys()}")
            tools = self._format_tools_for_mistral()
            response = await self._call_ollama_with_tools(prompt, tools)
            logger.debug(f" LLM response: {response}")
            # Process tool calls if present
            if "message" in response and "tool_calls" in response["message"]:
                tool_calls = response["message"]["tool_calls"]
                logger.debug(f" LLM requested tools: {[tool['function']['name'] for tool in tool_calls]}")
                tool_results = []
                for tool_call in tool_calls:
                    result = await self.mcp_server.call_tool(tool_call["function"]["name"],
                                                             tool_call["function"]["arguments"])
                    tool_results.append(result)
                logger.debug(f" MCP server tool calls: {tool_results}")
                # Generate the final response with the tool results
                final_response = await self._call_ollama_with_tool_results(prompt,
                                                                           tools,
                                                                           tool_calls,
                                                                           tool_results)
                logger.debug(f" LLM final result: {final_response}")
                final_response_text = final_response.get("message", {}).get("content", "")
            else:
                # No tool requested, just return the response
                final_response_text = response.get("message", {}).get("content", "")
        else:
            logger.debug(f" Not using tools {use_tools=}, available_tools={self.available_tools.keys()}")
            response = await self._call_ollama_simple(prompt)
            logger.debug(f" LLM response: {response}")
            final_response_text = response.get("message", {}).get("content", "")
        self.conversation_history.append({"role": "assistant", "content": final_response_text})
        return final_response_text

    def _format_tools_for_mistral(self) -> List[Dict[str, Any]]:
        """Format MCP tools for Mistral's function calling API"""
        tools = []
        for tool_name, tool_info in self.available_tools.items():
            tool_schema = {
                "type": "function",
                "function": {
                    "name": tool_name,
                    "description": tool_info["description"],
                    "parameters": {
                        "type": "object",
                        "properties": {},  # added below
                        "required": []
                    }
                }
            }
            # Add the parameters; note that every parameter is marked required,
            # even those with a default value
            schema_parameters = tool_schema["function"]["parameters"]
            for param_name, param_info in tool_info["parameters"].items():
                schema_parameters["properties"][param_name] = {
                    "type": param_info.get("type", "string"),
                    "description": param_info.get("description", f"Parameter {param_name}")
                }
                schema_parameters["required"].append(param_name)
            tools.append(tool_schema)
        return tools
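    # For example, the list_tables tool registered by DummyMCPServer is
    # exported to Ollama as:
    #   {"type": "function",
    #    "function": {"name": "list_tables",
    #                 "description": "return the list of tables, for a given repository",
    #                 "parameters": {"type": "object",
    #                                "properties": {"repository_name": {"type": "string",
    #                                                                   "description": "..."}},
    #                                "required": ["repository_name"]}}}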
    async def _call_ollama_with_tools(self, prompt: str, tools: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Call Ollama with function calling support"""
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(
                f"{self.ollama_host}/api/chat",
                json={
                    "model": "mistral",
                    "messages": [
                        {"role": "system", "content": self.system_prompt_1},
                        {"role": "user", "content": prompt}
                    ],
                    "tools": tools,
                    "stream": False,
                    "options": {
                        "temperature": 0.0
                    }
                }
            )
            if response.status_code == 200:
                return response.json()
            else:
                raise Exception(f"Ollama error: {response.status_code}")

    async def _call_ollama_with_tool_results(self,
                                             original_prompt: str,
                                             tools: List[Dict[str, Any]],
                                             tool_calls: List[Dict[str, Any]],
                                             tool_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Call Ollama with the function call results"""
        messages = [
            {"role": "system", "content": self.system_prompt_2},
            {"role": "user", "content": original_prompt},
            {"role": "assistant", "tool_calls": tool_calls}
        ]
        # Add the tool results
        for i, (tool_call, result) in enumerate(zip(tool_calls, tool_results)):
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.get("id", f"call_{i}"),
                "content": json.dumps(result)
            })
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(
                f"{self.ollama_host}/api/chat",
                json={
                    "model": "mistral",
                    "messages": messages,
                    "tools": tools,
                    "stream": False,
                    "options": {
                        "temperature": 0.0
                    }
                }
            )
            if response.status_code == 200:
                return response.json()
            else:
                raise Exception(f"Ollama error: {response.status_code}")

    async def _call_ollama_simple(self, prompt: str) -> Dict[str, Any]:
        """Call Ollama for simple generation without tools"""
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(
                f"{self.ollama_host}/api/chat",
                json={
                    "model": "mistral",
                    "messages": [
                        {"role": "system", "content": self.system_prompt_2},
                        {"role": "user", "content": prompt}
                    ],
                    "stream": False
                }
            )
            if response.status_code == 200:
                return response.json()
            else:
                raise Exception(f"Ollama error: {response.status_code}")


class InProcessMCPClientCustomTools:
    """MCP client that uses the MCP server directly in the same process, via a JSON tool protocol embedded in the prompt"""

    ID = "Custom Tools"

    def __init__(self, session, settings_manager, ollama_host: str, model: str = "mistral"):
        self.mcp_server = DummyMCPServer(session, settings_manager)
        self.ollama_host = ollama_host
        self.model = model
        self.available_tools = self.mcp_server.list_tools()
        self.max_tool_iterations = 5  # Prevent infinite loops

    async def generate_with_mcp_context(self, debug: DebugRequest, prompt: str, use_tools: bool = True) -> str:
        """Generate a response with the LLM, potentially using MCP tools"""
        # Enhance the prompt with the available tools if requested
        if use_tools and self.available_tools:
            logger.debug(f"Using tools: {list(self.available_tools.keys())}")
            debug.available_tools = list(self.available_tools.keys())
            tools_description = self._format_tools_for_prompt()
            enhanced_prompt = f"""
{prompt}
You have access to the following tools:
{tools_description}
If you need to use tools, you can use multiple tools in sequence. For each tool, respond in JSON format only:
{{"use_tool": true, "tool_name": "tool_name", "arguments": {{"param": "value"}}}}
You can chain multiple tool calls by putting each JSON on a separate line. For example:
{{"use_tool": true, "tool_name": "first_tool", "arguments": {{}}}}
{{"use_tool": true, "tool_name": "second_tool", "arguments": {{"param": "value_from_first_tool"}}}}
When you're done with tools, provide your final answer normally (not in JSON format).
"""
        else:
            logger.debug(f"Not using tools {use_tools=}, available_tools={list(self.available_tools.keys())}")
            enhanced_prompt = prompt
        # Set dynamically: DebugRequest does not declare this field, so asdict() will not export it
        debug.enhanced_prompt = enhanced_prompt
        # Execute the tool chain
        if use_tools:
            return await self._execute_tool_chain(debug, enhanced_prompt, prompt)
        else:
            debug_response = DebugResponse(enhanced_prompt)
            debug.responses.append(debug_response)
            return await self._call_ollama(debug_response, enhanced_prompt)

    async def _execute_tool_chain(self, debug: DebugRequest, enhanced_prompt: str, original_prompt: str) -> str:
        """Execute a chain of tool calls until completion"""
        current_prompt = enhanced_prompt
        tool_results = []
        iteration = 0
        while iteration < self.max_tool_iterations:
            logger.debug(f"Tool chain iteration {iteration}")
            debug_response = DebugResponse(current_prompt)
            debug.responses.append(debug_response)
            # Call the LLM
            llm_response = await self._call_ollama(debug_response, current_prompt)
            # Parse all tool requests from the response
            tool_requests = self._parse_multiple_tool_requests(llm_response)
            logger.debug(f"Tools requested: {[t['tool_name'] for t in tool_requests]}")
            debug_response.tools_requested = [t["tool_name"] for t in tool_requests]
            if not tool_requests:
                # No more tools requested, this is the final response
                return llm_response
            # Execute all requested tools
            current_results = []
            debug_response.tools_called = []
            for tool_request in tool_requests:
                if tool_request["tool_name"] not in self.available_tools:
                    logger.warning(f"Requested tool not available: {tool_request['tool_name']}")
                    error_msg = f"Sorry, the requested tool '{tool_request['tool_name']}' is not available."
                    debug_response.error_response = error_msg
                    return error_msg
                tool_name = tool_request["tool_name"]
                tool_args = tool_request.get("arguments", {})
                debug_tool = DebugTool(tool_name, tool_args)
                debug_response.tools_called.append(debug_tool)
                try:
                    tool_result = await self.mcp_server.call_tool(tool_name, tool_args)
                    current_results.append({"tool_name": tool_name, "arguments": tool_args, "result": tool_result})
                    debug_tool.result = tool_result
                    logger.debug(f"Tool {tool_name} executed successfully with result={tool_result}")
                except Exception as e:
                    error_msg = f"Tool execution failed: {e}"
                    logger.error(error_msg)
                    debug_tool.error = error_msg
                    return f"Sorry, there was an error executing the tool {tool_name}: {e}"
            tool_results.extend(current_results)
            # Prepare the prompt for the next iteration
            current_prompt = self._build_continuation_prompt(original_prompt, tool_results)
            iteration += 1
        # Max iterations reached, generate a final response
        logger.warning(f"Max tool iterations ({self.max_tool_iterations}) reached")
        final_prompt = self._build_final_prompt(original_prompt, tool_results)
        debug_response = DebugResponse(final_prompt)
        debug.responses.append(debug_response)
        return await self._call_ollama(debug_response, final_prompt)

    def _parse_multiple_tool_requests(self, response: str) -> List[Dict[str, Any]]:
        """Parse the LLM response to detect multiple tool requests"""
        tool_requests = []
        try:
            # Look for a JSON object on each line
            for line in response.strip().split('\n'):
                line = line.strip()
                if line.startswith('{') and line.endswith('}'):
                    try:
                        parsed = json.loads(line)
                        if parsed.get("use_tool") and "tool_name" in parsed:
                            tool_requests.append(parsed)
                    except json.JSONDecodeError:
                        continue
        except Exception as e:
            logger.debug(f"Error parsing tool requests: {e}")
        return tool_requests
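    # Example: a response such as
    #   {"use_tool": true, "tool_name": "list_repositories", "arguments": {}}
    #   {"use_tool": true, "tool_name": "list_tables", "arguments": {"repository_name": "demo"}}
    # yields both parsed requests in order; surrounding prose lines and
    # malformed JSON are simply skipped ("demo" is an illustrative value).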
    def _build_continuation_prompt(self, original_prompt: str, tool_results: List[Dict]) -> str:
        """Build the prompt for continuing the tool chain"""
        results_text = ""
        for result in tool_results:
            results_text += f"\nTool: {result['tool_name']} with arguments {result['arguments']}\nResult: {result['result']}\n"
        return f"""
Original question: {original_prompt}
Previous tool results:{results_text}
Give the final result. It must be concise, with no explanation. A simple sentence is preferred.
"""

    def _build_final_prompt(self, original_prompt: str, tool_results: List[Dict]) -> str:
        """Build the prompt for generating the final response"""
        results_text = ""
        for result in tool_results:
            results_text += f"\nTool: {result['tool_name']} - Result: {result['result']}"
        return f"""
Original question: {original_prompt}
Tool results used:{results_text}
Generate a concise final response based on this information.
Make it as short as possible. Do not mention the tools used, only the final result. One or two sentences preferred.
"""

    async def _call_ollama(self, debug_response: DebugResponse, prompt: str) -> str:
        """Call Ollama to generate a response"""
        logger.debug(f"LLM Request {prompt=}")
        try:
            async with httpx.AsyncClient(timeout=30.0) as client:
                response = await client.post(
                    f"{self.ollama_host}/api/generate",
                    json={
                        "model": self.model,
                        "prompt": prompt,
                        "stream": False,
                        "options": {"temperature": 0.0}
                    }
                )
                response.raise_for_status()
                result = response.json()
                logger.debug(f"LLM Response {result=}")
                llm_response = result.get("response", "")
                debug_response.response = llm_response  # Store the LLM response in the debug object
                return llm_response
        except httpx.TimeoutException:
            error_msg = "Ollama request timeout"
            logger.error(error_msg)
            debug_response.error_response = error_msg
            raise Exception(error_msg)
        except httpx.HTTPStatusError as e:
            error_msg = f"Ollama HTTP error: {e.response.status_code}"
            logger.error(error_msg)
            debug_response.error_response = error_msg
            raise Exception(error_msg)
        except Exception as e:
            error_msg = f"Unexpected Ollama error: {e}"
            logger.error(error_msg)
            debug_response.error_response = error_msg
            raise

    def _format_tools_for_prompt(self) -> str:
        """Format the available tools for the prompt"""
        tools_list = []
        for tool_name, tool_info in self.available_tools.items():
            params_desc = ", ".join([f"{k}: {v.get('description', k)}" for k, v in tool_info['parameters'].items()])
            tools_list.append(f"- {tool_name}: {tool_info['description']} (params: {params_desc})")
        return "\n".join(tools_list)


MPC_CLIENTS_IDS = [InProcessMCPClientNativeTools.ID, InProcessMCPClientCustomTools.ID]
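For reference, a rough sketch of driving the custom-tools client end to end. The session and settings_manager arguments normally come from the host application and are stubbed with None here, which only works as long as no tool that needs them is invoked; a local Ollama instance on the default port is assumed:

import asyncio

from ai.debug_lmm import DebugRequest
from ai.mcp_client import InProcessMCPClientCustomTools

async def main():
    client = InProcessMCPClientCustomTools(session=None, settings_manager=None,
                                           ollama_host="http://localhost:11434")
    debug = DebugRequest(prompt="Which repositories exist?")
    answer = await client.generate_with_mcp_context(debug, "Which repositories exist?",
                                                    use_tools=True)
    print(answer)
    print(debug.responses)  # full trace of prompts, tool requests and results

asyncio.run(main())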

src/ai/mcp_server.py (new file, 190 lines)

@@ -0,0 +1,190 @@
# MCP configuration
import inspect
import logging
from typing import Dict, Any, Callable

from components.repositories.components.Repositories import Repositories
from core.instance_manager import InstanceManager

logger = logging.getLogger("MCPServer")


class DummyMCPServer:
    def __init__(self, session, settings_manager):
        self._session = session
        self._settings_manager = settings_manager  # will be needed to get info from the db
        self.available_tools = {}
        self.register_tool("get_current_time", self.get_current_time, "get the current time")
        self.register_tool("list_repositories", self.list_repositories, "return the list of available repositories")
        self.register_tool("list_tables", self.list_tables, "return the list of tables, for a given repository")

    def list_tools(self):
        return self.available_tools

    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        logger.debug(f"Calling tool {tool_name} with {arguments=}")
        if tool_name not in self.available_tools:
            return {
                "success": False,
                "error": f"Tool '{tool_name}' not found",
                "tool_name": tool_name
            }
        try:
            tool_info = self.available_tools[tool_name]
            handler_func = tool_info["handler"]
            arguments = self._validate_arguments(arguments, tool_info)
            logger.debug(f" validated arguments {arguments=}")
            # Call the handler function directly, awaiting it if it is a coroutine
            if inspect.iscoroutinefunction(handler_func):
                result = await handler_func(**arguments)
            else:
                result = handler_func(**arguments)
            logger.debug(f" {result=}")
            return {
                "success": True,
                "result": result,
                "tool_name": tool_name
            }
        except Exception as e:
            logger.error(f" error: {e}")
            return {
                "success": False,
                "error": str(e),
                "tool_name": tool_name
            }

    def register_tool(self, name: str, handler: Callable, description: str = None):
        """Register a tool directly with the server"""
        if description is None:
            description = f"Tool {name}"
        # Extract the function signature
        sig = inspect.signature(handler)
        params = {}
        # Extract the docstring to get parameter descriptions
        docstring = inspect.getdoc(handler) or ""
        param_docs = {}
        # Simple docstring parser for parameters, looking for patterns like
        # ":param param_name: Description" or "Args: param_name: Description"
        lines = docstring.split('\n')
        in_params_section = False
        for line in lines:
            line = line.strip()
            # Check for sphinx-style param docs
            if line.startswith(':param '):
                parts = line[7:].split(':', 1)
                if len(parts) == 2:
                    param_name = parts[0].strip()
                    param_docs[param_name] = parts[1].strip()
            # Check for Google-style docstrings
            elif line.lower() == 'args:' or line.lower() == 'parameters:':
                in_params_section = True
            elif in_params_section:
                if not line or line.startswith('Returns') or line.startswith('Raises'):
                    in_params_section = False
                else:
                    parts = line.split(':', 1)
                    if len(parts) == 2:
                        param_name = parts[0].strip()
                        param_docs[param_name] = parts[1].strip()
        for param_name, param in sig.parameters.items():
            param_type = "string"
            # Use the docstring description if available, otherwise a default
            param_desc = param_docs.get(param_name, f"Parameter {param_name}")
            # Try to infer the type from the annotation
            if param.annotation != inspect.Parameter.empty:
                if param.annotation == int:
                    param_type = "integer"
                elif param.annotation == float:
                    param_type = "number"
                elif param.annotation == bool:
                    param_type = "boolean"
                elif param.annotation == str:
                    param_type = "string"
            params[param_name] = {
                "type": param_type,
                "description": param_desc
            }
        self.available_tools[name] = {
            "name": name,
            "handler": handler,
            "parameters": params,
            "description": description
        }
        logger.debug(f"Registered tool {name}, {params=}, {description=}")
        return self  # For chaining

    @staticmethod
    def _validate_arguments(arguments: Dict[str, Any], tool_info: Dict[str, Any]) -> Dict[str, Any]:
        """Filter the arguments, keeping only those declared in the tool signature"""
        return {name: value for name, value in arguments.items() if name in tool_info["parameters"]}

    # **************************************
    # PREDEFINED FUNCTIONS
    # **************************************
    @staticmethod
    async def get_current_time(format: str = "iso") -> str:
        """
        Gets the current time in the specified format. This method provides two format
        options: ISO 8601 format or a more human-readable format. By default, it returns
        ISO 8601 format, and the function is designed to handle asynchronous execution.

        :param format: The format in which to return the current time.
            Acceptable values are:
            - "iso" for ISO 8601 format (default)
            - "human" for a human-readable format (e.g., 'YYYY-MM-DD HH:MM:SS')
        :return: The current time formatted as a string based on the specified input.
        :rtype: str
        """
        import datetime
        now = datetime.datetime.now()
        if format == "iso":
            return now.isoformat()
        elif format == "human":
            return now.strftime("%Y-%m-%d %H:%M:%S")
        else:
            return str(now)

    async def list_repositories(self):
        """
        Retrieves a list of repository names.

        This method interacts with the repository manager to fetch all available
        repositories and returns their names in a list.

        :return: A list of repository names.
        :rtype: list[str]
        """
        repository_instance = InstanceManager.get(self._session, Repositories.create_component_id(self._session))
        return [repo.name for repo in repository_instance.db.get_repositories()]

    async def list_tables(self, repository_name: str):
        """
        Retrieves a list of tables from the specified repository.

        This function interacts with the database repository
        configured for the provided repository name, fetching and returning
        a list of tables contained within that repository.

        :param repository_name: The name of the repository from which to retrieve
            the list of tables.
        :type repository_name: str
        :return: A list of tables from the specified repository.
        :rtype: list
        """
        repository_instance = InstanceManager.get(self._session, Repositories.create_component_id(self._session))
        repo = repository_instance.db.get_repository(repository_name)
        return repo.tables
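Because register_tool derives the parameter schema from the handler's signature and docstring, adding a tool is a single call. A small sketch with a hypothetical handler (add_numbers is not part of this commit; session and settings_manager are stubbed for illustration):

from ai.mcp_server import DummyMCPServer

def add_numbers(a: int, b: int) -> int:
    """
    Adds two numbers.

    :param a: First operand
    :param b: Second operand
    """
    return a + b

server = DummyMCPServer(session=None, settings_manager=None)
server.register_tool("add_numbers", add_numbers, "add two integers")

# The int annotations map to "integer" and the :param lines provide the
# parameter descriptions
print(server.list_tools()["add_numbers"]["parameters"])

# call_tool is async and accepts sync or async handlers alike; arguments
# not in the signature are filtered out by _validate_arguments:
# await server.call_tool("add_numbers", {"a": 2, "b": 3, "junk": 1})
# -> {"success": True, "result": 5, "tool_name": "add_numbers"}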

src/ai/mcp_tools.py (new file, 10 lines)

@@ -0,0 +1,10 @@
import logging

logger = logging.getLogger("MCPServerTools")


class MCPServerTools:
    @staticmethod
    def list_tools():
        return ["get_current_time", "list_repositories", "list_tables"]