♻️(summary) extract LLMService class into dedicated module

Move LLMService class from existing file into separate dedicated
module to improve code organization.
This commit is contained in:
lebaudantoine
2025-12-11 16:32:20 +01:00
committed by aleb_the_flash
parent 4256eb403d
commit c81ef38005
2 changed files with 59 additions and 46 deletions

View File

@@ -7,7 +7,7 @@ import os
import tempfile
import time
from pathlib import Path
from typing import Any, Mapping, Optional from typing import Optional
import openai
import sentry_sdk
@@ -21,6 +21,7 @@ from urllib3.util import Retry
from summary.core.analytics import MetadataManager, get_analytics
from summary.core.config import get_settings
from summary.core.llm_service import LLMException, LLMService
from summary.core.prompt import (
    FORMAT_NEXT_STEPS,
    FORMAT_PLAN,
@@ -83,51 +84,6 @@ def create_retry_session():
    return session
class LLMException(Exception):
    """Raised when a call to the configured LLM fails."""
class LLMService:
    """Service for performing calls to the LLM configured in the settings."""

    def __init__(self):
        """Init the LLMService once.

        The OpenAI client is built a single time from the module-level
        settings (base URL and secret API key).
        """
        self._client = openai.OpenAI(
            base_url=settings.llm_base_url,
            api_key=settings.llm_api_key.get_secret_value(),
        )

    def call(
        self,
        system_prompt: str,
        user_prompt: str,
        response_format: Optional[Mapping[str, Any]] = None,
    ):
        """Call the LLM service.

        Args:
            system_prompt: Content sent as the "system" role message.
            user_prompt: Content sent as the "user" role message.
            response_format: Optional response-format spec forwarded
                verbatim to the chat completions API when provided.

        Returns:
            The message content of the first completion choice.

        Raises:
            LLMException: If the underlying API call fails for any reason.
        """
        try:
            params: dict[str, Any] = {
                "model": settings.llm_model,
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
            }
            # Only include response_format when the caller asked for one;
            # some backends reject an explicit None value.
            if response_format is not None:
                params["response_format"] = response_format
            response = self._client.chat.completions.create(**params)
            return response.choices[0].message.content
        except Exception as e:
            logger.exception("LLM call failed: %s", e)
            # Bug fix: the original string lacked the "f" prefix, so the
            # exception message contained the literal text "{e}".
            raise LLMException(f"LLM call failed: {e}") from e
def format_actions(llm_output: dict) -> str:
    """Format the actions from the LLM output into a markdown list.

View File

@@ -0,0 +1,57 @@
"""LLM service to encapsulate LLM's calls."""
import logging
from typing import Any, Mapping, Optional
import openai
from summary.core.config import get_settings
settings = get_settings()
logger = logging.getLogger(__name__)
class LLMException(Exception):
    """Raised when a call to the configured LLM fails."""
class LLMService:
    """Service for performing calls to the LLM configured in the settings."""

    def __init__(self):
        """Init the LLMService once.

        The OpenAI client is built a single time from the module-level
        settings (base URL and secret API key).
        """
        self._client = openai.OpenAI(
            base_url=settings.llm_base_url,
            api_key=settings.llm_api_key.get_secret_value(),
        )

    def call(
        self,
        system_prompt: str,
        user_prompt: str,
        response_format: Optional[Mapping[str, Any]] = None,
    ):
        """Call the LLM service.

        Args:
            system_prompt: Content sent as the "system" role message.
            user_prompt: Content sent as the "user" role message.
            response_format: Optional response-format spec forwarded
                verbatim to the chat completions API when provided.

        Returns:
            The message content of the first completion choice.

        Raises:
            LLMException: If the underlying API call fails for any reason.
        """
        try:
            params: dict[str, Any] = {
                "model": settings.llm_model,
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
            }
            # Only include response_format when the caller asked for one;
            # some backends reject an explicit None value.
            if response_format is not None:
                params["response_format"] = response_format
            response = self._client.chat.completions.create(**params)
            return response.choices[0].message.content
        except Exception as e:
            logger.exception("LLM call failed: %s", e)
            # Bug fix: the original string lacked the "f" prefix, so the
            # exception message contained the literal text "{e}".
            raise LLMException(f"LLM call failed: {e}") from e