Bases: BaseService['LlmServiceMixin']
Source code in dandy/core/service/service.py
def __init__(self, obj: Any = None):
    if self.has_obj_service_instance(obj):
        return

    self._obj_type_name: str = str(
        list(self.__class__.__annotations__.values())[0]
    ).split('.')[-1]

    if obj is None:
        return

    self._obj_mro_type_names = [cls.__name__ for cls in obj.__class__.__mro__]

    if self._obj_type_name not in self._obj_mro_type_names:
        message = (
            f'{self.__class__.__name__} was instantiated with obj type "{obj.__class__.__name__}" '
            f'and failed as it was expecting "{self._obj_type_name}".'
        )
        raise ServiceCriticalException(message)

    self._obj_type: type[T_co] = obj.__class__

    if self._obj_type is None or self._obj_type is ...:
        message = f'{self.__class__.__name__} top class attribute must have an annotated type.'
        raise ServiceCriticalException(message)

    self.obj: T_co = obj

    if ABC not in self.__class__.__bases__ and not self._obj_is_valid:
        message = f'{self._obj_type_name} failed to validate on {self.__class__.__name__}'
        raise ServiceCriticalException(message)

    self.__post_init__()

    if not hasattr(obj, self.generate_service_instance_name(self.__class__)):
        message = (
            f'To use "{self.__class__.__name__}" can only be attached to an object with a '
            f'"{self.generate_service_instance_name(self.__class__)}" attribute.'
        )
        raise ServiceCriticalException(message)

    self.set_obj_service_instance(obj, self)
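The constructor refuses any object whose MRO does not include the annotated type for this service. A minimal failure sketch, assuming the class documented here is importable as LlmService from dandy.llm.service; PlainObject exists only for illustration:

    from dandy.llm.service import LlmService   # assumed import path

    class PlainObject:
        """Deliberately lacks LlmServiceMixin in its MRO."""

    try:
        LlmService(PlainObject())
    except Exception as exc:    # ServiceCriticalException per the source above
        print(exc)              # ...was instantiated with obj type "PlainObject"...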
has_retry_attempts_available
property
__post_init__
Source code in dandy/llm/service.py
def __post_init__(self):
    self._event_id = generate_new_recorder_event_id()

    if isinstance(self.obj.llm_config, str):
        self._llm_config = llm_configs[self.obj.llm_config]
    else:
        self._llm_config = self.obj.llm_config

    self._llm_options = self.obj.llm_config_options

    self._intel = None
    self._intel_json_schema = None

    self._request_body = self._llm_config.generate_request_body(
        max_input_tokens=self._llm_options.max_input_tokens,
        max_output_tokens=self._llm_options.max_output_tokens,
        seed=self._llm_options.seed,
        temperature=self._llm_options.temperature,
    )

    self._add_system_message()

    self._response_str = None

    self._retry_max_attempts = 0
    self._retry_attempt = 0
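Note that obj.llm_config may be either a string key into the llm_configs registry or a config object used as-is. A standalone sketch of that resolution step; the registry contents here are made up for illustration and are not dandy's real settings:

    # Stand-in registry; the real llm_configs mapping comes from dandy's configuration.
    llm_configs = {'DEFAULT': {'model': 'llama3.1', 'temperature': 0.2}}

    def resolve_llm_config(llm_config):
        # Strings are looked up by name; any other value is treated as a config object.
        if isinstance(llm_config, str):
            return llm_configs[llm_config]
        return llm_config

    assert resolve_llm_config('DEFAULT') is llm_configs['DEFAULT']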
add_message
Source code in dandy/llm/service.py
def add_message(
    self, role: RoleLiteralStr, content: str, images: list[str] | None = None
):
    self._request_body.add_message(role, content, images)
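A brief usage sketch, assuming `service` is a bound instance of this class and that 'system' and 'user' are valid RoleLiteralStr values (consistent with the roles used elsewhere in this module); base64_png is an assumed base64-encoded image string:

    service.add_message('system', 'You answer in one sentence.')
    service.add_message('user', 'What does this diagram show?', images=[base64_png])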
add_messages
Source code in dandy/llm/service.py
def add_messages(
    self, messages: Sequence[tuple[RoleLiteralStr, str, list[str] | None]]
):
    for role, content, images in messages:
        self.add_message(role, content, images)
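Each entry is a (role, content, images) tuple, with None where a message has no images. A hedged sketch, again assuming a bound `service` and an assumed base64_png string:

    service.add_messages([
        ('system', 'You are a strict JSON generator.', None),
        ('user', 'Describe the attached screenshot.', [base64_png]),
        ('user', 'Keep it under 50 words.', None),
    ])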
prompt_to_intel
Source code in dandy/llm/service.py
def prompt_to_intel(
    self,
    prompt: PromptOrStrOrNone = None,
    intel_class: type[IntelType] | None = None,
    intel_object: IntelType | None = None,
    images: list[str] | None = None,
    image_files: list[str | Path] | None = None,
    include_fields: IncEx | None = None,
    exclude_fields: IncEx | None = None,
    message_history: MessageHistory | None = None,
) -> IntelType:
    if intel_class and intel_object:
        message = 'Cannot specify both intel_class and intel_object.'
        raise LlmCriticalException(message)

    if intel_class is None and intel_object is None:
        if self.obj_class.llm_intel_class:
            intel_class = self.obj_class.llm_intel_class
        else:
            message = 'Must specify either intel_class, intel_object or llm_intel_class on the processor.'
            raise LlmCriticalException(message)

    if image_files:
        images = [] if images is None else images

        for image_file in image_files:
            images.append(encode_file_to_base64(image_file))

    self._intel = intel_class or intel_object

    self._intel_json_schema = IntelFactory.intel_to_json_inc_ex_schema(
        intel=self._intel, include=include_fields, exclude=exclude_fields
    )

    self._request_body.set_format_to_json_schema(self._intel_json_schema)

    if message_history:
        for message in message_history.messages:
            self._request_body.add_message(
                role=message.role, content=message.content, images=message.images
            )

    if prompt is not None:
        self._request_body.add_message(
            role='user',
            content=service_user_prompt(
                prompt if isinstance(prompt, Prompt) else Prompt(prompt)
            ).to_str(),
            images=images,
        )

    if len(self._request_body.messages) <= 1:
        message = f'"{self.__class__.__name__}.llm.process_to_intel" method requires you to have a prompt or more than the system message.'
        raise LlmCriticalException(message)

    return self._request_to_intel()
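A hedged usage sketch, assuming `service` is a bound instance of this class and that Intel classes are Pydantic-style models; the BaseIntel import path and SummaryIntel are assumptions, not part of the source above:

    from dandy.intel import BaseIntel          # assumed import path

    class SummaryIntel(BaseIntel):
        title: str
        key_points: list[str]

    # Either pass intel_class/intel_object explicitly, or rely on llm_intel_class
    # being set on the owning processor (see the checks above).
    summary = service.prompt_to_intel(
        prompt='Summarize the following meeting notes: ...',
        intel_class=SummaryIntel,
    )
    print(summary.title, summary.key_points)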
reset_service
Source code in dandy/llm/service.py
def reset_service(self):
    self.reset_messages()
    self._add_system_message()
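reset_service clears the accumulated messages and re-adds the system message, so one bound service can be reused for unrelated prompts. A hedged sketch reusing the assumed `service` and SummaryIntel names from the sketch above:

    first = service.prompt_to_intel('Summarize report A: ...', intel_class=SummaryIntel)
    service.reset_service()      # drop the conversation, start again from the system message
    second = service.prompt_to_intel('Summarize report B: ...', intel_class=SummaryIntel)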
reset_messages
Source code in dandy/llm/service.py
def reset_messages(self):
    self._request_body.reset_messages()
retry_request_to_intel
Source code in dandy/llm/service.py
def retry_request_to_intel(
    self,
    retry_event_description: str,
    retry_user_prompt: PromptOrStr,
) -> IntelType:
    if self.has_retry_attempts_available:
        self._retry_attempt += 1

        recorder_add_llm_retry_event(
            retry_event_description,
            self._event_id,
            remaining_attempts=self._llm_config.options.prompt_retry_count
            - self._retry_attempt,
        )

        self._request_body.add_message(
            role='user', content=Prompt(retry_user_prompt).to_str()
        )

        return self._request_to_intel()

    message = f'Failed to get the correct response from the LlmService after {self._llm_config.options.prompt_retry_count} attempts.'
    raise LlmRecoverableException(message)
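This method suits the case where a response came back but failed your own validation: it records a retry event, appends a corrective user prompt, and re-runs the request, raising once prompt_retry_count is exhausted. A hedged sketch reusing the assumed `service` and SummaryIntel names; the validation rule is illustrative, not part of the library:

    summary = service.prompt_to_intel('Summarize report C: ...', intel_class=SummaryIntel)

    if not summary.key_points:   # example validation rule
        summary = service.retry_request_to_intel(
            retry_event_description='Summary came back with no key points.',
            retry_user_prompt='Your last answer had no key points. List at least three.',
        )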