
LLM Connectivity Reference

Handle connections to various LLM providers, both proprietary and open source.

AnthropicConversation

Bases: Conversation

Conversation class for the Anthropic model.

Source code in biochatter/biochatter/llm_connect/anthropic.py
class AnthropicConversation(Conversation):
    """Conversation class for the Anthropic model."""

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
    ) -> None:
        """Connect to Anthropic's API and set up a conversation with the user.

        Also initialise a second conversational agent to provide corrections to
        the model output, if necessary.

        Args:
        ----
            model_name (str): The name of the model to use.

            prompts (dict): A dictionary of prompts to use for the conversation.

            correct (bool): Whether to correct the model output.

            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each
                sentence individually.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
        )

        self.ca_model_name = "claude-3-5-sonnet-20240620"
        # TODO make accessible by drop-down

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the Anthropic API.

        If the key is valid, initialise the conversational agent. Optionally set
        the user for usage statistics.

        Args:
        ----
            api_key (str): The API key for the Anthropic API.

            user (str, optional): The user for usage statistics. If provided and
                equals "community", will track usage stats.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        client = anthropic.Anthropic(
            api_key=api_key,
        )
        self.user = user

        try:
            client.count_tokens("Test connection")
            self.chat = ChatAnthropic(
                model_name=self.model_name,
                temperature=0,
                api_key=api_key,
            )
            self.ca_chat = ChatAnthropic(
                model_name=self.ca_model_name,
                temperature=0,
                api_key=api_key,
            )
            if user == "community":
                self.usage_stats = get_stats(user=user)

            return True

        except anthropic._exceptions.AuthenticationError:
            self._chat = None
            self._ca_chat = None
            return False

    def _primary_query(self) -> tuple:
        """Query the Anthropic API with the user's message.

        Return the response using the message history (flattery system messages,
        prior conversation) as context. Correct the response if necessary.

        Returns
        -------
            tuple: A tuple containing the response from the Anthropic API and
                the token usage.

        """
        try:
            history = self._create_history()
            response = self.chat.generate([history])
        except (
            anthropic._exceptions.APIError,
            anthropic._exceptions.AnthropicError,
            anthropic._exceptions.ConflictError,
            anthropic._exceptions.NotFoundError,
            anthropic._exceptions.APIStatusError,
            anthropic._exceptions.RateLimitError,
            anthropic._exceptions.APITimeoutError,
            anthropic._exceptions.BadRequestError,
            anthropic._exceptions.APIConnectionError,
            anthropic._exceptions.AuthenticationError,
            anthropic._exceptions.InternalServerError,
            anthropic._exceptions.PermissionDeniedError,
            anthropic._exceptions.UnprocessableEntityError,
            anthropic._exceptions.APIResponseValidationError,
        ) as e:
            return str(e), None

        msg = response.generations[0][0].text
        token_usage = response.llm_output.get("token_usage")

        self.append_ai_message(msg)

        return msg, token_usage

    def _create_history(self) -> list:
        """Create a history of messages for the Anthropic API.

        Returns
        -------
            list: A list of messages, with the last message being the most
                recent.

        """
        history = []
        # extract text components from message contents
        msg_texts = [m.content[0]["text"] if isinstance(m.content, list) else m.content for m in self.messages]

        # check if last message is an image message
        is_image_message = False
        if isinstance(self.messages[-1].content, list):
            is_image_message = self.messages[-1].content[1]["type"] == "image_url"

        # find location of last AI message (if any)
        last_ai_message = None
        for i, m in enumerate(self.messages):
            if isinstance(m, AIMessage):
                last_ai_message = i

        # Aggregate system messages into one message at the beginning
        system_messages = [m.content for m in self.messages if isinstance(m, SystemMessage)]
        if system_messages:
            history.append(
                SystemMessage(content="\n".join(system_messages)),
            )

        # concatenate all messages before the last AI message into one message
        if last_ai_message is not None:
            history.append(
                HumanMessage(
                    content="\n".join([m for m in msg_texts[:last_ai_message]]),
                ),
            )
            # then append the last AI message
            history.append(
                AIMessage(
                    content=msg_texts[last_ai_message],
                ),
            )

            # then concatenate all messages after that
            # into one HumanMessage
            history.append(
                HumanMessage(
                    content="\n".join(
                        [m for m in msg_texts[last_ai_message + 1 :]],
                    ),
                ),
            )

        # else add human message to history (without system messages)
        else:
            last_system_message = None
            for i, m in enumerate(self.messages):
                if isinstance(m, SystemMessage):
                    last_system_message = i
            history.append(
                HumanMessage(
                    content="\n".join(
                        [m for m in msg_texts[last_system_message + 1 :]],
                    ),
                ),
            )

        # if the last message is an image message, add the image to the history
        if is_image_message:
            history[-1].content = [
                {"type": "text", "text": history[-1].content},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": self.messages[-1].content[1]["image_url"]["url"],
                    },
                },
            ]
        return history

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the Anthropic API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the Anthropic API.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )

        response = self.ca_chat.generate([ca_messages])

        correction = response.generations[0][0].text
        token_usage = response.llm_output.get("token_usage")

        return correction
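
A minimal usage sketch, assuming the class is importable from the package root, a valid key in the ANTHROPIC_API_KEY environment variable, and a prompt dictionary with the keys consumed by Conversation.setup (documented below):

import os

from biochatter.llm_connect import AnthropicConversation

# Hypothetical minimal prompt set; real deployments typically supply
# curated primary and correcting-agent prompts.
prompts = {
    "primary_model_prompts": ["You are a helpful biomedical assistant."],
    "correcting_agent_prompts": ["You check biomedical answers for errors."],
    "rag_agent_prompts": [],
    "tool_prompts": {},
}

conversation = AnthropicConversation(
    model_name="claude-3-5-sonnet-20240620",
    prompts=prompts,
    correct=False,
)

if conversation.set_api_key(os.environ["ANTHROPIC_API_KEY"]):
    # query returns (message, token_usage, correction)
    msg, token_usage, correction = conversation.query("What is BioCypher?")
    print(msg)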

__init__(model_name, prompts, correct=False, split_correction=False)

Connect to Anthropic's API and set up a conversation with the user.

Also initialise a second conversational agent to provide corrections to the model output, if necessary.


Parameters

model_name (str): The name of the model to use.

prompts (dict): A dictionary of prompts to use for the conversation.

correct (bool): Whether to correct the model output.

split_correction (bool): Whether to correct the model output by
    splitting the output into sentences and correcting each
    sentence individually.
Source code in biochatter/biochatter/llm_connect/anthropic.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
) -> None:
    """Connect to Anthropic's API and set up a conversation with the user.

    Also initialise a second conversational agent to provide corrections to
    the model output, if necessary.

    Args:
    ----
        model_name (str): The name of the model to use.

        prompts (dict): A dictionary of prompts to use for the conversation.

        correct (bool): Whether to correct the model output.

        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each
            sentence individually.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
    )

    self.ca_model_name = "claude-3-5-sonnet-20240620"

set_api_key(api_key, user=None)

Set the API key for the Anthropic API.

If the key is valid, initialise the conversational agent. Optionally set the user for usage statistics.


Parameters

api_key (str): The API key for the Anthropic API.

user (str, optional): The user for usage statistics. If provided and
    equals "community", will track usage stats.

Returns

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/anthropic.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the Anthropic API.

    If the key is valid, initialise the conversational agent. Optionally set
    the user for usage statistics.

    Args:
    ----
        api_key (str): The API key for the Anthropic API.

        user (str, optional): The user for usage statistics. If provided and
            equals "community", will track usage stats.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    client = anthropic.Anthropic(
        api_key=api_key,
    )
    self.user = user

    try:
        client.count_tokens("Test connection")
        self.chat = ChatAnthropic(
            model_name=self.model_name,
            temperature=0,
            api_key=api_key,
        )
        self.ca_chat = ChatAnthropic(
            model_name=self.ca_model_name,
            temperature=0,
            api_key=api_key,
        )
        if user == "community":
            self.usage_stats = get_stats(user=user)

        return True

    except anthropic._exceptions.AuthenticationError:
        self._chat = None
        self._ca_chat = None
        return False

AzureGptConversation

Bases: GptConversation

Conversation class for the Azure GPT model.

Source code in biochatter/biochatter/llm_connect/azure.py
class AzureGptConversation(GptConversation):
    """Conversation class for the Azure GPT model."""

    def __init__(
        self,
        deployment_name: str,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        version: str | None = None,
        base_url: str | None = None,
        update_token_usage: Callable | None = None,
    ) -> None:
        """Connect to Azure's GPT API and set up a conversation with the user.

        Extends GptConversation.

        Args:
        ----
            deployment_name (str): The name of the Azure deployment to use.

            model_name (str): The name of the model to use. This is distinct
                from the deployment name.

            prompts (dict): A dictionary of prompts to use for the conversation.

            correct (bool): Whether to correct the model output.

            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each
                sentence individually.

            version (str): The version of the Azure API to use.

            base_url (str): The base URL of the Azure API to use.

            update_token_usage (Callable): A function to update the token usage
                statistics.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
            update_token_usage=update_token_usage,
        )

        self.version = version
        self.base_url = base_url
        self.deployment_name = deployment_name

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the Azure API.

        If the key is valid, initialise the conversational agent. No user stats
        on Azure.

        Args:
        ----
            api_key (str): The API key for the Azure API.

            user (str, optional): The user for usage statistics.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        try:
            self.chat = AzureChatOpenAI(
                deployment_name=self.deployment_name,
                model_name=self.model_name,
                openai_api_version=self.version,
                azure_endpoint=self.base_url,
                openai_api_key=api_key,
                temperature=0,
            )
            self.ca_chat = AzureChatOpenAI(
                deployment_name=self.deployment_name,
                model_name=self.model_name,
                openai_api_version=self.version,
                azure_endpoint=self.base_url,
                openai_api_key=api_key,
                temperature=0,
            )

            self.chat.generate([[HumanMessage(content="Hello")]])
            self.user = user if user is not None else "Azure Community"

            return True

        except openai._exceptions.AuthenticationError:
            self._chat = None
            self._ca_chat = None
            return False

    def _update_usage_stats(self, model: str, token_usage: dict) -> None:
        if self._update_token_usage is not None:
            self._update_token_usage(self.user, model, token_usage)

__init__(deployment_name, model_name, prompts, correct=False, split_correction=False, version=None, base_url=None, update_token_usage=None)

Connect to Azure's GPT API and set up a conversation with the user.

Extends GptConversation.


Parameters

deployment_name (str): The name of the Azure deployment to use.

model_name (str): The name of the model to use. This is distinct
    from the deployment name.

prompts (dict): A dictionary of prompts to use for the conversation.

correct (bool): Whether to correct the model output.

split_correction (bool): Whether to correct the model output by
    splitting the output into sentences and correcting each
    sentence individually.

version (str): The version of the Azure API to use.

base_url (str): The base URL of the Azure API to use.

update_token_usage (Callable): A function to update the token usage
    statistics.
Source code in biochatter/biochatter/llm_connect/azure.py
def __init__(
    self,
    deployment_name: str,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
    version: str | None = None,
    base_url: str | None = None,
    update_token_usage: Callable | None = None,
) -> None:
    """Connect to Azure's GPT API and set up a conversation with the user.

    Extends GptConversation.

    Args:
    ----
        deployment_name (str): The name of the Azure deployment to use.

        model_name (str): The name of the model to use. This is distinct
            from the deployment name.

        prompts (dict): A dictionary of prompts to use for the conversation.

        correct (bool): Whether to correct the model output.

        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each
            sentence individually.

        version (str): The version of the Azure API to use.

        base_url (str): The base URL of the Azure API to use.

        update_token_usage (Callable): A function to update the token usage
            statistics.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
        update_token_usage=update_token_usage,
    )

    self.version = version
    self.base_url = base_url
    self.deployment_name = deployment_name

set_api_key(api_key, user=None)

Set the API key for the Azure API.

If the key is valid, initialise the conversational agent. No user stats on Azure.


Parameters

api_key (str): The API key for the Azure API.

user (str, optional): The user for usage statistics.

Returns

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/azure.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the Azure API.

    If the key is valid, initialise the conversational agent. No user stats
    on Azure.

    Args:
    ----
        api_key (str): The API key for the Azure API.

        user (str, optional): The user for usage statistics.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    try:
        self.chat = AzureChatOpenAI(
            deployment_name=self.deployment_name,
            model_name=self.model_name,
            openai_api_version=self.version,
            azure_endpoint=self.base_url,
            openai_api_key=api_key,
            temperature=0,
        )
        self.ca_chat = AzureChatOpenAI(
            deployment_name=self.deployment_name,
            model_name=self.model_name,
            openai_api_version=self.version,
            azure_endpoint=self.base_url,
            openai_api_key=api_key,
            temperature=0,
        )

        self.chat.generate([[HumanMessage(content="Hello")]])
        self.user = user if user is not None else "Azure Community"

        return True

    except openai._exceptions.AuthenticationError:
        self._chat = None
        self._ca_chat = None
        return False
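
A similar sketch for Azure; the deployment name, API version, endpoint, and environment variable below are placeholders for the values of your Azure OpenAI resource:

import os

from biochatter.llm_connect import AzureGptConversation

conversation = AzureGptConversation(
    deployment_name="my-gpt-4-deployment",  # placeholder deployment name
    model_name="gpt-4",
    prompts=prompts,  # as in the Anthropic example above
    version="2024-02-01",  # placeholder API version
    base_url="https://my-resource.openai.azure.com/",  # placeholder endpoint
)

if conversation.set_api_key(os.environ["AZURE_OPENAI_API_KEY"]):
    msg, token_usage, correction = conversation.query("What is BioCypher?")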

BloomConversation

Bases: Conversation

Conversation class for the Bloom model.

Source code in biochatter/biochatter/llm_connect/misc.py
class BloomConversation(Conversation):
    """Conversation class for the Bloom model."""

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        split_correction: bool,
    ) -> None:
        """Initialise the BloomConversation class.

        DEPRECATED: Superseded by XinferenceConversation.
        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            split_correction=split_correction,
        )

        self.messages = []

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the HuggingFace API.

        If the key is valid, initialise the conversational agent.

        Args:
        ----
            api_key (str): The API key for the HuggingFace API.

            user (str): The user for usage statistics.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        self.chat = HuggingFaceHub(
            repo_id=self.model_name,
            model_kwargs={"temperature": 1.0},  # "regular sampling"
            # as per https://huggingface.co/docs/api-inference/detailed_parameters
            huggingfacehub_api_token=api_key,
        )

        try:
            self.chat.generate(["Hello, I am a biomedical researcher."])
            return True
        except ValueError:
            return False

    def _cast_messages(self, messages: list) -> str:
        """Render the different roles of the chat-based conversation."""
        cast = ""
        for m in messages:
            if isinstance(m, SystemMessage):
                cast += f"System: {m.content}\n"
            elif isinstance(m, HumanMessage):
                cast += f"Human: {m.content}\n"
            elif isinstance(m, AIMessage):
                cast += f"AI: {m.content}\n"
            else:
                error_msg = f"Unknown message type: {type(m)}"
                raise TypeError(error_msg)

        return cast

    def _primary_query(self) -> tuple:
        response = self.chat.generate([self._cast_messages(self.messages)])

        msg = response.generations[0][0].text
        token_usage = {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0,
        }

        self.append_ai_message(msg)

        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        return "ok"

__init__(model_name, prompts, split_correction)

Initialise the BloomConversation class.

DEPRECATED: Superseded by XinferenceConversation.

Source code in biochatter/biochatter/llm_connect/misc.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    split_correction: bool,
) -> None:
    """Initialise the BloomConversation class.

    DEPRECATED: Superseded by XinferenceConversation.
    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        split_correction=split_correction,
    )

    self.messages = []

set_api_key(api_key, user=None)

Set the API key for the HuggingFace API.

If the key is valid, initialise the conversational agent.


Parameters

api_key (str): The API key for the HuggingFace API.

user (str): The user for usage statistics.

Returns

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/misc.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the HuggingFace API.

    If the key is valid, initialise the conversational agent.

    Args:
    ----
        api_key (str): The API key for the HuggingFace API.

        user (str): The user for usage statistics.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    self.chat = HuggingFaceHub(
        repo_id=self.model_name,
        model_kwargs={"temperature": 1.0},  # "regular sampling"
        # as per https://huggingface.co/docs/api-inference/detailed_parameters
        huggingfacehub_api_token=api_key,
    )

    try:
        self.chat.generate(["Hello, I am a biomedical researcher."])
        return True
    except ValueError:
        return False

Conversation

Bases: ABC

Use this class to set up a connection to an LLM API.

Can be used to set the user name and API key, append specific messages for system, user, and AI roles (if available), set up the general context as well as manual and tool-based data inputs, and finally to query the API with prompts made by the user.

The conversation class is expected to have a messages attribute to store the conversation, and a history attribute, which is a list of messages in a specific format for logging / printing.

Source code in biochatter/biochatter/llm_connect/conversation.py
class Conversation(ABC):
    """Use this class to set up a connection to an LLM API.

    Can be used to set the user name and API key, append specific messages for
    system, user, and AI roles (if available), set up the general context as
    well as manual and tool-based data inputs, and finally to query the API
    with prompts made by the user.

    The conversation class is expected to have a `messages` attribute to store
    the conversation, and a `history` attribute, which is a list of messages in
    a specific format for logging / printing.

    """

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        use_ragagent_selector: bool = False,
        tools: list[Callable] = None,
        tool_call_mode: Literal["auto", "text"] = "auto",
        mcp: bool = False,
        additional_tools_instructions: str = None,
    ) -> None:
        super().__init__()
        self.model_name = model_name
        self.prompts = prompts
        self.correct = correct
        self.split_correction = split_correction
        self.rag_agents: list[RagAgent] = []
        self.history = []
        self.messages = []
        self.ca_messages = []
        self.tool_calls = deque()
        self.current_statements = []
        self._use_ragagent_selector = use_ragagent_selector
        self._chat = None
        self._ca_chat = None
        self.tools = tools
        self.tool_call_mode = tool_call_mode
        self.tools_prompt = None
        self.mcp = mcp
        self.additional_tools_instructions = additional_tools_instructions if additional_tools_instructions else ""

    @property
    def chat(self):
        """Access the chat attribute with error handling."""
        if self._chat is None:
            msg = "Chat attribute not initialized. Did you call set_api_key()?"
            logger.error(msg)
            raise AttributeError(msg)
        return self._chat

    @chat.setter
    def chat(self, value):
        """Set the chat attribute."""
        self._chat = value

    @property
    def ca_chat(self):
        """Access the correcting agent chat attribute with error handling."""
        if self._ca_chat is None:
            msg = "Correcting agent chat attribute not initialized. Did you call set_api_key()?"
            logger.error(msg)
            raise AttributeError(msg)
        return self._ca_chat

    @ca_chat.setter
    def ca_chat(self, value):
        """Set the correcting agent chat attribute."""
        self._ca_chat = value

    @property
    def use_ragagent_selector(self) -> bool:
        """Whether to use the ragagent selector."""
        return self._use_ragagent_selector

    @use_ragagent_selector.setter
    def use_ragagent_selector(self, val: bool) -> None:
        """Set the use_ragagent_selector attribute."""
        self._use_ragagent_selector = val

    def set_user_name(self, user_name: str) -> None:
        """Set the user name."""
        self.user_name = user_name

    def set_rag_agent(self, agent: RagAgent) -> None:
        """Update or insert rag_agent.

        If the rag_agent with the same mode already exists, it will be updated.
        Otherwise, the new rag_agent will be inserted.
        """
        i, _ = self.find_rag_agent(agent.mode)
        if i < 0:
            # insert
            self.rag_agents.append(agent)
        else:
            # update
            self.rag_agents[i] = agent

    def find_rag_agent(self, mode: str) -> tuple[int, RagAgent]:
        """Find the rag_agent with the given mode."""
        for i, val in enumerate(self.rag_agents):
            if val.mode == mode:
                return i, val
        return -1, None

    @abstractmethod
    def set_api_key(self, api_key: str, user: str | None = None) -> None:
        """Set the API key."""

    def get_prompts(self) -> dict:
        """Get the prompts."""
        return self.prompts

    def set_prompts(self, prompts: dict) -> None:
        """Set the prompts."""
        self.prompts = prompts

    def _tool_formatter(self, tools: list[Callable], mcp: bool = False) -> str:
        """Format the tools. Only for model not supporting tool calling."""
        tools_description = ""

        for idx, tool in enumerate(tools):
            tools_description += f"<tool_{idx}>\n"
            tools_description += f"Tool name: {tool.name}\n"
            tools_description += f"Tool description: {tool.description}\n"
            if mcp:
                tools_description += f"Tool call schema:\n {tool.tool_call_schema}\n"
            else:
                tools_description += f"Tool call schema:\n {tool.args}\n"
            tools_description += f"</tool_{idx}>\n"
        return tools_description

    def _create_tool_prompt(
        self, tools: list[Callable], additional_tools_instructions: str = None, mcp: bool = False
    ) -> str:
        """Create the tool prompt. Only for model not supporting tool calling."""
        prompt_template = ChatPromptTemplate.from_template(TOOL_USAGE_PROMPT)
        tools_description = self._tool_formatter(tools, mcp=mcp)
        new_message = prompt_template.invoke(
            {
                "user_question": self.messages[-1].content,
                "tools": tools_description,
                "additional_tools_instructions": additional_tools_instructions if additional_tools_instructions else "",
            }
        )
        return new_message.messages[0]

    def bind_tools(self, tools: list[Callable]) -> None:
        """Bind tools to the chat."""
        # Check if the model supports tool calling
        # (exploit the enum class in available_models.py)
        if self.model_name in TOOL_CALLING_MODELS and self.ca_chat:
            self.chat = self.chat.bind_tools(tools)
            self.ca_chat = self.ca_chat.bind_tools(tools)

        elif self.model_name in TOOL_CALLING_MODELS:
            self.chat = self.chat.bind_tools(tools)

        # elif self.model_name not in TOOL_CALLING_MODELS:
        #    self.tools_prompt = self._create_tool_prompt(tools, additional_instructions)

        # If not, fail gracefully
        # raise ValueError(f"Model {self.model_name} does not support tool calling.")

    def append_ai_message(self, message: str) -> None:
        """Add a message from the AI to the conversation.

        Args:
        ----
            message (str): The message from the AI.

        """
        self.messages.append(
            AIMessage(
                content=message,
            ),
        )

    def append_system_message(self, message: str) -> None:
        """Add a system message to the conversation.

        Args:
        ----
            message (str): The system message.

        """
        self.messages.append(
            SystemMessage(
                content=message,
            ),
        )

    def append_ca_message(self, message: str) -> None:
        """Add a message to the correcting agent conversation.

        Args:
        ----
            message (str): The message to the correcting agent.

        """
        self.ca_messages.append(
            SystemMessage(
                content=message,
            ),
        )

    def append_user_message(self, message: str) -> None:
        """Add a message from the user to the conversation.

        Args:
        ----
            message (str): The message from the user.

        """
        self.messages.append(
            HumanMessage(
                content=message,
            ),
        )

    def append_image_message(
        self,
        message: str,
        image_url: str,
        local: bool = False,
    ) -> None:
        """Add a user message with an image to the conversation.

        Also checks, in addition to the `local` flag, if the image URL is a
        local file path. If it is local, the image will be encoded as a base64
        string to be passed to the LLM.

        Args:
        ----
            message (str): The message from the user.
            image_url (str): The URL of the image.
            local (bool): Whether the image is local or not. If local, it will
                be encoded as a base64 string to be passed to the LLM.

        """
        parsed_url = urllib.parse.urlparse(image_url)
        if local or not parsed_url.netloc:
            image_url = f"data:image/jpeg;base64,{encode_image(image_url)}"
        else:
            image_url = f"data:image/jpeg;base64,{encode_image_from_url(image_url)}"

        self.messages.append(
            HumanMessage(
                content=[
                    {"type": "text", "text": message},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            ),
        )

    def setup(self, context: str) -> None:
        """Set up the conversation with general prompts and a context."""
        for msg in self.prompts["primary_model_prompts"]:
            if msg:
                self.append_system_message(msg)

        for msg in self.prompts["correcting_agent_prompts"]:
            if msg:
                self.append_ca_message(msg)

        self.context = context
        msg = f"The topic of the research is {context}."
        self.append_system_message(msg)

    def setup_data_input_manual(self, data_input: str) -> None:
        """Set up the data input manually."""
        self.data_input = data_input
        msg = f"The user has given information on the data input: {data_input}."
        self.append_system_message(msg)

    def setup_data_input_tool(self, df, input_file_name: str) -> None:
        """Set up the data input tool."""
        self.data_input_tool = df

        for tool_name in self.prompts["tool_prompts"]:
            if tool_name in input_file_name:
                msg = self.prompts["tool_prompts"][tool_name].format(df=df)
                self.append_system_message(msg)

    def query(
        self,
        text: str,
        image_url: str | None = None,
        structured_model: BaseModel | None = None,
        wrap_structured_output: bool | None = None,
        tools: list[Callable] | None = None,
        explain_tool_result: bool | None = None,
        additional_tools_instructions: str | None = None,
        general_instructions_tool_interpretation: str | None = None,
        additional_instructions_tool_interpretation: str | None = None,
        mcp: bool | None = None,
        return_tool_calls_as_ai_message: bool | None = None,
        **kwargs,
    ) -> tuple[str, dict | None, str | None]:
        """Query the LLM API using the user's query.

        Appends the most recent query to the conversation, optionally injects
        context from the RAG agent, and runs the primary query method of the
        child class.

        Args:
        ----
            text (str): The user query.

            image_url (str): The URL of an image to include in the conversation.
                Optional and only supported for models with vision capabilities.

            structured_model (BaseModel): The structured output model to use for the query.

            wrap_structured_output (bool): Whether to wrap the structured output in JSON quotes.

            tools (list[Callable]): The tools to use for the query.

            explain_tool_result (bool): Whether to explain the tool result.

            additional_tools_instructions (str): The additional instructions for the query.
                Mainly used for tools that do not support tool calling.

            general_instructions_tool_interpretation (str): The general
                instructions for the tool interpretation.
                Overrides the default prompt in `GENERAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

            additional_instructions_tool_interpretation (str): The additional
                instructions for the tool interpretation.
                Overrides the default prompt in `ADDITIONAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

            mcp (bool): If you want to use MCP mode, this should be set to True.

            return_tool_calls_as_ai_message (bool): If you want to return the tool calls as an AI message, this should be set to True.

            **kwargs: Additional keyword arguments.

        Returns:
        -------
            tuple: A tuple containing the response from the API, the token usage
                information, and the correction if necessary/desired.

        """
        if mcp:
            self.mcp = True

        # save the last human prompt that may be used for answer enhancement
        self.last_human_prompt = text

        # if additional_tools_instructions are provided, save them
        if additional_tools_instructions:
            self.additional_tools_instructions = additional_tools_instructions

        # override the default prompts if other provided
        self.general_instructions_tool_interpretation = (
            general_instructions_tool_interpretation
            if general_instructions_tool_interpretation
            else GENERAL_TOOL_RESULT_INTERPRETATION_PROMPT
        )
        self.additional_instructions_tool_interpretation = (
            additional_instructions_tool_interpretation
            if additional_instructions_tool_interpretation
            else ADDITIONAL_TOOL_RESULT_INTERPRETATION_PROMPT
        )
        if not image_url:
            self.append_user_message(text)
        else:
            self.append_image_message(text, image_url)

        self._inject_context(text)

        # tools passed at this step are used only for this message
        msg, token_usage = self._primary_query(
            tools=tools,
            explain_tool_result=explain_tool_result,
            return_tool_calls_as_ai_message=return_tool_calls_as_ai_message,
            structured_model=structured_model,
            wrap_structured_output=wrap_structured_output,
        )

        if not token_usage:
            # indicates error
            return (msg, token_usage, None)

        if not self.correct:
            return (msg, token_usage, None)

        cor_msg = "Correcting (using single sentences) ..." if self.split_correction else "Correcting ..."

        if st:
            with st.spinner(cor_msg):
                corrections = self._correct_query(text)
        else:
            corrections = self._correct_query(text)

        if not corrections:
            return (msg, token_usage, None)

        correction = "\n".join(corrections)
        return (msg, token_usage, correction)

    def _correct_query(self, msg: str) -> list[str]:
        corrections = []
        if self.split_correction:
            nltk.download("punkt")
            tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
            sentences = tokenizer.tokenize(msg)
            for sentence in sentences:
                correction = self._correct_response(sentence)

                if str(correction).lower() not in ["ok", "ok."]:
                    corrections.append(correction)
        else:
            correction = self._correct_response(msg)

            if str(correction).lower() not in ["ok", "ok."]:
                corrections.append(correction)

        return corrections

    @abstractmethod
    def _primary_query(self, text: str) -> tuple[str, dict | None]:
        """Run the primary query."""

    @abstractmethod
    def _correct_response(self, msg: str) -> str:
        """Correct the response."""

    def _process_manual_tool_call(
        self,
        tool_call: list[dict],
        available_tools: list[Callable],
        explain_tool_result: bool = False,
    ) -> str:
        """Process manual tool calls from the model response.

        This method handles the processing of tool calls for models that don't natively
        support tool calling. It takes the parsed JSON response and executes the
        appropriate tool.

        Args:
        ----
            tool_call (list[dict]): The parsed tool call information from the model response.
            available_tools (list[Callable]): The tools available for execution.
            explain_tool_result (bool): Whether to explain the tool result.

        Returns:
        -------
            str: The processed message containing the tool name, arguments, and result.

        """
        tool_name = tool_call["tool_name"]
        tool_func = next((t for t in available_tools if t.name == tool_name), None)

        # Remove the tool name from the tool call in order to invoke the tool
        # This is because tool_name is not a valid argument for the tool
        del tool_call["tool_name"]

        # Execute the tool based on whether we're in async context or not
        if self.mcp:
            loop = asyncio.get_running_loop()
            tool_result = loop.run_until_complete(tool_func.ainvoke(tool_call))
        else:
            tool_result = tool_func.invoke(tool_call)

        msg = f"Tool: {tool_name}\nArguments: {tool_call}\nTool result: {tool_result}"

        if explain_tool_result:
            tool_result_interpretation = self.chat.invoke(
                TOOL_RESULT_INTERPRETATION_PROMPT.format(
                    original_question=self.last_human_prompt,
                    tool_result=tool_result,
                    general_instructions=self.general_instructions_tool_interpretation,
                    additional_instructions=self.additional_instructions_tool_interpretation,
                )
            )
            msg += f"\nTool result interpretation: {tool_result_interpretation.content}"

        self.append_ai_message(msg)

        return msg

    def _process_tool_calls(
        self,
        tool_calls: list[dict],
        available_tools: list[Callable],
        response_content: str,
        explain_tool_result: bool = False,
        return_tool_calls_as_ai_message: bool = False,
    ) -> str:
        """Process tool calls from the model response.

        This method handles the processing of tool calls returned by the model.
        It can either automatically execute the tools and return their results,
        or format the tool calls as text.

        Args:
        ----
            tool_calls: The tool calls from the model response.
            response_content: The text content of the response (used as fallback).
            available_tools: The tools available in the chat.
            explain_tool_result (bool): Whether to explain the tool result.
            return_tool_calls_as_ai_message (bool): If you want to return the tool calls as an AI message, this should be set to True.

        Returns:
        -------
            str: The processed message, either tool results or formatted tool calls.

        """
        if not tool_calls:
            return response_content

        msg = ""

        if self.tool_call_mode == "auto":
            for idx, tool_call in enumerate(tool_calls):
                # Extract tool name and arguments
                tool_name = tool_call["name"]
                tool_args = tool_call["args"]
                tool_call_id = tool_call["id"]

                # Find the matching tool function
                tool_func = next((t for t in available_tools if t.name == tool_name), None)

                if tool_func:
                    # Execute the tool
                    try:
                        if self.mcp:
                            loop = asyncio.get_running_loop()
                            tool_result = loop.run_until_complete(tool_func.ainvoke(tool_args))
                        else:
                            tool_result = tool_func.invoke(tool_args)
                        # Add the tool result to the conversation
                        if return_tool_calls_as_ai_message:
                            self.append_ai_message(f"Tool call ({tool_name}) \nResult: {tool_result!s}")
                            self.tool_calls.append({"name": tool_name, "args": tool_args, "id": tool_call_id})
                        else:
                            self.messages.append(
                                ToolMessage(content=str(tool_result), name=tool_name, tool_call_id=tool_call_id)
                            )

                        if idx > 0:
                            msg += "\n"
                        msg += f"Tool call ({tool_name}) result: {tool_result!s}"

                        if explain_tool_result:
                            tool_result_interpretation = self.chat.invoke(
                                TOOL_RESULT_INTERPRETATION_PROMPT.format(
                                    original_question=self.last_human_prompt,
                                    tool_result=tool_result,
                                    general_instructions=self.general_instructions_tool_interpretation,
                                    additional_instructions=self.additional_instructions_tool_interpretation,
                                )
                            )
                            self.append_ai_message(tool_result_interpretation.content)
                            msg += f"\nTool result interpretation: {tool_result_interpretation.content}"

                    except Exception as e:
                        # Handle tool execution errors
                        error_message = f"Error executing tool {tool_name}: {e!s}"
                        self.messages.append(
                            ToolMessage(content=error_message, name=tool_name, tool_call_id=tool_call_id)
                        )
                        msg = error_message
            return msg

        if self.tool_call_mode == "text":
            # Join all tool calls in a text format
            tool_calls_text = []
            for tool_call in tool_calls:
                tool_name = tool_call["name"]
                tool_args = tool_call["args"]
                tool_call_id = tool_call["id"]
                tool_calls_text.append(f"Tool: {tool_name} - Arguments: {tool_args} - Tool call id: {tool_call_id}")

            # Join with line breaks and set as the message
            msg = "\n".join(tool_calls_text)

            # Append the formatted tool calls as an AI message
            self.append_ai_message(msg)
            return msg

        # Invalid tool call mode, log warning and return original content
        logger.warning(f"Invalid tool call mode: {self.tool_call_mode}. Using original response content.")
        return response_content

    def _inject_context_by_ragagent_selector(self, text: str) -> list[str]:
        """Inject the context generated by RagAgentSelector.

        The RagAgentSelector will choose the appropriate rag agent to generate
        context according to user's question.

        Args:
        ----
            text (str): The user query to be used for choosing rag agent

        """
        rag_agents: list[RagAgent] = [agent for agent in self.rag_agents if agent.use_prompt]
        decider_agent = RagAgentSelector(
            rag_agents=rag_agents,
            conversation_factory=lambda: self,
        )
        result = decider_agent.execute(text)
        if result.tool_result is not None and len(result.tool_result) > 0:
            return result.tool_result
        # find rag agent selected
        rag_agent = next(
            (agent for agent in rag_agents if agent.mode == result.answer),
            None,
        )
        if rag_agent is None:
            return None
        return rag_agent.generate_responses(text)

    def _inject_context(self, text: str) -> None:
        """Inject the context received from the RAG agent into the prompt.

        The RAG agent will find the most similar n text fragments and add them
        to the message history object for usage in the next prompt. Uses the
        document summarisation prompt set to inject the context. The ultimate
        prompt should include the placeholder for the statements, `{statements}`
        (used for formatting the string).

        Args:
        ----
            text (str): The user query to be used for similarity search.

        """
        sim_msg = "Performing similarity search to inject fragments ..."

        if st:
            with st.spinner(sim_msg):
                statements = []
                if self.use_ragagent_selector:
                    statements = self._inject_context_by_ragagent_selector(text)
                else:
                    for agent in self.rag_agents:
                        try:
                            docs = agent.generate_responses(text)
                            statements = statements + [doc[0] for doc in docs]
                        except ValueError as e:
                            logger.warning(e)

        else:
            statements = []
            if self.use_ragagent_selector:
                statements = self._inject_context_by_ragagent_selector(text)
            else:
                for agent in self.rag_agents:
                    try:
                        docs = agent.generate_responses(text)
                        statements = statements + [doc[0] for doc in docs]
                    except ValueError as e:
                        logger.warning(e)

        if statements and len(statements) > 0:
            prompts = self.prompts["rag_agent_prompts"]
            self.current_statements = statements
            for i, prompt in enumerate(prompts):
                # if last prompt, format the statements into the prompt
                if i == len(prompts) - 1:
                    self.append_system_message(
                        prompt.format(statements=statements),
                    )
                else:
                    self.append_system_message(prompt)

    def get_last_injected_context(self) -> list[dict]:
        """Get a formatted list of the last context.

        Get the last context injected into the conversation. Contains one
        dictionary for each RAG mode.

        Returns
        -------
            List[dict]: A list of dictionaries containing the mode and context
            for each RAG agent.

        """
        return [{"mode": agent.mode, "context": agent.last_response} for agent in self.rag_agents]

    def get_msg_json(self) -> str:
        """Return a JSON representation of the conversation.

        Returns a list of dicts of the messages in the conversation in JSON
        format. The keys of the dicts are the roles, the values are the
        messages.

        Returns
        -------
            str: A JSON representation of the messages in the conversation.

        """
        d = []
        for msg in self.messages:
            if isinstance(msg, SystemMessage):
                role = "system"
            elif isinstance(msg, HumanMessage):
                role = "user"
            elif isinstance(msg, AIMessage):
                role = "ai"
            else:
                error_msg = f"Unknown message type: {type(msg)}"
                raise TypeError(error_msg)

            d.append({role: msg.content})

        return json.dumps(d)

    def reset(self) -> None:
        """Reset the conversation to the initial state."""
        self.history = []
        self.messages = []
        self.ca_messages = []
        self.current_statements = []
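
Since Conversation is abstract, a concrete provider implements set_api_key, _primary_query, and _correct_response. A toy sketch of such a subclass (an echo backend, purely illustrative; note that query forwards per-message keyword arguments to _primary_query):

class EchoConversation(Conversation):
    """Toy subclass that echoes the last user message (illustrative only)."""

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        # No real backend to authenticate against.
        self.user = user
        return True

    def _primary_query(self, **kwargs) -> tuple:
        msg = f"Echo: {self.messages[-1].content}"
        token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
        self.append_ai_message(msg)
        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        return "OK"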

ca_chat property writable

Access the correcting agent chat attribute with error handling.

chat property writable

Access the chat attribute with error handling.

use_ragagent_selector property writable

Whether to use the ragagent selector.

append_ai_message(message)

Add a message from the AI to the conversation.


Parameters

message (str): The message from the AI.
Source code in biochatter/biochatter/llm_connect/conversation.py
def append_ai_message(self, message: str) -> None:
    """Add a message from the AI to the conversation.

    Args:
    ----
        message (str): The message from the AI.

    """
    self.messages.append(
        AIMessage(
            content=message,
        ),
    )

append_ca_message(message)

Add a message to the correcting agent conversation.


Parameters

message (str): The message to the correcting agent.
Source code in biochatter/biochatter/llm_connect/conversation.py
def append_ca_message(self, message: str) -> None:
    """Add a message to the correcting agent conversation.

    Args:
    ----
        message (str): The message to the correcting agent.

    """
    self.ca_messages.append(
        SystemMessage(
            content=message,
        ),
    )

append_image_message(message, image_url, local=False)

Add a user message with an image to the conversation.

Also checks, in addition to the local flag, if the image URL is a local file path. If it is local, the image will be encoded as a base64 string to be passed to the LLM.


Parameters

message (str): The message from the user.
image_url (str): The URL of the image.
local (bool): Whether the image is local or not. If local, it will
    be encoded as a base64 string to be passed to the LLM.
Source code in biochatter/biochatter/llm_connect/conversation.py
def append_image_message(
    self,
    message: str,
    image_url: str,
    local: bool = False,
) -> None:
    """Add a user message with an image to the conversation.

    Also checks, in addition to the `local` flag, if the image URL is a
    local file path. If it is local, the image will be encoded as a base64
    string to be passed to the LLM.

    Args:
    ----
        message (str): The message from the user.
        image_url (str): The URL of the image.
        local (bool): Whether the image is local or not. If local, it will
            be encoded as a base64 string to be passed to the LLM.

    """
    parsed_url = urllib.parse.urlparse(image_url)
    if local or not parsed_url.netloc:
        image_url = f"data:image/jpeg;base64,{encode_image(image_url)}"
    else:
        image_url = f"data:image/jpeg;base64,{encode_image_from_url(image_url)}"

    self.messages.append(
        HumanMessage(
            content=[
                {"type": "text", "text": message},
                {"type": "image_url", "image_url": {"url": image_url}},
            ],
        ),
    )
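
For example, attaching a local image to a user message (the path is a placeholder; base64 encoding happens internally via encode_image):

conversation.append_image_message(
    message="What does this figure show?",
    image_url="figures/example.jpg",  # placeholder local path
    local=True,
)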

append_system_message(message)

Add a system message to the conversation.


Parameters

message (str): The system message.
Source code in biochatter/biochatter/llm_connect/conversation.py
def append_system_message(self, message: str) -> None:
    """Add a system message to the conversation.

    Args:
    ----
        message (str): The system message.

    """
    self.messages.append(
        SystemMessage(
            content=message,
        ),
    )

append_user_message(message)

Add a message from the user to the conversation.


message (str): The message from the user.
Source code in biochatter/biochatter/llm_connect/conversation.py
def append_user_message(self, message: str) -> None:
    """Add a message from the user to the conversation.

    Args:
    ----
        message (str): The message from the user.

    """
    self.messages.append(
        HumanMessage(
            content=message,
        ),
    )
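
Taken together, the append_* helpers can seed a conversation before the first query. A minimal sketch, assuming a concrete subclass such as GptConversation (the import path mirrors the source locations shown in these listings) and placeholder values:

from biochatter.llm_connect.openai import GptConversation

# Placeholder prompts dict; the expected keys follow setup() and
# setup_data_input_tool() below.
prompts = {
    "primary_model_prompts": [],
    "correcting_agent_prompts": [],
    "tool_prompts": {},
}

convo = GptConversation(model_name="gpt-4o-mini", prompts=prompts)
convo.append_system_message("You are a concise biomedical assistant.")
convo.append_user_message("What does the TP53 gene encode?")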

bind_tools(tools)

Bind tools to the chat.

Source code in biochatter/biochatter/llm_connect/conversation.py
def bind_tools(self, tools: list[Callable]) -> None:
    """Bind tools to the chat."""
    # Check if the model supports tool calling
    # (exploit the enum class in available_models.py)
    if self.model_name in TOOL_CALLING_MODELS and self.ca_chat:
        self.chat = self.chat.bind_tools(tools)
        self.ca_chat = self.ca_chat.bind_tools(tools)

    elif self.model_name in TOOL_CALLING_MODELS:
        self.chat = self.chat.bind_tools(tools)
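
Any plain callable can serve as a tool; LangChain derives the tool schema from the function's signature and docstring. A hypothetical sketch (gene_synonyms is invented for illustration):

def gene_synonyms(symbol: str) -> list[str]:
    """Return known synonyms for a gene symbol (stub data)."""
    return {"TP53": ["p53", "LFS1"]}.get(symbol, [])

# Binding only takes effect for models listed in TOOL_CALLING_MODELS:
# convo.bind_tools([gene_synonyms])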

find_rag_agent(mode)

Find the rag_agent with the given mode.

Source code in biochatter/biochatter/llm_connect/conversation.py
def find_rag_agent(self, mode: str) -> tuple[int, RagAgent]:
    """Find the rag_agent with the given mode."""
    for i, val in enumerate(self.rag_agents):
        if val.mode == mode:
            return i, val
    return -1, None

get_last_injected_context()

Get a formatted list of the last context.

Get the last context injected into the conversation. Contains one dictionary for each RAG mode.

Returns
List[dict]: A list of dictionaries containing the mode and context
for each RAG agent.
Source code in biochatter/biochatter/llm_connect/conversation.py
def get_last_injected_context(self) -> list[dict]:
    """Get a formatted list of the last context.

    Get the last context injected into the conversation. Contains one
    dictionary for each RAG mode.

    Returns
    -------
        List[dict]: A list of dictionaries containing the mode and context
        for each RAG agent.

    """
    return [{"mode": agent.mode, "context": agent.last_response} for agent in self.rag_agents]
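
After a query that triggered retrieval, the injected contexts can be inspected per mode; a short sketch, assuming the convo object from the earlier example has RAG agents attached:

for entry in convo.get_last_injected_context():
    print(entry["mode"], "->", entry["context"])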

get_msg_json()

Return a JSON representation of the conversation.

Returns a list of dicts of the messages in the conversation in JSON format. The keys of the dicts are the roles, the values are the messages.

Returns
str: A JSON representation of the messages in the conversation.
Source code in biochatter/biochatter/llm_connect/conversation.py
def get_msg_json(self) -> str:
    """Return a JSON representation of the conversation.

    Returns a list of dicts of the messages in the conversation in JSON
    format. The keys of the dicts are the roles, the values are the
    messages.

    Returns
    -------
        str: A JSON representation of the messages in the conversation.

    """
    d = []
    for msg in self.messages:
        if isinstance(msg, SystemMessage):
            role = "system"
        elif isinstance(msg, HumanMessage):
            role = "user"
        elif isinstance(msg, AIMessage):
            role = "ai"
        else:
            error_msg = f"Unknown message type: {type(msg)}"
            raise TypeError(error_msg)

        d.append({role: msg.content})

    return json.dumps(d)
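
The returned JSON string round-trips cleanly; a sketch, again assuming the convo seeded above:

import json

for entry in json.loads(convo.get_msg_json()):
    (role, content), = entry.items()  # each dict holds one role: content pair
    print(f"{role}: {content}")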

get_prompts()

Get the prompts.

Source code in biochatter/biochatter/llm_connect/conversation.py
def get_prompts(self) -> dict:
    """Get the prompts."""
    return self.prompts

query(text, image_url=None, structured_model=None, wrap_structured_output=None, tools=None, explain_tool_result=None, additional_tools_instructions=None, general_instructions_tool_interpretation=None, additional_instructions_tool_interpretation=None, mcp=None, return_tool_calls_as_ai_message=None, **kwargs)

Query the LLM API using the user's query.

Appends the most recent query to the conversation, optionally injects context from the RAG agent, and runs the primary query method of the child class.


text (str): The user query.

image_url (str): The URL of an image to include in the conversation.
    Optional and only supported for models with vision capabilities.

structured_model (BaseModel): The structured output model to use for the query.

wrap_structured_output (bool): Whether to wrap the structured output in JSON quotes.

tools (list[Callable]): The tools to use for the query.

explain_tool_result (bool): Whether to explain the tool result.

additional_tools_instructions (str): The additional instructions for the query.
    Mainly used with models that do not support native tool calling.

general_instructions_tool_interpretation (str): The general
    instructions for the tool interpretation.
    Overrides the default prompt in `GENERAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

additional_instructions_tool_interpretation (str): The additional
    instructions for the tool interpretation.
    Overrides the default prompt in `ADDITIONAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

mcp (bool): Whether to use MCP mode.

return_tool_calls_as_ai_message (bool): Whether to return the tool calls as an AI message.

**kwargs: Additional keyword arguments.

tuple: A tuple containing the response from the API, the token usage
    information, and the correction if necessary/desired.
Source code in biochatter/biochatter/llm_connect/conversation.py
def query(
    self,
    text: str,
    image_url: str | None = None,
    structured_model: BaseModel | None = None,
    wrap_structured_output: bool | None = None,
    tools: list[Callable] | None = None,
    explain_tool_result: bool | None = None,
    additional_tools_instructions: str | None = None,
    general_instructions_tool_interpretation: str | None = None,
    additional_instructions_tool_interpretation: str | None = None,
    mcp: bool | None = None,
    return_tool_calls_as_ai_message: bool | None = None,
    **kwargs,
) -> tuple[str, dict | None, str | None]:
    """Query the LLM API using the user's query.

    Appends the most recent query to the conversation, optionally injects
    context from the RAG agent, and runs the primary query method of the
    child class.

    Args:
    ----
        text (str): The user query.

        image_url (str): The URL of an image to include in the conversation.
            Optional and only supported for models with vision capabilities.

        structured_model (BaseModel): The structured output model to use for the query.

        wrap_structured_output (bool): Whether to wrap the structured output in JSON quotes.

        tools (list[Callable]): The tools to use for the query.

        explain_tool_result (bool): Whether to explain the tool result.

        additional_tools_instructions (str): The additional instructions for the query.
            Mainly used with models that do not support native tool calling.

        general_instructions_tool_interpretation (str): The general
            instructions for the tool interpretation.
            Overrides the default prompt in `GENERAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

        additional_instructions_tool_interpretation (str): The additional
            instructions for the tool interpretation.
            Overrides the default prompt in `ADDITIONAL_TOOL_RESULT_INTERPRETATION_PROMPT`.

        mcp (bool): Whether to use MCP mode.

        return_tool_calls_as_ai_message (bool): Whether to return the tool calls as an AI message.

        **kwargs: Additional keyword arguments.

    Returns:
    -------
        tuple: A tuple containing the response from the API, the token usage
            information, and the correction if necessary/desired.

    """
    if mcp:
        self.mcp = True

    # save the last human prompt that may be used for answer enhancement
    self.last_human_prompt = text

    # if additional_tools_instructions are provided, save them
    if additional_tools_instructions:
        self.additional_tools_instructions = additional_tools_instructions

    # override the default prompts if other provided
    self.general_instructions_tool_interpretation = (
        general_instructions_tool_interpretation
        if general_instructions_tool_interpretation
        else GENERAL_TOOL_RESULT_INTERPRETATION_PROMPT
    )
    self.additional_instructions_tool_interpretation = (
        additional_instructions_tool_interpretation
        if additional_instructions_tool_interpretation
        else ADDITIONAL_TOOL_RESULT_INTERPRETATION_PROMPT
    )
    if not image_url:
        self.append_user_message(text)
    else:
        self.append_image_message(text, image_url)

    self._inject_context(text)

    # tools passed at this step are used only for this message
    msg, token_usage = self._primary_query(
        tools=tools,
        explain_tool_result=explain_tool_result,
        return_tool_calls_as_ai_message=return_tool_calls_as_ai_message,
        structured_model=structured_model,
        wrap_structured_output=wrap_structured_output,
    )

    if not token_usage:
        # indicates error
        return (msg, token_usage, None)

    if not self.correct:
        return (msg, token_usage, None)

    cor_msg = "Correcting (using single sentences) ..." if self.split_correction else "Correcting ..."

    if st:
        with st.spinner(cor_msg):
            corrections = self._correct_query(text)
    else:
        corrections = self._correct_query(text)

    if not corrections:
        return (msg, token_usage, None)

    correction = "\n".join(corrections)
    return (msg, token_usage, correction)
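
A minimal end-to-end sketch of query(), assuming the convo object from above (correct=False by default, so the correction slot comes back as None):

msg, token_usage, correction = convo.query("Summarise the role of BRCA1 in DNA repair.")
if token_usage is None:
    print("API error:", msg)  # on failure, msg carries the error string
else:
    print(msg)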

reset()

Reset the conversation to the initial state.

Source code in biochatter/biochatter/llm_connect/conversation.py
def reset(self) -> None:
    """Reset the conversation to the initial state."""
    self.history = []
    self.messages = []
    self.ca_messages = []
    self.current_statements = []

set_api_key(api_key, user=None) abstractmethod

Set the API key.

Source code in biochatter/biochatter/llm_connect/conversation.py
@abstractmethod
def set_api_key(self, api_key: str, user: str | None = None) -> None:
    """Set the API key."""

set_prompts(prompts)

Set the prompts.

Source code in biochatter/biochatter/llm_connect/conversation.py
def set_prompts(self, prompts: dict) -> None:
    """Set the prompts."""
    self.prompts = prompts

set_rag_agent(agent)

Update or insert rag_agent.

If the rag_agent with the same mode already exists, it will be updated. Otherwise, the new rag_agent will be inserted.

Source code in biochatter/biochatter/llm_connect/conversation.py
def set_rag_agent(self, agent: RagAgent) -> None:
    """Update or insert rag_agent.

    If the rag_agent with the same mode already exists, it will be updated.
    Otherwise, the new rag_agent will be inserted.
    """
    i, _ = self.find_rag_agent(agent.mode)
    if i < 0:
        # insert
        self.rag_agents.append(agent)
    else:
        # update
        self.rag_agents[i] = agent

set_user_name(user_name)

Set the user name.

Source code in biochatter/biochatter/llm_connect/conversation.py
def set_user_name(self, user_name: str) -> None:
    """Set the user name."""
    self.user_name = user_name

setup(context)

Set up the conversation with general prompts and a context.

Source code in biochatter/biochatter/llm_connect/conversation.py
def setup(self, context: str) -> None:
    """Set up the conversation with general prompts and a context."""
    for msg in self.prompts["primary_model_prompts"]:
        if msg:
            self.append_system_message(msg)

    for msg in self.prompts["correcting_agent_prompts"]:
        if msg:
            self.append_ca_message(msg)

    self.context = context
    msg = f"The topic of the research is {context}."
    self.append_system_message(msg)
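
The prompts dict consumed by setup() therefore needs at least the two prompt lists; an illustrative shape (all values are placeholders):

prompts = {
    "primary_model_prompts": ["You are an assistant to a biomedical researcher."],
    "correcting_agent_prompts": ["You check biomedical statements for factual errors."],
    "tool_prompts": {},  # consumed by setup_data_input_tool(), see below
}
# convo.setup(context="cancer genomics")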

setup_data_input_manual(data_input)

Set up the data input manually.

Source code in biochatter/biochatter/llm_connect/conversation.py
def setup_data_input_manual(self, data_input: str) -> None:
    """Set up the data input manually."""
    self.data_input = data_input
    msg = f"The user has given information on the data input: {data_input}."
    self.append_system_message(msg)

setup_data_input_tool(df, input_file_name)

Set up the data input tool.

Source code in biochatter/biochatter/llm_connect/conversation.py
def setup_data_input_tool(self, df, input_file_name: str) -> None:
    """Set up the data input tool."""
    self.data_input_tool = df

    for tool_name in self.prompts["tool_prompts"]:
        if tool_name in input_file_name:
            msg = self.prompts["tool_prompts"][tool_name].format(df=df)
            self.append_system_message(msg)
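
Each key in tool_prompts is matched as a substring of the input file name, and the matching prompt is formatted with the dataframe. A hypothetical entry:

prompts["tool_prompts"] = {
    # applies to any input file whose name contains "progeny"
    "progeny": "The user has provided PROGENy pathway activities:\n{df}",
}
# convo.setup_data_input_tool(df=my_dataframe, input_file_name="progeny_results.csv")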

GeminiConversation

Bases: Conversation

Conversation class for the Google Gemini model.

Source code in biochatter/biochatter/llm_connect/gemini.py
class GeminiConversation(Conversation):
    """Conversation class for the Google Gemini model."""

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        tools: list[Callable] | None = None,
        tool_call_mode: Literal["auto", "text"] = "auto",
    ) -> None:
        """Initialise the GeminiConversation class.

        Connect to Google's Gemini API and set up a conversation with the user.
        Also initialise a second conversational agent to provide corrections to
        the model output, if necessary.

        Args:
        ----
            model_name (str): The name of the model to use.

            prompts (dict): A dictionary of prompts to use for the conversation.

            correct (bool): Whether to correct the model output.

            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each
                sentence individually.

            tools (list[Callable]): List of tool functions to use with the model.

            tool_call_mode (str): The mode to use for tool calls.
                "auto": Automatically call tools.
                "text": Only return text output of the tool call.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
            tools=tools,
            tool_call_mode=tool_call_mode,
        )

        self.ca_model_name = "gemini-2.0-flash"

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the Google Gemini API.

        If the key is valid, initialise the conversational agent. Optionally set
        the user for usage statistics.

        Args:
        ----
            api_key (str): The API key for the Google Gemini API.

            user (str, optional): The user for usage statistics. If provided and
                equals "community", will track usage stats.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        self.user = user

        try:
            self.chat = ChatGoogleGenerativeAI(
                model=self.model_name,
                temperature=0,
                google_api_key=api_key,
            )
            self.ca_chat = ChatGoogleGenerativeAI(
                model=self.ca_model_name,
                temperature=0,
                google_api_key=api_key,
            )

            # if binding happens here, tools will be available for all messages
            if self.tools:
                self.bind_tools(self.tools)

            return True

        except Exception:  # Google Genai doesn't expose specific exception types
            self._chat = None
            self._ca_chat = None
            return False

    def _primary_query(self, tools: list[Callable] | None = None, **kwargs) -> tuple:
        """Query the Google Gemini API with the user's message.

        Return the response using the message history (flattery system messages,
        prior conversation) as context. Correct the response if necessary.

        Args:
        ----
            tools (list[Callable]): The tools to use for the query. Tools
            passed at this step are used only for this message and not stored
            as part of the conversation object.

            **kwargs: Additional keyword arguments.

        Returns:
        -------
            tuple: A tuple containing the response from the Gemini API and
                the token usage.

        """
        # bind tools to the chat if provided in the query
        chat = self.chat.bind_tools(tools) if (tools and self.model_name in TOOL_CALLING_MODELS) else self.chat

        try:
            response = chat.invoke(self.messages)
        except Exception as e:
            return str(e), None

        # Process tool calls if present
        if response.tool_calls:
            msg = self._process_tool_calls(response.tool_calls, tools, response.content)
        else:
            msg = response.content
            self.append_ai_message(msg)

        token_usage = response.usage_metadata["total_tokens"]

        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the Gemini API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the Gemini API.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()

        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )

        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )

        response = self.ca_chat.invoke(ca_messages)

        correction = response.content

        return correction
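
A usage sketch for the Gemini wrapper (model identifier and key are illustrative):

gemini_convo = GeminiConversation(
    model_name="gemini-2.0-flash",
    prompts=prompts,  # as in the setup() example above
)
if not gemini_convo.set_api_key(api_key="YOUR_GOOGLE_API_KEY"):
    raise SystemExit("invalid Google Gemini API key")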

__init__(model_name, prompts, correct=False, split_correction=False, tools=None, tool_call_mode='auto')

Initialise the GeminiConversation class.

Connect to Google's Gemini API and set up a conversation with the user. Also initialise a second conversational agent to provide corrections to the model output, if necessary.


model_name (str): The name of the model to use.

prompts (dict): A dictionary of prompts to use for the conversation.

correct (bool): Whether to correct the model output.

split_correction (bool): Whether to correct the model output by
    splitting the output into sentences and correcting each
    sentence individually.

tools (list[Callable]): List of tool functions to use with the model.

tool_call_mode (str): The mode to use for tool calls.
    "auto": Automatically call tools.
    "text": Only return text output of the tool call.
Source code in biochatter/biochatter/llm_connect/gemini.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
    tools: list[Callable] | None = None,
    tool_call_mode: Literal["auto", "text"] = "auto",
) -> None:
    """Initialise the GeminiConversation class.

    Connect to Google's Gemini API and set up a conversation with the user.
    Also initialise a second conversational agent to provide corrections to
    the model output, if necessary.

    Args:
    ----
        model_name (str): The name of the model to use.

        prompts (dict): A dictionary of prompts to use for the conversation.

        correct (bool): Whether to correct the model output.

        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each
            sentence individually.

        tools (list[Callable]): List of tool functions to use with the model.

        tool_call_mode (str): The mode to use for tool calls.
            "auto": Automatically call tools.
            "text": Only return text output of the tool call.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
        tools=tools,
        tool_call_mode=tool_call_mode,
    )

    self.ca_model_name = "gemini-2.0-flash"

set_api_key(api_key, user=None)

Set the API key for the Google Gemini API.

If the key is valid, initialise the conversational agent. Optionally set the user for usage statistics.


api_key (str): The API key for the Google Gemini API.

user (str, optional): The user for usage statistics. If provided and
    equals "community", will track usage stats.

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/gemini.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the Google Gemini API.

    If the key is valid, initialise the conversational agent. Optionally set
    the user for usage statistics.

    Args:
    ----
        api_key (str): The API key for the Google Gemini API.

        user (str, optional): The user for usage statistics. If provided and
            equals "community", will track usage stats.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    self.user = user

    try:
        self.chat = ChatGoogleGenerativeAI(
            model=self.model_name,
            temperature=0,
            google_api_key=api_key,
        )
        self.ca_chat = ChatGoogleGenerativeAI(
            model=self.ca_model_name,
            temperature=0,
            google_api_key=api_key,
        )

        # if binding happens here, tools will be available for all messages
        if self.tools:
            self.bind_tools(self.tools)

        return True

    except Exception:  # Google Genai doesn't expose specific exception types
        self._chat = None
        self._ca_chat = None
        return False

GptConversation

Bases: Conversation

Conversation class for the OpenAI GPT model.

Source code in biochatter/biochatter/llm_connect/openai.py
class GptConversation(Conversation):
    """Conversation class for the OpenAI GPT model."""

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        base_url: str | None = None,
        update_token_usage: Callable | None = None,
    ) -> None:
        """Connect to OpenAI's GPT API and set up a conversation with the user.

        Also initialise a second conversational agent to provide corrections to
        the model output, if necessary.

        Args:
        ----
            model_name (str): The name of the model to use.

            prompts (dict): A dictionary of prompts to use for the conversation.

            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each
                sentence individually.

            base_url (str): Optional OpenAI base_url value to use a custom
                endpoint URL instead of the default.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
        )
        self.base_url = base_url
        self.ca_model_name = "gpt-3.5-turbo"
        # TODO make accessible by drop-down

        self._update_token_usage = update_token_usage

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the OpenAI API.

        If the key is valid, initialise the conversational agent. Optionally set
        the user for usage statistics.

        Args:
        ----
            api_key (str): The API key for the OpenAI API.

            user (str, optional): The user for usage statistics. If provided and
                equals "community", will track usage stats.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        client = openai.OpenAI(
            api_key=api_key,
            base_url=self.base_url,
        )
        self.user = user

        try:
            client.models.list()
            self.chat = ChatOpenAI(
                model_name=self.model_name,
                temperature=0,
                openai_api_key=api_key,
                base_url=self.base_url,
            )
            self.ca_chat = ChatOpenAI(
                model_name=self.ca_model_name,
                temperature=0,
                openai_api_key=api_key,
                base_url=self.base_url,
            )
            if user == "community":
                self.usage_stats = get_stats(user=user)

            return True

        except openai._exceptions.AuthenticationError:
            self._chat = None
            self._ca_chat = None
            return False

    def _primary_query(self, **kwargs) -> tuple:
        """Query the OpenAI API with the user's message.

        Return the response using the message history (flattery system messages,
        prior conversation) as context. Correct the response if necessary.

        Returns
        -------
            tuple: A tuple containing the response from the OpenAI API and the
                token usage.

        """
        try:
            response = self.chat.generate([self.messages])
        except (
            openai._exceptions.APIError,
            openai._exceptions.OpenAIError,
            openai._exceptions.ConflictError,
            openai._exceptions.NotFoundError,
            openai._exceptions.APIStatusError,
            openai._exceptions.RateLimitError,
            openai._exceptions.APITimeoutError,
            openai._exceptions.BadRequestError,
            openai._exceptions.APIConnectionError,
            openai._exceptions.AuthenticationError,
            openai._exceptions.InternalServerError,
            openai._exceptions.PermissionDeniedError,
            openai._exceptions.UnprocessableEntityError,
            openai._exceptions.APIResponseValidationError,
        ) as e:
            return str(e), None

        msg = response.generations[0][0].text
        token_usage = response.llm_output.get("token_usage")

        self._update_usage_stats(self.model_name, token_usage)

        self.append_ai_message(msg)

        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the OpenAI API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the OpenAI API.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )

        response = self.ca_chat.generate([ca_messages])

        correction = response.generations[0][0].text
        token_usage = response.llm_output.get("token_usage")

        self._update_usage_stats(self.ca_model_name, token_usage)

        return correction

    def _update_usage_stats(self, model: str, token_usage: dict) -> None:
        """Update redis database with token usage statistics.

        Use the usage_stats object with the increment method.

        Args:
        ----
            model (str): The model name.

            token_usage (dict): The token usage statistics.

        """
        if self.user == "community":
            # Only process integer values
            stats_dict = {f"{k}:{model}": v for k, v in token_usage.items() if isinstance(v, int | float)}
            self.usage_stats.increment(
                "usage:[date]:[user]",
                stats_dict,
            )

        if self._update_token_usage is not None:
            self._update_token_usage(self.user, model, token_usage)

__init__(model_name, prompts, correct=False, split_correction=False, base_url=None, update_token_usage=None)

Connect to OpenAI's GPT API and set up a conversation with the user.

Also initialise a second conversational agent to provide corrections to the model output, if necessary.


model_name (str): The name of the model to use.

prompts (dict): A dictionary of prompts to use for the conversation.

split_correction (bool): Whether to correct the model output by
    splitting the output into sentences and correcting each
    sentence individually.

base_url (str): Optional OpenAI base_url value to use a custom
    endpoint URL instead of the default.
Source code in biochatter/biochatter/llm_connect/openai.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
    base_url: str | None = None,
    update_token_usage: Callable | None = None,
) -> None:
    """Connect to OpenAI's GPT API and set up a conversation with the user.

    Also initialise a second conversational agent to provide corrections to
    the model output, if necessary.

    Args:
    ----
        model_name (str): The name of the model to use.

        prompts (dict): A dictionary of prompts to use for the conversation.

        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each
            sentence individually.

        base_url (str): Optional OpenAI base_url value to use a custom
            endpoint URL instead of the default.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
    )
    self.base_url = base_url
    self.ca_model_name = "gpt-3.5-turbo"
    # TODO make accessible by drop-down

    self._update_token_usage = update_token_usage

set_api_key(api_key, user=None)

Set the API key for the OpenAI API.

If the key is valid, initialise the conversational agent. Optionally set the user for usage statistics.


api_key (str): The API key for the OpenAI API.

user (str, optional): The user for usage statistics. If provided and
    equals "community", will track usage stats.

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/openai.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the OpenAI API.

    If the key is valid, initialise the conversational agent. Optionally set
    the user for usage statistics.

    Args:
    ----
        api_key (str): The API key for the OpenAI API.

        user (str, optional): The user for usage statistics. If provided and
            equals "community", will track usage stats.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    client = openai.OpenAI(
        api_key=api_key,
        base_url=self.base_url,
    )
    self.user = user

    try:
        client.models.list()
        self.chat = ChatOpenAI(
            model_name=self.model_name,
            temperature=0,
            openai_api_key=api_key,
            base_url=self.base_url,
        )
        self.ca_chat = ChatOpenAI(
            model_name=self.ca_model_name,
            temperature=0,
            openai_api_key=api_key,
            base_url=self.base_url,
        )
        if user == "community":
            self.usage_stats = get_stats(user=user)

        return True

    except openai._exceptions.AuthenticationError:
        self._chat = None
        self._ca_chat = None
        return False
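
A usage sketch (model name is illustrative; leave base_url as None for the default OpenAI endpoint):

import os

gpt_convo = GptConversation(
    model_name="gpt-4o-mini",
    prompts=prompts,  # as in the setup() example above
    base_url=None,    # or the URL of an OpenAI-compatible server
)
if not gpt_convo.set_api_key(os.environ["OPENAI_API_KEY"]):
    raise SystemExit("invalid OpenAI API key")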

LangChainConversation

Bases: Conversation

Conversation class for a generic LangChain model.

Source code in biochatter/biochatter/llm_connect/langchain.py
class LangChainConversation(Conversation):
    """Conversation class for a generic LangChain model."""

    def __init__(
        self,
        model_name: str,
        model_provider: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        tools: list[Callable] | None = None,
        tool_call_mode: Literal["auto", "text"] = "auto",
        async_mode: bool = False,
        mcp: bool = False,
    ) -> None:
        """Initialise the LangChainConversation class.

        Connect to a generic LangChain model and set up a conversation with the
        user. Also initialise a second conversational agent to provide
        corrections to the model output, if necessary.

        Args:
        ----
            model_name (str): The name of the model to use.
            model_provider (str): The provider of the model to use.
            prompts (dict): A dictionary of prompts to use for the conversation.
            correct (bool): Whether to correct the model output.
            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each
                sentence individually.
            tools (list[Callable]): List of tool functions to use with the
                model.
            tool_call_mode (str): The mode to use for tool calls.
                "auto": Automatically call tools.
                "text": Only return text output of the tool call.
            async_mode (bool): Whether to run in async mode. Defaults to False.
            mcp (bool): Whether to use MCP mode.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
            tools=tools,
            tool_call_mode=tool_call_mode,
            mcp=mcp,
        )

        self.model_name = model_name
        self.model_provider = model_provider
        self.async_mode = async_mode

    # TODO: the name of this method is overloaded, since the api key is loaded
    # from the environment variables and not as an argument
    def set_api_key(self, api_key: str | None = None, user: str | None = None) -> bool:
        """Set the API key for the model provider.

        If the key is valid, initialise the conversational agent. Optionally set
        the user for usage statistics.

        Args:
        ----
            api_key (str): The API key for the model provider.

            user (str, optional): The user for usage statistics. If provided and
                equals "community", will track usage stats.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        self.user = user

        try:
            self.chat = init_chat_model(
                model=self.model_name,
                model_provider=self.model_provider,
                temperature=0,
            )
            self.ca_chat = init_chat_model(
                model=self.model_name,
                model_provider=self.model_provider,
                temperature=0,
            )

            # if binding happens here, tools will be available for all messages
            if self.tools:
                self.bind_tools(self.tools)

            return True

        except Exception:  # provider-specific exception types vary, so catch broadly
            self._chat = None
            self._ca_chat = None
            return False

    def _primary_query(
        self,
        tools: list[Callable] | None = None,
        explain_tool_result: bool = False,
        return_tool_calls_as_ai_message: bool = False,
        structured_model: BaseModel | None = None,
        wrap_structured_output: bool = False,
    ) -> tuple:
        """Run the primary query.

        Args:
        ----
            tools (list[Callable], optional): Additional tools to use for this specific query.
            explain_tool_result (bool, optional): Whether to explain the tool result.
            return_tool_calls_as_ai_message (bool, optional): Whether to return tool calls as an AI message.
            structured_model (BaseModel, optional): The structured output model to use.
            wrap_structured_output (bool, optional): Whether to wrap the structured output in JSON quotes.

        Returns:
        -------
            tuple: A tuple containing the response message and token usage information.

        """
        token_usage = None  # Initialize token_usage
        msg = None  # Initialize msg

        starting_tools = self.tools if self.tools else []
        in_chat_tools = tools if tools else []
        available_tools = starting_tools + in_chat_tools

        if structured_model and len(available_tools) > 0:
            raise ValueError("Structured output and tools cannot be used together at the moment.")

        if self.model_name in STRUCTURED_OUTPUT_MODELS and structured_model:
            chat = self.chat.with_structured_output(structured_model)
        elif (
            structured_model and self.model_name not in STRUCTURED_OUTPUT_MODELS
        ):  
            # add to the end of the prompt an instruction to return a structured output
            chat = self.chat
            self.messages[-1].content = (
                self.messages[-1].content
                + "\n\nPlease return a structured output following this schema: "
                + str(structured_model.model_json_schema())
                + (
                    " Just return the JSON object wrapped in ```json tags and nothing else."
                    if wrap_structured_output
                    else " Just return the JSON object and nothing else."
                )
            )

        if self.model_name in TOOL_CALLING_MODELS and not structured_model:
            chat = self.chat.bind_tools(available_tools)
        elif self.model_name not in TOOL_CALLING_MODELS and len(available_tools) > 0:
            self.tools_prompt = self._create_tool_prompt(
                tools=available_tools,
                additional_instructions=self.additional_tools_instructions,
            )
            if not self.messages:
                msg = "No messages available in the conversation"
                raise ValueError(msg)
            self.messages[-1] = self.tools_prompt
            chat = self.chat
        elif len(available_tools) == 0 and not structured_model:
            chat = self.chat

        try:
            response = chat.invoke(self.messages)
        except Exception as e:
            return str(e), None

        # Structured output don't have tool calls attribute
        if hasattr(response, "tool_calls"):
            token_usage = None
            # case in which the model called tools
            if len(response.tool_calls) > 0:
                msg = self._process_tool_calls(
                    tool_calls=response.tool_calls,
                    available_tools=available_tools,
                    response_content=response.content,
                    explain_tool_result=explain_tool_result,
                    return_tool_calls_as_ai_message=return_tool_calls_as_ai_message,
                )
            # case where the model does not support tool calling natively, called a tool and we need manual processing
            elif self.model_name not in TOOL_CALLING_MODELS and self.tools_prompt:
                cleaned_content = (
                    response.content.replace('"""', "").replace("json", "").replace("`", "").replace("\n", "").strip()
                )
                try:
                    tool_call_data = json.loads(cleaned_content)
                    msg = self._process_manual_tool_call(
                        tool_call=tool_call_data,
                        available_tools=available_tools,
                        explain_tool_result=explain_tool_result,
                    )
                    # token_usage remains None here, as in the native tool-call path above
                except json.JSONDecodeError:
                    # If JSON parsing fails, the model didn't return a valid tool call.
                    # Treat as a regular message from the LLM.
                    msg = response.content  # Use original content
                    # Update token_usage, similar to 'no tool calls' or 'manual structured output' paths
                    token_usage = response.usage_metadata.get("total_tokens") if response.usage_metadata else None
            # case where the model does not support structured output but the user has provided a structured model
            elif self.model_name not in STRUCTURED_OUTPUT_MODELS and structured_model:
                # check that the output conforms to the structured model
                pydantic_manual_validator(response.content, structured_model)
                msg = response.content
                token_usage = response.usage_metadata["total_tokens"]

            # no tool calls
            else:
                msg = response.content
                token_usage = response.usage_metadata["total_tokens"]

        # even if there are no tool calls, the standard langchain output has a tool_calls attribute;
        # this case therefore only happens when invoke returns a structured output
        else:
            msg = response.model_dump_json()
            if wrap_structured_output:
                msg = "```json\n" + msg + "\n```"
            token_usage = None

        self.append_ai_message(msg)

        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the Gemini API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the primary model.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )

        response = self.ca_chat.invoke(ca_messages)

        correction = response.content

        return correction
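
A hedged usage sketch: as the TODO above notes, init_chat_model reads the key from the provider's environment variable, and GeneAnswer is an invented schema to show structured output:

import os
from pydantic import BaseModel

os.environ.setdefault("ANTHROPIC_API_KEY", "YOUR_KEY")  # provider-specific variable

lc_convo = LangChainConversation(
    model_name="claude-3-5-sonnet-20240620",  # illustrative model/provider pair
    model_provider="anthropic",
    prompts=prompts,  # as in the setup() example above
)
lc_convo.set_api_key()  # no key argument; returns False on failure

class GeneAnswer(BaseModel):
    gene: str
    summary: str

msg, token_usage, _ = lc_convo.query(
    "Summarise TP53.",
    structured_model=GeneAnswer,
    wrap_structured_output=True,  # wrap the returned JSON in json code fences
)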

__init__(model_name, model_provider, prompts, correct=False, split_correction=False, tools=None, tool_call_mode='auto', async_mode=False, mcp=False)

Initialise the LangChainConversation class.

Connect to a generic LangChain model and set up a conversation with the user. Also initialise a second conversational agent to provide corrections to the model output, if necessary.


model_name (str): The name of the model to use.
model_provider (str): The provider of the model to use.
prompts (dict): A dictionary of prompts to use for the conversation.
correct (bool): Whether to correct the model output.
split_correction (bool): Whether to correct the model output by
    splitting the output into sentences and correcting each
    sentence individually.
tools (list[Callable]): List of tool functions to use with the
    model.
tool_call_mode (str): The mode to use for tool calls.
    "auto": Automatically call tools.
    "text": Only return text output of the tool call.
async_mode (bool): Whether to run in async mode. Defaults to False.
mcp (bool): Whether to use MCP mode.
Source code in biochatter/biochatter/llm_connect/langchain.py
def __init__(
    self,
    model_name: str,
    model_provider: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
    tools: list[Callable] | None = None,
    tool_call_mode: Literal["auto", "text"] = "auto",
    async_mode: bool = False,
    mcp: bool = False,
) -> None:
    """Initialise the LangChainConversation class.

    Connect to a generic LangChain model and set up a conversation with the
    user. Also initialise a second conversational agent to provide
    corrections to the model output, if necessary.

    Args:
    ----
        model_name (str): The name of the model to use.
        model_provider (str): The provider of the model to use.
        prompts (dict): A dictionary of prompts to use for the conversation.
        correct (bool): Whether to correct the model output.
        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each
            sentence individually.
        tools (list[Callable]): List of tool functions to use with the
            model.
        tool_call_mode (str): The mode to use for tool calls.
            "auto": Automatically call tools.
            "text": Only return text output of the tool call.
        async_mode (bool): Whether to run in async mode. Defaults to False.
        mcp (bool): Whether to use MCP mode.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
        tools=tools,
        tool_call_mode=tool_call_mode,
        mcp=mcp,
    )

    self.model_name = model_name
    self.model_provider = model_provider
    self.async_mode = async_mode

set_api_key(api_key=None, user=None)

Set the API key for the model provider.

If the key is valid, initialise the conversational agent. Optionally set the user for usage statistics.


api_key (str): The API key for the model provider.

user (str, optional): The user for usage statistics. If provided and
    equals "community", will track usage stats.

bool: True if the API key is valid, False otherwise.
Source code in biochatter/biochatter/llm_connect/langchain.py
def set_api_key(self, api_key: str | None = None, user: str | None = None) -> bool:
    """Set the API key for the model provider.

    If the key is valid, initialise the conversational agent. Optionally set
    the user for usage statistics.

    Args:
    ----
        api_key (str): The API key for the model provider.

        user (str, optional): The user for usage statistics. If provided and
            equals "community", will track usage stats.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    self.user = user

    try:
        self.chat = init_chat_model(
            model=self.model_name,
            model_provider=self.model_provider,
            temperature=0,
        )
        self.ca_chat = init_chat_model(
            model=self.model_name,
            model_provider=self.model_provider,
            temperature=0,
        )

        # if binding happens here, tools will be available for all messages
        if self.tools:
            self.bind_tools(self.tools)

        return True

    except Exception:  # provider-specific exception types vary, so catch broadly
        self._chat = None
        self._ca_chat = None
        return False
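
Tools can also be passed per message rather than at construction; a sketch reusing the hypothetical gene_synonyms tool from the bind_tools example:

msg, token_usage, _ = lc_convo.query(
    "List synonyms for TP53.",
    tools=[gene_synonyms],     # used only for this message
    explain_tool_result=True,  # ask the model to interpret the tool output
)
print(msg)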

LiteLLMConversation

Bases: Conversation

A unified interface for multiple LLM models using LiteLLM.

This class implements the abstract methods from the Conversation parent class and provides a unified way to interact with different LLM providers through LiteLLM, which supports models from OpenAI, Anthropic, HuggingFace, and more.

Attributes:

model_name (str): The name of the model to use.

prompts (dict): Dictionary containing various prompts used in the conversation.

correct (bool): Whether to use a correcting agent.

split_correction (bool): Whether to split corrections by sentence.

rag_agents (list): List of RAG agents available for context enhancement.

history (list): Conversation history for logging/printing.

messages (list): Messages in the conversation.

ca_messages (list): Messages for the correcting agent.

api_key (str): API key for the LLM provider.

user (str): Username for the API, if required.

Source code in biochatter/biochatter/llm_connect/llmlite.py
class LiteLLMConversation(Conversation):
    """A unified interface for multiple LLM models using LiteLLM.

    This class implements the abstract methods from the Conversation parent class
    and provides a unified way to interact with different LLM providers through
    LiteLLM, which supports models from OpenAI, Anthropic, HuggingFace, and more.

    Attributes:
        model_name (str): The name of the model to use.
        prompts (dict): Dictionary containing various prompts used in the conversation.
        correct (bool): Whether to use a correcting agent.
        split_correction (bool): Whether to split corrections by sentence.
        rag_agents (list): List of RAG agents available for context enhancement.
        history (list): Conversation history for logging/printing.
        messages (list): Messages in the conversation.
        ca_messages (list): Messages for the correcting agent.
        api_key (str): API key for the LLM provider.
        user (str): Username for the API, if required.

    """

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
        use_ragagent_selector: bool = False,
        update_token_usage: Callable | None = None,
    ) -> None:
        """Initialize a UnifiedConversation instance.

        Args:
            model_name (str): The name of the model to use.
            prompts (dict): Dictionary containing various prompts used in the conversation.
            correct (bool): Whether to use a correcting agent. Defaults to False.
            split_correction (bool): Whether to split corrections by sentence. Defaults to False.
            use_ragagent_selector (bool): Whether to use RagAgentSelector. Defaults to False.
            update_token_usage (Callable): A function to update the token usage statistics.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
            use_ragagent_selector=use_ragagent_selector,
        )
        self.api_key = None
        self.user = None
        self.ca_model_name = model_name
        self._update_token_usage = update_token_usage

    def get_litellm_object(self, api_key: str, model: str) -> ChatLiteLLM:
        """Get a LiteLLM object for the specified model and API key.

        Args:
            api_key (str): The API key for the LLM provider.
            model (str): The name of the model to use.

        Returns:
            ChatLiteLLM: An instance of ChatLiteLLM configured with the specified model, temperature, max tokens and API key.

        Raises:
            ValueError: If the API key is None.
            litellm.exceptions.AuthenticationError: If there is an authentication error.
            litellm.exceptions.InvalidRequestError: If the request is invalid.
            litellm.exceptions.RateLimitError: If the rate limit is exceeded.
            litellm.exceptions.ServiceUnavailableError: If the service is unavailable.
            litellm.exceptions.APIError: If there is a general API error.
            litellm.exceptions.Timeout: If the request times out.
            litellm.exceptions.APIConnectionError: If there is a connection error.
            litellm.exceptions.InternalServerError: If there is an internal server error.
            Exception: If there is an unexpected error.

        """
        if api_key is None:
            raise ValueError("API key must not be None")

        try:
            max_tokens = self.get_model_max_tokens(model)
        except Exception:
            max_tokens = None

        kwargs = {
            "temperature": 0,
            "max_tokens": max_tokens,
            "model_name": model,
        }

        if self.model_name.startswith("gpt-"):
            api_key_kwarg = "openai_api_key"
        elif self.model_name.startswith("claude-"):
            api_key_kwarg = "anthropic_api_key"
        elif self.model_name.startswith("azure/"):
            api_key_kwarg = "azure_api_key"
        elif (self.model_name.startswith("mistral/") or
                self.model_name in ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"]):
            api_key_kwarg = "api_key"
        else:
            api_key_kwarg = "api_key"

        kwargs[api_key_kwarg] = api_key
        # ChatLiteLLM raises the provider-specific litellm exceptions listed
        # in the docstring above; they propagate unchanged to the caller.
        return ChatLiteLLM(**kwargs)

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the LLM provider.

        Args:
            api_key (str): The API key for the LLM provider.
            user (str | None): The username for the API, if required.

        Returns:
            bool: True if the API key is successfully set, False otherwise.

        Raises:
            ValueError: If the model name or correction model name is not set.
            TypeError: If the LiteLLM object initialization fails.
            Exception: If there is an unexpected error.

        """
        try:
            if self.model_name is None:
                raise ValueError("Primary Model name is not set.")

            if self.ca_model_name is None:
                raise ValueError("Correction Model name is not set.")

            self.chat = self.get_litellm_object(api_key, self.model_name)
            if self.chat is None:
                raise TypeError("Failed to initialize primary agent chat object.")

            self.ca_chat = self.get_litellm_object(api_key, self.ca_model_name)
            if self.ca_chat is None:
                raise TypeError("Failed to initialize correcting agent chat object.")

            self.user = user
            if user == "community":
                self.usage_stats = get_stats(user=user)
            return True

        except Exception:
            self.chat = None
            self.ca_chat = None
            return False
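
A usage sketch for the LiteLLM wrapper (the model identifier follows LiteLLM naming; an OpenAI model is shown for illustration):

import os

lite_convo = LiteLLMConversation(
    model_name="gpt-4o-mini",
    prompts=prompts,  # as in the setup() example above
)
if not lite_convo.set_api_key(os.environ["OPENAI_API_KEY"]):
    raise SystemExit("failed to initialise LiteLLM chat objects")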

    def json_serializable(self, obj):
        """Convert non-serializable objects to serializable format."""
        if obj is None:
            raise ValueError("Object is None")
        if hasattr(obj, "__dict__"):
            return obj.__dict__
        if hasattr(obj, "dict") and callable(obj.dict):
            return obj.dict()
        try:
            return str(obj)
        except Exception:
            return repr(obj)

    def parse_llm_response(self, response) -> dict | None:
        """Parse the response from the LLM."""
        try:
            full_json = json.loads(json.dumps(response, default=self.json_serializable))

            if not full_json.get("generations"):
                return None

            generations = full_json["generations"]
            if not generations or not generations[0]:
                return None

            first_generation = generations[0][0]
            if not first_generation or not first_generation.get("message"):
                return None

            message = first_generation["message"]
            if not message.get("response_metadata"):
                return None

            response_metadata = message["response_metadata"]
            if not response_metadata.get("token_usage"):
                return None

            return response_metadata["token_usage"]

        except (KeyError, IndexError, TypeError, json.JSONDecodeError) as e:
            print(f"Error parsing LLM response: {e}")
            return None

        except Exception as e:
            print(f"Unexpected error while parsing LLM response: {e}")
            return None

    def _primary_query(self) -> tuple:
        """Query the LLM API with the user's message.

        Return the response using the message history (flattery system messages,
        prior conversation) as context. Correct the response if necessary.

        Returns:
            tuple: A tuple containing the response from the LLM API and the token usage.

        """
        try:
            response = self.chat.generate([self.messages])
        except (
            AttributeError,
            litellm.exceptions.APIError,
            litellm.exceptions.OpenAIError,
            litellm.exceptions.RateLimitError,
            litellm.exceptions.APIConnectionError,
            litellm.exceptions.BadRequestError,
            litellm.exceptions.AuthenticationError,
            litellm.exceptions.InternalServerError,
            litellm.exceptions.PermissionDeniedError,
            litellm.exceptions.UnprocessableEntityError,
            litellm.exceptions.APIResponseValidationError,
            litellm.exceptions.BudgetExceededError,
            litellm.exceptions.RejectedRequestError,
            litellm.exceptions.ServiceUnavailableError,
            litellm.exceptions.Timeout,
        ) as e:
            return e, None
        except Exception as e:
            return e, None

        msg = response.generations[0][0].text
        token_usage = self.parse_llm_response(response)

        self.append_ai_message(msg)

        self._update_usage_stats(self.model_name, token_usage)

        return msg, token_usage

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the LLM.

        Args:
            msg (str): The response message to correct.

        Returns:
            str: The corrected response message.

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )

        response = self.ca_chat.generate([ca_messages])

        correction = response.generations[0][0].text
        token_usage = self.parse_llm_response(response)

        self._update_usage_stats(self.ca_model_name, token_usage)

        return correction

    def _update_usage_stats(self, model: str, token_usage: dict) -> None:
        """Update the usage statistics.

        Args:
            model (str): The model name.
            token_usage (dict): The token usage information.

        """
        if self.user == "community" and model:
            stats_dict = {f"{k}:{model}": v for k, v in token_usage.items() if isinstance(v, int | float)}
            self.usage_stats.increment(
                "usage:[date]:[user]",
                stats_dict,
            )

        if self._update_token_usage is not None:
            self._update_token_usage(self.user, model, token_usage)

    def get_all_model_list(self) -> list:
        """Get a list of all available models."""
        return litellm.model_list

    def get_models_by_provider(self):
        """Get a dictionary of models grouped by their provider."""
        return litellm.models_by_provider

    def get_all_model_info(self) -> dict:
        """Get information about all available models."""
        return litellm.model_cost

    def get_model_info(self, model: str) -> dict:
        """Get information about a specific model.

        Args:
            model (str): The name of the model.

        Returns:
            dict: A dictionary containing information about the specified model.

        """
        models_info: dict = self.get_all_model_info()
        if model not in models_info:
            raise litellm.exceptions.NotFoundError(f"{model} model's information is not available.",
                                                   model=model,
                                                   llm_provider="Unknown")
        return models_info[model]

    def get_model_max_tokens(self, model: str) -> int:
        """Get the maximum number of tokens for a specific model.

        Args:
            model (str): The name of the model.

        Returns:
            int: The maximum number of tokens for the specified model.

        """
        try:
            model_info = self.get_model_info(model)
            if "max_tokens" not in model_info:
                raise litellm.exceptions.NotFoundError(f"Max token information for {model} is not available.",
                                                       model=model,
                                                       llm_provider="Unknown")
            return model_info["max_tokens"]
        except litellm.exceptions.NotFoundError as e:
            raise e
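
Example usage (a minimal sketch: the class name follows the docstrings
above, while the import path and the three-element return of `query` are
assumptions based on the other conversation classes in this reference):

from biochatter.llm_connect.llmlite import UnifiedConversation

convo = UnifiedConversation(
    model_name="gpt-4o-mini",  # any model name known to LiteLLM
    prompts={},
)
if convo.set_api_key(api_key="sk-...", user="community"):
    msg, token_usage, correction = convo.query("What is a gene?")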

__init__(model_name, prompts, correct=False, split_correction=False, use_ragagent_selector=False, update_token_usage=None)

Initialize a UnifiedConversation instance.

Parameters:

    model_name (str): The name of the model to use. Required.
    prompts (dict): Dictionary containing various prompts used in the conversation. Required.
    correct (bool): Whether to use a correcting agent. Defaults to False.
    split_correction (bool): Whether to split corrections by sentence. Defaults to False.
    use_ragagent_selector (bool): Whether to use RagAgentSelector. Defaults to False.
    update_token_usage (Callable | None): A function to update the token usage statistics. Defaults to None.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
    use_ragagent_selector: bool = False,
    update_token_usage: Callable | None = None,
) -> None:
    """Initialize a UnifiedConversation instance.

    Args:
        model_name (str): The name of the model to use.
        prompts (dict): Dictionary containing various prompts used in the conversation.
        correct (bool): Whether to use a correcting agent. Defaults to False.
        split_correction (bool): Whether to split corrections by sentence. Defaults to False.
        use_ragagent_selector (bool): Whether to use RagAgentSelector. Defaults to False.
        update_token_usage (Callable): A function to update the token usage statistics.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
        use_ragagent_selector=use_ragagent_selector,
    )
    self.api_key = None
    self.user = None
    self.ca_model_name = model_name
    self._update_token_usage = update_token_usage

get_all_model_info()

Get information about all available models.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_all_model_info(self) -> dict:
    """Get information about all available models."""
    return litellm.model_cost

get_all_model_list()

Get a list of all available models.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_all_model_list(self) -> list:
    """Get a list of all available models."""
    return litellm.model_list

get_litellm_object(api_key, model)

Get a LiteLLM object for the specified model and API key.

Parameters:

    api_key (str): The API key for the LLM provider. Required.
    model (str): The name of the model to use. Required.

Returns:

    ChatLiteLLM: An instance of ChatLiteLLM configured with the specified
        model, temperature, max tokens and API key.

Raises:

    ValueError: If the API key is None.
    AuthenticationError: If there is an authentication error.
    InvalidRequestError: If the request is invalid.
    RateLimitError: If the rate limit is exceeded.
    ServiceUnavailableError: If the service is unavailable.
    APIError: If there is a general API error.
    Timeout: If the request times out.
    APIConnectionError: If there is a connection error.
    InternalServerError: If there is an internal server error.
    Exception: If there is an unexpected error.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_litellm_object(self, api_key: str, model: str) -> ChatLiteLLM:
    """Get a LiteLLM object for the specified model and API key.

    Args:
        api_key (str): The API key for the LLM provider.
        model (str): The name of the model to use.

    Returns:
        ChatLiteLLM: An instance of ChatLiteLLM configured with the specified model, temperature, max tokens and API key.

    Raises:
        ValueError: If the API key is None.
        litellm.exceptions.AuthenticationError: If there is an authentication error.
        litellm.exceptions.InvalidRequestError: If the request is invalid.
        litellm.exceptions.RateLimitError: If the rate limit is exceeded.
        litellm.exceptions.ServiceUnavailableError: If the service is unavailable.
        litellm.exceptions.APIError: If there is a general API error.
        litellm.exceptions.Timeout: If the request times out.
        litellm.exceptions.APIConnectionError: If there is a connection error.
        litellm.exceptions.InternalServerError: If there is an internal server error.
        Exception: If there is an unexpected error.

    """
    if api_key is None:
        raise ValueError("API key must not be None")

    try:
        max_tokens = self.get_model_max_tokens(model)
    except Exception:
        # Fall back when litellm has no max-token info for this model.
        max_tokens = None

    kwargs = {
        "temperature": 0,
        "max_tokens": max_tokens,
        "model_name": model,
    }

    if self.model_name.startswith("gpt-"):
        api_key_kwarg= "openai_api_key"
    elif self.model_name.startswith("claude-"):
        api_key_kwarg= "anthropic_api_key"
    elif self.model_name.startswith("azure/"):
        api_key_kwarg= "azure_api_key"
    elif (self.model_name.startswith("mistral/") or
            self.model_name in ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"]):
        api_key_kwarg= "api_key"
    else:
        api_key_kwarg= "api_key"

    kwargs[api_key_kwarg]= api_key
    try:
        return ChatLiteLLM(**kwargs)

    except (litellm.exceptions.AuthenticationError,
            litellm.exceptions.InvalidRequestError,
            litellm.exceptions.RateLimitError,
            litellm.exceptions.ServiceUnavailableError,
            litellm.exceptions.APIError,
            litellm.exceptions.Timeout,
            litellm.exceptions.APIConnectionError,
            litellm.exceptions.InternalServerError):
        # Known API setup errors (and anything unexpected) propagate
        # unchanged to the caller.
        raise
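
For illustration: the branching above inspects `self.model_name` (the
primary model), so a conversation created for a "claude-" model forwards
the key as `anthropic_api_key` (a sketch with placeholder values):

convo = UnifiedConversation(model_name="claude-3-5-sonnet-20240620", prompts={})
chat = convo.get_litellm_object("sk-ant-...", "claude-3-5-sonnet-20240620")
# roughly equivalent to:
# ChatLiteLLM(temperature=0, max_tokens=<from litellm.model_cost>,
#             model_name="claude-3-5-sonnet-20240620",
#             anthropic_api_key="sk-ant-...")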

get_model_info(model)

Get information about a specific model.

Parameters:

    model (str): The name of the model. Required.

Returns:

    dict: A dictionary containing information about the specified model.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_model_info(self, model: str) -> dict:
    """Get information about a specific model.

    Args:
        model (str): The name of the model.

    Returns:
        dict: A dictionary containing information about the specified model.

    """
    models_info: dict = self.get_all_model_info()
    if model not in models_info:
        raise litellm.exceptions.NotFoundError(f"{model} model's information is not available.",
                                               model=model,
                                               llm_provider="Unknown")
    return models_info[model]

get_model_max_tokens(model)

Get the maximum number of tokens for a specific model.

Parameters:

    model (str): The name of the model. Required.

Returns:

    int: The maximum number of tokens for the specified model.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_model_max_tokens(self, model: str) -> int:
    """Get the maximum number of tokens for a specific model.

    Args:
        model (str): The name of the model.

    Returns:
        int: The maximum number of tokens for the specified model.

    """
    try:
        model_info = self.get_model_info(model)
        if "max_tokens" not in model_info:
            raise litellm.exceptions.NotFoundError(f"Max token information for {model} is not available.",
                                                   model=model,
                                                   llm_provider="Unknown")
        return model_info["max_tokens"]
    except litellm.exceptions.NotFoundError as e:
        raise e
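
Example (the model name is illustrative; a
litellm.exceptions.NotFoundError is raised when litellm.model_cost has no
entry, or no max_tokens field, for the model):

max_tokens = convo.get_model_max_tokens("gpt-4o-mini")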

get_models_by_provider()

Get a dictionary of models grouped by their provider.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def get_models_by_provider(self):
    """Get a dictionary of models grouped by their provider."""
    return litellm.models_by_provider

json_serializable(obj)

Convert non-serializable objects to serializable format.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def json_serializable(self, obj):
    """Convert non-serializable objects to serializable format."""
    if obj is None:
        raise ValueError("Object is None")
    if hasattr(obj, "__dict__"):
        return obj.__dict__
    if hasattr(obj, "dict") and callable(obj.dict):
        return obj.dict()
    try:
        return str(obj)
    except Exception:
        return repr(obj)

parse_llm_response(response)

Parse the response from the LLM.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def parse_llm_response(self, response) -> dict | None:
    """Parse the response from the LLM."""
    try:
        full_json = json.loads(json.dumps(response, default=self.json_serializable))

        if not full_json.get("generations"):
            return None

        generations = full_json["generations"]
        if not generations or not generations[0]:
            return None

        first_generation = generations[0][0]
        if not first_generation or not first_generation.get("message"):
            return None

        message = first_generation["message"]
        if not message.get("response_metadata"):
            return None

        response_metadata = message["response_metadata"]
        if not response_metadata.get("token_usage"):
            return None

        return response_metadata["token_usage"]

    except (KeyError, IndexError, TypeError, json.JSONDecodeError) as e:
        print(f"Error parsing LLM response: {e}")
        return None

    except Exception as e:
        print(f"Unexpected error while parsing LLM response: {e}")
        return None
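
The method walks a serialised LangChain generation result; a sketch of the
nested shape it expects (field names follow the code above, values are
illustrative):

response_like = {
    "generations": [[{
        "message": {
            "response_metadata": {
                "token_usage": {
                    "prompt_tokens": 12,
                    "completion_tokens": 34,
                    "total_tokens": 46,
                },
            },
        },
    }]],
}
# parse_llm_response returns the inner token_usage dict, or None if any
# level of this structure is missing.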

set_api_key(api_key, user=None)

Set the API key for the LLM provider.

Parameters:

    api_key (str): The API key for the LLM provider. Required.
    user (str | None): The user for usage statistics. If it equals
        "community", usage stats will be tracked. Defaults to None.

Returns:

    bool: True if the API key is successfully set, False otherwise.

Raises:

    ValueError: If the model name or correction model name is not set.
    TypeError: If the LiteLLM object initialization fails.
    Exception: If there is an unexpected error.

Source code in biochatter/biochatter/llm_connect/llmlite.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the LLM provider.

    Args:
        api_key (str): The API key for the LLM provider.
        user (str | None): The user for usage statistics. If it equals
            "community", usage stats will be tracked. Defaults to None.

    Returns:
        bool: True if the API key is successfully set, False otherwise.

    Raises:
        ValueError: If the model name or correction model name is not set.
        TypeError: If the LiteLLM object initialization fails.
        Exception: If there is an unexpected error.

    """
    try:
        if self.model_name is None:
            raise ValueError("Primary model name is not set.")

        if self.ca_model_name is None:
            raise ValueError("Correction model name is not set.")

        self.chat = self.get_litellm_object(api_key, self.model_name)
        if self.chat is None:
            raise TypeError("Failed to initialize primary agent chat object.")

        self.ca_chat = self.get_litellm_object(api_key, self.ca_model_name)
        if self.ca_chat is None:
            raise TypeError("Failed to initialize correcting agent chat object.")

        self.user = user
        if user == "community":
            self.usage_stats = get_stats(user=user)
        return True

    except Exception:
        # Covers the ValueError/TypeError raised above as well as any
        # unexpected initialisation error.
        self.chat = None
        self.ca_chat = None
        return False

OllamaConversation

Bases: Conversation

Conversation class for the Ollama model.

Source code in biochatter/biochatter/llm_connect/ollama.py
class OllamaConversation(Conversation):
    """Conversation class for the Ollama model."""

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Set the API key for the Ollama API. Not implemented.

        Args:
        ----
            api_key (str): The API key for the Ollama API.

            user (str): The user for usage statistics.

        Returns:
        -------
            bool: True if the API key is valid, False otherwise.

        """
        err = "Ollama does not require an API key."
        raise NotImplementedError(err)

    def __init__(
        self,
        base_url: str,
        prompts: dict,
        model_name: str = "llama3",
        correct: bool = False,
        split_correction: bool = False,
    ) -> None:
        """Connect to an Ollama LLM via the Ollama/Langchain library.

        Set up a conversation with the user. Also initialise a second
        conversational agent to provide corrections to the model output, if
        necessary.

        Args:
        ----
            base_url (str): The base URL of the Ollama instance.

            prompts (dict): A dictionary of prompts to use for the conversation.

            model_name (str): The name of the model to use. Can be any model
                name available in your Ollama instance.

            correct (bool): Whether to correct the model output.

            split_correction (bool): Whether to correct the model output by
                splitting the output into sentences and correcting each sentence
                individually.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
        )
        self.model_name = model_name
        self.model = ChatOllama(
            base_url=base_url,
            model=self.model_name,
            temperature=0.0,
        )

        self.ca_model_name = "mixtral:latest"

        self.ca_model = ChatOllama(
            base_url=base_url,
            model=self.ca_model_name,
            temperature=0.0,
        )

    def append_system_message(self, message: str) -> None:
        """Override the system message addition.

        Ollama does not accept multiple system messages. Concatenate them if
        there are multiple.

        Args:
        ----
            message (str): The message to append.

        """
        # if there is not already a system message in self.messages
        if not any(isinstance(m, SystemMessage) for m in self.messages):
            self.messages.append(
                SystemMessage(
                    content=message,
                ),
            )
        else:
            # if there is a system message, append to the last one
            for i, msg in enumerate(self.messages):
                if isinstance(msg, SystemMessage):
                    self.messages[i].content += f"\n{message}"
                    break

    def append_ca_message(self, message: str) -> None:
        """Override the system message addition for the correcting agent.

        Ollama does not accept multiple system messages. Concatenate them if
        there are multiple.

        TODO this currently assumes that the correcting agent is the same model
        as the primary one.

        Args:
        ----
            message (str): The message to append.

        """
        # if there is not already a system message in self.messages
        if not any(isinstance(m, SystemMessage) for m in self.ca_messages):
            self.ca_messages.append(
                SystemMessage(
                    content=message,
                ),
            )
        else:
            # if there is a system message, append to the last one
            for i, msg in enumerate(self.ca_messages):
                if isinstance(msg, SystemMessage):
                    self.ca_messages[i].content += f"\n{message}"
                    break

    def _primary_query(self, **kwargs) -> tuple:
        """Query the Ollama client API with the user's message.

        Return the response using the message history (flattery system messages,
        prior conversation) as context. Correct the response if necessary.

        Returns
        -------
            tuple: A tuple containing the response from the Ollama API
            (formatted similarly to responses from the OpenAI API) and the token
            usage.

        """
        try:
            messages = self._create_history(self.messages)
            response = self.model.invoke(
                messages,
                # ,generate_config={"max_tokens": 2048, "temperature": 0},
            )
        except (
            openai._exceptions.APIError,
            openai._exceptions.OpenAIError,
            openai._exceptions.ConflictError,
            openai._exceptions.NotFoundError,
            openai._exceptions.APIStatusError,
            openai._exceptions.RateLimitError,
            openai._exceptions.APITimeoutError,
            openai._exceptions.BadRequestError,
            openai._exceptions.APIConnectionError,
            openai._exceptions.AuthenticationError,
            openai._exceptions.InternalServerError,
            openai._exceptions.PermissionDeniedError,
            openai._exceptions.UnprocessableEntityError,
            openai._exceptions.APIResponseValidationError,
        ) as e:
            return str(e), None
        response_dict = response.dict()
        msg = response_dict["content"]
        token_usage = response_dict["response_metadata"]["eval_count"]

        self._update_usage_stats(self.model_name, token_usage)

        self.append_ai_message(msg)

        return msg, token_usage

    def _create_history(self, messages: list) -> list:
        history = []
        for m in messages:
            if isinstance(m, AIMessage):
                history.append(AIMessage(content=m.content))
            elif isinstance(m, HumanMessage):
                history.append(HumanMessage(content=m.content))
            elif isinstance(m, SystemMessage):
                history.append(SystemMessage(content=m.content))

        return history

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the Ollama API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the model.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )
        # Use the correcting-agent message history assembled above, not the
        # primary conversation history.
        response = self.ca_model.invoke(
            self._create_history(ca_messages),
        ).dict()
        correction = response["content"]
        token_usage = response["response_metadata"]["eval_count"]

        self._update_usage_stats(self.ca_model_name, token_usage)

        return correction

    def _update_usage_stats(self, model: str, token_usage: dict) -> None:
        """Update redis database with token usage statistics.

        Use the usage_stats object with the increment method.

        Args:
        ----
            model (str): The model name.

            token_usage (dict): The token usage statistics.

        """

__init__(base_url, prompts, model_name='llama3', correct=False, split_correction=False)

Connect to an Ollama LLM via the Ollama/Langchain library.

Set up a conversation with the user. Also initialise a second conversational agent to provide corrections to the model output, if necessary.


Parameters:

    base_url (str): The base URL of the Ollama instance.
    prompts (dict): A dictionary of prompts to use for the conversation.
    model_name (str): The name of the model to use. Can be any model
        name available in your Ollama instance.
    correct (bool): Whether to correct the model output.
    split_correction (bool): Whether to correct the model output by
        splitting the output into sentences and correcting each sentence
        individually.

Source code in biochatter/biochatter/llm_connect/ollama.py
def __init__(
    self,
    base_url: str,
    prompts: dict,
    model_name: str = "llama3",
    correct: bool = False,
    split_correction: bool = False,
) -> None:
    """Connect to an Ollama LLM via the Ollama/Langchain library.

    Set up a conversation with the user. Also initialise a second
    conversational agent to provide corrections to the model output, if
    necessary.

    Args:
    ----
        base_url (str): The base URL of the Ollama instance.

        prompts (dict): A dictionary of prompts to use for the conversation.

        model_name (str): The name of the model to use. Can be any model
            name available in your Ollama instance.

        correct (bool): Whether to correct the model output.

        split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each sentence
            individually.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
    )
    self.model_name = model_name
    self.model = ChatOllama(
        base_url=base_url,
        model=self.model_name,
        temperature=0.0,
    )

    self.ca_model_name = "mixtral:latest"

    self.ca_model = ChatOllama(
        base_url=base_url,
        model=self.ca_model_name,
        temperature=0.0,
    )

append_ca_message(message)

Override the system message addition for the correcting agent.

Ollama does not accept multiple system messages. Concatenate them if there are multiple.

TODO this currently assumes that the correcting agent is the same model as the primary one.


Parameters:

    message (str): The message to append.

Source code in biochatter/biochatter/llm_connect/ollama.py
def append_ca_message(self, message: str) -> None:
    """Override the system message addition for the correcting agent.

    Ollama does not accept multiple system messages. Concatenate them if
    there are multiple.

    TODO this currently assumes that the correcting agent is the same model
    as the primary one.

    Args:
    ----
        message (str): The message to append.

    """
    # if there is not already a system message in self.messages
    if not any(isinstance(m, SystemMessage) for m in self.ca_messages):
        self.ca_messages.append(
            SystemMessage(
                content=message,
            ),
        )
    else:
        # if there is a system message, append to the last one
        for i, msg in enumerate(self.ca_messages):
            if isinstance(msg, SystemMessage):
                self.ca_messages[i].content += f"\n{message}"
                break

append_system_message(message)

Override the system message addition.

Ollama does not accept multiple system messages. Concatenate them if there are multiple.


Parameters:

    message (str): The message to append.

Source code in biochatter/biochatter/llm_connect/ollama.py
def append_system_message(self, message: str) -> None:
    """Override the system message addition.

    Ollama does not accept multiple system messages. Concatenate them if
    there are multiple.

    Args:
    ----
        message (str): The message to append.

    """
    # if there is not already a system message in self.messages
    if not any(isinstance(m, SystemMessage) for m in self.messages):
        self.messages.append(
            SystemMessage(
                content=message,
            ),
        )
    else:
        # if there is a system message, append to the last one
        for i, msg in enumerate(self.messages):
            if isinstance(msg, SystemMessage):
                self.messages[i].content += f"\n{message}"
                break

set_api_key(api_key, user=None)

Set the API key for the Ollama API. Not implemented.


Parameters:

    api_key (str): The API key for the Ollama API.
    user (str): The user for usage statistics.

Returns:

    bool: True if the API key is valid, False otherwise.

Source code in biochatter/biochatter/llm_connect/ollama.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Set the API key for the Ollama API. Not implemented.

    Args:
    ----
        api_key (str): The API key for the Ollama API.

        user (str): The user for usage statistics.

    Returns:
    -------
        bool: True if the API key is valid, False otherwise.

    """
    err = "Ollama does not require an API key."
    raise NotImplementedError(err)

WasmConversation

Bases: Conversation

Conversation class for the wasm model.

Source code in biochatter/biochatter/llm_connect/misc.py
class WasmConversation(Conversation):
    """Conversation class for the wasm model."""

    def __init__(
        self,
        model_name: str,
        prompts: dict,
        correct: bool = False,
        split_correction: bool = False,
    ) -> None:
        """Initialize the WasmConversation class.

        This class is used to return the complete query as a string to be used
        in the frontend running the wasm model. It does not call the API itself,
        but updates the message history similarly to the other conversation
        classes. It overrides the `query` method from the `Conversation` class
        to return a plain string that contains the entire message for the model
        as the first element of the tuple. The second and third elements are
        `None` as there is no token usage or correction for the wasm model.

        """
        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
        )

    def query(self, text: str) -> tuple:
        """Return the entire message history as a single string.

        This is the message that is sent to the wasm model.

        Args:
        ----
            text (str): The user query.

        Returns:
        -------
            tuple: A tuple containing the message history as a single string,
                and `None` for the second and third elements of the tuple.

        """
        self.append_user_message(text)

        self._inject_context(text)

        return (self._primary_query(), None, None)

    def _primary_query(self):
        """Concatenate all messages in the conversation.

        Build a single string from all messages in the conversation.
        Currently discards information about roles (system, user).

        Returns
        -------
            str: A single string from all messages in the conversation.

        """
        return "\n".join([m.content for m in self.messages])

    def _correct_response(self, msg: str) -> str:
        """Do not use for the wasm model."""
        return "ok"

    def set_api_key(self, api_key: str, user: str | None = None) -> bool:
        """Do not use for the wasm model."""
        return True
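
A minimal sketch of the wasm flow (the model name is a placeholder): the
"response" is simply the concatenated message history, which the frontend
passes to a browser-side model.

convo = WasmConversation(model_name="wasm-model", prompts={})
convo.append_system_message("You are a helpful assistant.")
prompt_string, token_usage, correction = convo.query("Hello!")
# prompt_string contains all messages joined by newlines;
# token_usage and correction are both None.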

__init__(model_name, prompts, correct=False, split_correction=False)

Initialize the WasmConversation class.

This class is used to return the complete query as a string to be used in the frontend running the wasm model. It does not call the API itself, but updates the message history similarly to the other conversation classes. It overrides the query method from the Conversation class to return a plain string that contains the entire message for the model as the first element of the tuple. The second and third elements are None as there is no token usage or correction for the wasm model.

Source code in biochatter/biochatter/llm_connect/misc.py
def __init__(
    self,
    model_name: str,
    prompts: dict,
    correct: bool = False,
    split_correction: bool = False,
) -> None:
    """Initialize the WasmConversation class.

    This class is used to return the complete query as a string to be used
    in the frontend running the wasm model. It does not call the API itself,
    but updates the message history similarly to the other conversation
    classes. It overrides the `query` method from the `Conversation` class
    to return a plain string that contains the entire message for the model
    as the first element of the tuple. The second and third elements are
    `None` as there is no token usage or correction for the wasm model.

    """
    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
    )

query(text)

Return the entire message history as a single string.

This is the message that is sent to the wasm model.


Parameters:

    text (str): The user query.

Returns:

    tuple: A tuple containing the message history as a single string,
        and `None` for the second and third elements of the tuple.

Source code in biochatter/biochatter/llm_connect/misc.py
def query(self, text: str) -> tuple:
    """Return the entire message history as a single string.

    This is the message that is sent to the wasm model.

    Args:
    ----
        text (str): The user query.

    Returns:
    -------
        tuple: A tuple containing the message history as a single string,
            and `None` for the second and third elements of the tuple.

    """
    self.append_user_message(text)

    self._inject_context(text)

    return (self._primary_query(), None, None)

set_api_key(api_key, user=None)

Do not use for the wasm model.

Source code in biochatter/biochatter/llm_connect/misc.py
def set_api_key(self, api_key: str, user: str | None = None) -> bool:
    """Do not use for the wasm model."""
    return True

XinferenceConversation

Bases: Conversation

Conversation class for the Xinference deployment.

Source code in biochatter/biochatter/llm_connect/xinference.py
class XinferenceConversation(Conversation):
    """Conversation class for the Xinference deployment."""

    def __init__(
        self,
        base_url: str,
        prompts: dict,
        model_name: str = "auto",
        correct: bool = False,
        split_correction: bool = False,
    ) -> None:
        """Connect to an open-source LLM via the Xinference client.

        Connect to a running Xinference deployment and set up a conversation
        with the user. Also initialise a second conversational agent to
        provide corrections to the model output, if necessary.

        Args:
        ----
            base_url (str): The base URL of the Xinference instance (should not
            include the /v1 part).

            prompts (dict): A dictionary of prompts to use for the conversation.

            model_name (str): The name of the model to use. Will be mapped to
            the according uid from the list of available models. Can be set to
            "auto" to use the first available model.

            correct (bool): Whether to correct the model output.

            split_correction (bool): Whether to correct the model output by
            splitting the output into sentences and correcting each sentence
            individually.

        """
        # Shaohong: please keep this xinference import here so that we don't
        # need to depend on xinference unless it is needed (xinference is
        # expensive to install).
        from xinference.client import Client

        super().__init__(
            model_name=model_name,
            prompts=prompts,
            correct=correct,
            split_correction=split_correction,
        )
        self.client = Client(base_url=base_url)

        self.models = {}
        self.load_models()

        self.ca_model_name = model_name

        self.set_api_key()

        # TODO make accessible by drop-down

    def load_models(self) -> None:
        """Load the models from the Xinference client."""
        for model_id, model in self.client.list_models().items():
            model["id"] = model_id
            self.models[model["model_name"]] = model

    def append_system_message(self, message: str) -> None:
        """Override the system message addition.

        Xinference does not accept multiple system messages. We concatenate them
        if there are multiple.

        Args:
        ----
            message (str): The message to append.

        """
        # if there is not already a system message in self.messages
        if not any(isinstance(m, SystemMessage) for m in self.messages):
            self.messages.append(
                SystemMessage(
                    content=message,
                ),
            )
        else:
            # if there is a system message, append to the last one
            for i, msg in enumerate(self.messages):
                if isinstance(msg, SystemMessage):
                    self.messages[i].content += f"\n{message}"
                    break

    def append_ca_message(self, message: str) -> None:
        """Override the system message addition for the correcting agent.

        Xinference does not accept multiple system messages. We concatenate them
        if there are multiple.

        TODO this currently assumes that the correcting agent is the same model
        as the primary one.

        Args:
        ----
            message (str): The message to append.

        """
        # if there is not already a system message in self.messages
        if not any(isinstance(m, SystemMessage) for m in self.ca_messages):
            self.ca_messages.append(
                SystemMessage(
                    content=message,
                ),
            )
        else:
            # if there is a system message, append to the last one
            for i, msg in enumerate(self.ca_messages):
                if isinstance(msg, SystemMessage):
                    self.ca_messages[i].content += f"\n{message}"
                    break

    def _primary_query(self, **kwargs) -> tuple:
        """Query the Xinference client API.

        Use the user's message and return the response using the message history
        (flattery system messages, prior conversation) as context. Correct the
        response if necessary.

        LLaMA2 architecture does not accept separate system messages, so we
        concatenate the system message with the user message to form the prompt.
        'LLaMA enforces a strict rule that chats should alternate
        user/assistant/user/assistant, and the system message, if present,
        should be embedded into the first user message.' (from
        https://discuss.huggingface.co/t/issue-with-llama-2-chat-template-and-out-of-date-documentation/61645/3)

        Returns
        -------
            tuple: A tuple containing the response from the Xinference API
            (formatted similarly to responses from the OpenAI API) and the token
            usage.

        """
        try:
            history = self._create_history()
            # TODO this is for LLaMA2 arch, may be different for newer models
            prompt = history.pop()
            response = self.model.chat(
                prompt=prompt["content"],
                chat_history=history,
                generate_config={"max_tokens": 2048, "temperature": 0},
            )
        except (
            openai._exceptions.APIError,
            openai._exceptions.OpenAIError,
            openai._exceptions.ConflictError,
            openai._exceptions.NotFoundError,
            openai._exceptions.APIStatusError,
            openai._exceptions.RateLimitError,
            openai._exceptions.APITimeoutError,
            openai._exceptions.BadRequestError,
            openai._exceptions.APIConnectionError,
            openai._exceptions.AuthenticationError,
            openai._exceptions.InternalServerError,
            openai._exceptions.PermissionDeniedError,
            openai._exceptions.UnprocessableEntityError,
            openai._exceptions.APIResponseValidationError,
        ) as e:
            return str(e), None

        msg = response["choices"][0]["message"]["content"]
        token_usage = response["usage"]

        self._update_usage_stats(self.model_name, token_usage)

        self.append_ai_message(msg)

        return msg, token_usage

    def _create_history(self) -> list:
        """Create a history of messages from the conversation.

        Returns
        -------
            list: A list of messages from the conversation.

        """
        history = []
        # extract text components from message contents
        msg_texts = [m.content[0]["text"] if isinstance(m.content, list) else m.content for m in self.messages]

        # check if last message is an image message
        is_image_message = False
        if isinstance(self.messages[-1].content, list):
            is_image_message = self.messages[-1].content[1]["type"] == "image_url"

        # find location of last AI message (if any)
        last_ai_message = None
        for i, m in enumerate(self.messages):
            if isinstance(m, AIMessage):
                last_ai_message = i

        # concatenate all messages before the last AI message into one message
        if last_ai_message is not None:
            history.append(
                {
                    "role": "user",
                    "content": "\n".join(msg_texts[:last_ai_message]),
                },
            )
            # then append the last AI message
            history.append(
                {
                    "role": "assistant",
                    "content": msg_texts[last_ai_message],
                },
            )

            # then concatenate all messages after that
            # into one HumanMessage
            history.append(
                {
                    "role": "user",
                    "content": "\n".join(msg_texts[last_ai_message + 1 :]),
                },
            )

        # if there is no AI message, concatenate all messages into one user
        # message
        else:
            history.append(
                {
                    "role": "user",
                    "content": "\n".join(msg_texts),
                },
            )

        # if the last message is an image message, add the image to the history
        if is_image_message:
            history[-1]["content"] = [
                {"type": "text", "text": history[-1]["content"]},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": self.messages[-1].content[1]["image_url"]["url"],
                    },
                },
            ]
        return history

    def _correct_response(self, msg: str) -> str:
        """Correct the response from the Xinference API.

        Send the response to a secondary language model. Optionally split the
        response into single sentences and correct each sentence individually.
        Update usage stats.

        Args:
        ----
            msg (str): The response from the model.

        Returns:
        -------
            str: The corrected response (or OK if no correction necessary).

        """
        ca_messages = self.ca_messages.copy()
        ca_messages.append(
            HumanMessage(
                content=msg,
            ),
        )
        ca_messages.append(
            SystemMessage(
                content="If there is nothing to correct, please respond with just 'OK', and nothing else!",
            ),
        )
        history = []
        for m in self.messages:
            if isinstance(m, SystemMessage):
                history.append({"role": "system", "content": m.content})
            elif isinstance(m, HumanMessage):
                history.append({"role": "user", "content": m.content})
            elif isinstance(m, AIMessage):
                history.append({"role": "assistant", "content": m.content})
        prompt = history.pop()
        response = self.ca_model.chat(
            prompt=prompt["content"],
            chat_history=history,
            generate_config={"max_tokens": 2048, "temperature": 0},
        )

        correction = response["choices"][0]["message"]["content"]
        token_usage = response["usage"]

        self._update_usage_stats(self.ca_model_name, token_usage)

        return correction

    def _update_usage_stats(self, model: str, token_usage: dict) -> None:
        """Update redis database with token usage statistics.

        Use the usage_stats object with the increment method.

        Args:
        ----
            model (str): The model name.

            token_usage (dict): The token usage statistics.

        """

    def set_api_key(self) -> bool:
        """Try to get the Xinference model from the client API.

        If the model is found, initialise the conversational agent. If the model
        is not found, `get_model` will raise a RuntimeError.

        Returns
        -------
            bool: True if the model is found, False otherwise.

        """
        try:
            if self.model_name is None or self.model_name == "auto":
                self.model_name = self.list_models_by_type("chat")[0]
            self.model = self.client.get_model(
                self.models[self.model_name]["id"],
            )

            if self.ca_model_name is None or self.ca_model_name == "auto":
                self.ca_model_name = self.list_models_by_type("chat")[0]
            self.ca_model = self.client.get_model(
                self.models[self.ca_model_name]["id"],
            )
            return True

        except RuntimeError:
            self._chat = None
            self._ca_chat = None
            return False

    def list_models_by_type(self, model_type: str) -> list[str]:
        """List the models by type.

        Args:
        ----
            model_type (str): The type of model to list.

        Returns:
        -------
            list[str]: A list of model names.

        """
        names = []
        if model_type in ["embed", "embedding"]:
            for name, model in self.models.items():
                if "model_ability" in model:
                    if "embed" in model["model_ability"]:
                        names.append(name)
                elif model["model_type"] == "embedding":
                    names.append(name)
            return names
        for name, model in self.models.items():
            if "model_ability" in model:
                if model_type in model["model_ability"]:
                    names.append(name)
            elif model["model_type"] == model_type:
                names.append(name)
        return names
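
A minimal usage sketch (the server URL is a placeholder; port 9997 is the
common Xinference default, and "auto" picks the first chat-capable model
deployed on the instance):

convo = XinferenceConversation(
    base_url="http://localhost:9997",
    prompts={},
    model_name="auto",
)
msg, token_usage, correction = convo.query("What is BioChatter?")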

__init__(base_url, prompts, model_name='auto', correct=False, split_correction=False)

Connect to an open-source LLM via the Xinference client.

Connect to a running Xinference deployment and set up a conversation with the user. Also initialise a second conversational agent to provide corrections to the model output, if necessary.


Parameters:

    base_url (str): The base URL of the Xinference instance (should not
        include the /v1 part).
    prompts (dict): A dictionary of prompts to use for the conversation.
    model_name (str): The name of the model to use. Will be mapped to
        the according uid from the list of available models. Can be set to
        "auto" to use the first available model.
    correct (bool): Whether to correct the model output.
    split_correction (bool): Whether to correct the model output by
        splitting the output into sentences and correcting each sentence
        individually.

Source code in biochatter/biochatter/llm_connect/xinference.py
def __init__(
    self,
    base_url: str,
    prompts: dict,
    model_name: str = "auto",
    correct: bool = False,
    split_correction: bool = False,
) -> None:
    """Connect to an open-source LLM via the Xinference client.

    Connect to a running Xinference deployment and set up a conversation
    with the user. Also initialise a second conversational agent to
    provide corrections to the model output, if necessary.

    Args:
    ----
        base_url (str): The base URL of the Xinference instance (should not
        include the /v1 part).

        prompts (dict): A dictionary of prompts to use for the conversation.

        model_name (str): The name of the model to use. Will be mapped to
        the according uid from the list of available models. Can be set to
        "auto" to use the first available model.

        correct (bool): Whether to correct the model output.

        split_correction (bool): Whether to correct the model output by
        splitting the output into sentences and correcting each sentence
        individually.

    """
    # Shaohong: please keep this xinference import here so that we don't
    # need to depend on xinference unless it is needed (xinference is
    # expensive to install).
    from xinference.client import Client

    super().__init__(
        model_name=model_name,
        prompts=prompts,
        correct=correct,
        split_correction=split_correction,
    )
    self.client = Client(base_url=base_url)

    self.models = {}
    self.load_models()

    self.ca_model_name = model_name

    self.set_api_key()

append_ca_message(message)

Override the system message addition for the correcting agent.

Xinference does not accept multiple system messages. We concatenate them if there are multiple.

TODO this currently assumes that the correcting agent is the same model as the primary one.


Parameters:

    message (str): The message to append.

Source code in biochatter/biochatter/llm_connect/xinference.py
def append_ca_message(self, message: str) -> None:
    """Override the system message addition for the correcting agent.

    Xinference does not accept multiple system messages. We concatenate them
    if there are multiple.

    TODO this currently assumes that the correcting agent is the same model
    as the primary one.

    Args:
    ----
        message (str): The message to append.

    """
    # if there is not already a system message in self.messages
    if not any(isinstance(m, SystemMessage) for m in self.ca_messages):
        self.ca_messages.append(
            SystemMessage(
                content=message,
            ),
        )
    else:
        # if there is a system message, append to the last one
        for i, msg in enumerate(self.ca_messages):
            if isinstance(msg, SystemMessage):
                self.ca_messages[i].content += f"\n{message}"
                break

append_system_message(message)

Override the system message addition.

Xinference does not accept multiple system messages. We concatenate them if there are multiple.


Parameters:

    message (str): The message to append.

Source code in biochatter/biochatter/llm_connect/xinference.py
def append_system_message(self, message: str) -> None:
    """Override the system message addition.

    Xinference does not accept multiple system messages. We concatenate them
    if there are multiple.

    Args:
    ----
        message (str): The message to append.

    """
    # if there is not already a system message in self.messages
    if not any(isinstance(m, SystemMessage) for m in self.messages):
        self.messages.append(
            SystemMessage(
                content=message,
            ),
        )
    else:
        # if there is a system message, append to the last one
        for i, msg in enumerate(self.messages):
            if isinstance(msg, SystemMessage):
                self.messages[i].content += f"\n{message}"
                break

list_models_by_type(model_type)

List the models by type.


Parameters:

    model_type (str): The type of model to list.

Returns:

    list[str]: A list of model names.

Source code in biochatter/biochatter/llm_connect/xinference.py
def list_models_by_type(self, model_type: str) -> list[str]:
    """List the models by type.

    Args:
    ----
        model_type (str): The type of model to list.

    Returns:
    -------
        list[str]: A list of model names.

    """
    names = []
    if model_type in ["embed", "embedding"]:
        for name, model in self.models.items():
            if "model_ability" in model:
                if "embed" in model["model_ability"]:
                    names.append(name)
            elif model["model_type"] == "embedding":
                names.append(name)
        return names
    for name, model in self.models.items():
        if "model_ability" in model:
            if model_type in model["model_ability"]:
                names.append(name)
        elif model["model_type"] == model_type:
            names.append(name)
    return names
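
Example (the returned names depend on what is deployed on your instance):

chat_models = convo.list_models_by_type("chat")
embedding_models = convo.list_models_by_type("embedding")  # "embed" also matches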

load_models()

Load the models from the Xinference client.

Source code in biochatter/biochatter/llm_connect/xinference.py
def load_models(self) -> None:
    """Load the models from the Xinference client."""
    for model_id, model in self.client.list_models().items():
        model["id"] = model_id
        self.models[model["model_name"]] = model

set_api_key()

Try to get the Xinference model from the client API.

If the model is found, initialise the conversational agent. If the model is not found, get_model will raise a RuntimeError.

Returns:

    bool: True if the model is found, False otherwise.

Source code in biochatter/biochatter/llm_connect/xinference.py
def set_api_key(self) -> bool:
    """Try to get the Xinference model from the client API.

    If the model is found, initialise the conversational agent. If the model
    is not found, `get_model` will raise a RuntimeError.

    Returns
    -------
        bool: True if the model is found, False otherwise.

    """
    try:
        if self.model_name is None or self.model_name == "auto":
            self.model_name = self.list_models_by_type("chat")[0]
        self.model = self.client.get_model(
            self.models[self.model_name]["id"],
        )

        if self.ca_model_name is None or self.ca_model_name == "auto":
            self.ca_model_name = self.list_models_by_type("chat")[0]
        self.ca_model = self.client.get_model(
            self.models[self.ca_model_name]["id"],
        )
        return True

    except RuntimeError:
        self._chat = None
        self._ca_chat = None
        return False