Source code for langchain_core.output_parsers.openai_tools

import copy
import json
from json import JSONDecodeError
from typing import Any, Dict, List, Optional, Type

from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, InvalidToolCall
from langchain_core.output_parsers import BaseCumulativeTransformOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.pydantic_v1 import BaseModel, ValidationError
from langchain_core.utils.json import parse_partial_json


def parse_tool_call(
    raw_tool_call: Dict[str, Any],
    *,
    partial: bool = False,
    strict: bool = False,
    return_id: bool = True,
) -> Optional[Dict[str, Any]]:
    """Parse a single tool call."""
    if "function" not in raw_tool_call:
        return None
    if partial:
        try:
            function_args = parse_partial_json(
                raw_tool_call["function"]["arguments"], strict=strict
            )
        except (JSONDecodeError, TypeError):  # None args
            return None
    else:
        try:
            function_args = json.loads(
                raw_tool_call["function"]["arguments"], strict=strict
            )
        except JSONDecodeError as e:
            raise OutputParserException(
                f"Function {raw_tool_call['function']['name']} arguments:\n\n"
                f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
                f"Received JSONDecodeError {e}"
            )
    parsed = {
        "name": raw_tool_call["function"]["name"] or "",
        "args": function_args or {},
    }
    if return_id:
        parsed["id"] = raw_tool_call.get("id")
    return parsed
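A minimal usage sketch (not part of the module source). The payload below is a hypothetical OpenAI-style tool call; real values come from the model response:

raw_call = {
    "id": "call_abc123",
    "function": {"name": "multiply", "arguments": '{"a": 3, "b": 12}'},
}
parse_tool_call(raw_call)
# roughly: {"name": "multiply", "args": {"a": 3, "b": 12}, "id": "call_abc123"}
# With partial=True, truncated argument JSON is tolerated (best-effort parse or
# None) instead of raising an OutputParserException.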
def make_invalid_tool_call(
    raw_tool_call: Dict[str, Any],
    error_msg: Optional[str],
) -> InvalidToolCall:
    """Create an InvalidToolCall from a raw tool call."""
    return InvalidToolCall(
        name=raw_tool_call["function"]["name"],
        args=raw_tool_call["function"]["arguments"],
        id=raw_tool_call.get("id"),
        error=error_msg,
    )
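A companion sketch for recording a call whose arguments could not be parsed; the payload and error message are illustrative:

bad_call = {
    "id": "call_def456",
    "function": {"name": "multiply", "arguments": '{"a": 3, "b":'},
}
make_invalid_tool_call(bad_call, error_msg="arguments are not valid JSON")
# Keeps the raw (unparsed) arguments string so the failure can be surfaced later.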
def parse_tool_calls(
    raw_tool_calls: List[dict],
    *,
    partial: bool = False,
    strict: bool = False,
    return_id: bool = True,
) -> List[Dict[str, Any]]:
    """Parse a list of tool calls."""
    final_tools: List[Dict[str, Any]] = []
    exceptions = []
    for tool_call in raw_tool_calls:
        try:
            parsed = parse_tool_call(
                tool_call, partial=partial, strict=strict, return_id=return_id
            )
            if parsed:
                final_tools.append(parsed)
        except OutputParserException as e:
            exceptions.append(str(e))
            continue
    if exceptions:
        raise OutputParserException("\n\n".join(exceptions))
    return final_tools
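A sketch of parsing a whole tool_calls list (hypothetical payload); per-call OutputParserExceptions are collected and re-raised together after the loop:

raw_calls = [
    {"id": "call_1", "function": {"name": "add", "arguments": '{"a": 1, "b": 2}'}},
    {"id": "call_2", "function": {"name": "add", "arguments": '{"a": 5, "b": 7}'}},
]
parse_tool_calls(raw_calls, return_id=False)
# roughly: [{"name": "add", "args": {"a": 1, "b": 2}},
#           {"name": "add", "args": {"a": 5, "b": 7}}]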
class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
    """Parse tools from OpenAI response."""

    strict: bool = False
    """Whether to allow non-JSON-compliant strings.

    See: https://docs.python.org/3/library/json.html#encoders-and-decoders

    Useful when the parsed output may include unicode characters or new lines.
    """
    return_id: bool = False
    """Whether to return the tool call id."""
    first_tool_only: bool = False
    """Whether to return only the first tool call.

    If False, the result will be a list of tool calls, or an empty list
    if no tool calls are found.

    If true, and multiple tool calls are found, only the first one will be
    returned, and the other tool calls will be ignored.
    If no tool calls are found, None will be returned.
    """
    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        generation = result[0]
        if not isinstance(generation, ChatGeneration):
            raise OutputParserException(
                "This output parser can only be used with a chat generation."
            )
        message = generation.message
        if isinstance(message, AIMessage) and message.tool_calls:
            tool_calls = [dict(tc) for tc in message.tool_calls]
            for tool_call in tool_calls:
                if not self.return_id:
                    _ = tool_call.pop("id")
        else:
            try:
                raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
            except KeyError:
                return []
            tool_calls = parse_tool_calls(
                raw_tool_calls,
                partial=partial,
                strict=self.strict,
                return_id=self.return_id,
            )
        # for backwards compatibility
        for tc in tool_calls:
            tc["type"] = tc.pop("name")

        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
        return tool_calls
    def parse(self, text: str) -> Any:
        raise NotImplementedError()
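A usage sketch for the base parser, with an AIMessage built by hand (in practice the message comes from a tool-calling chat model; the tool name and arguments here are hypothetical). AIMessage and ChatGeneration are already imported above:

message = AIMessage(
    content="",
    tool_calls=[{"name": "multiply", "args": {"a": 3, "b": 12}, "id": "call_abc123"}],
)
parser = JsonOutputToolsParser(return_id=True)
parser.parse_result([ChatGeneration(message=message)])
# roughly: [{"args": {"a": 3, "b": 12}, "id": "call_abc123", "type": "multiply"}]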
class JsonOutputKeyToolsParser(JsonOutputToolsParser):
    """Parse tools from OpenAI response."""

    key_name: str
    """The type of tools to return."""
    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        parsed_result = super().parse_result(result, partial=partial)

        if self.first_tool_only:
            single_result = (
                parsed_result
                if parsed_result and parsed_result["type"] == self.key_name
                else None
            )
            if self.return_id:
                return single_result
            elif single_result:
                return single_result["args"]
            else:
                return None
        parsed_result = [res for res in parsed_result if res["type"] == self.key_name]
        if not self.return_id:
            parsed_result = [res["args"] for res in parsed_result]
        return parsed_result
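A sketch of the key-filtering subclass, reusing the hand-built message from the sketch above and keeping only calls to the (hypothetical) "multiply" tool:

parser = JsonOutputKeyToolsParser(key_name="multiply", first_tool_only=True)
parser.parse_result([ChatGeneration(message=message)])
# roughly: {"a": 3, "b": 12}  (args only, since return_id defaults to False)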
class PydanticToolsParser(JsonOutputToolsParser):
    """Parse tools from OpenAI response."""

    tools: List[Type[BaseModel]]

    # TODO: Support more granular streaming of objects. Currently only streams
    # once all Pydantic object fields are present.
    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        json_results = super().parse_result(result, partial=partial)
        if not json_results:
            return None if self.first_tool_only else []

        json_results = [json_results] if self.first_tool_only else json_results
        name_dict = {tool.__name__: tool for tool in self.tools}
        pydantic_objects = []
        for res in json_results:
            try:
                if not isinstance(res["args"], dict):
                    raise ValueError(
                        f"Tool arguments must be specified as a dict, received: "
                        f"{res['args']}"
                    )
                pydantic_objects.append(name_dict[res["type"]](**res["args"]))
            except (ValidationError, ValueError) as e:
                if partial:
                    continue
                else:
                    raise e
        if self.first_tool_only:
            return pydantic_objects[0] if pydantic_objects else None
        else:
            return pydantic_objects
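Finally, a sketch of validating tool arguments into Pydantic objects. The multiply schema is hypothetical; its class name must match the tool name emitted by the model, and the example reuses the message built in the JsonOutputToolsParser sketch above:

class multiply(BaseModel):
    """Multiply two integers."""

    a: int
    b: int


parser = PydanticToolsParser(tools=[multiply], first_tool_only=True)
parser.parse_result([ChatGeneration(message=message)])
# roughly: multiply(a=3, b=12)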