Source code for langchain.tools.openapi.utils.openapi_utils
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/openapi_utils.html

    def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
        """Get the root reference or err."""
        parameter = self._get_referenced_parameter(ref)
        while isinstance(parameter, Reference):
            parameter = self._get_referenced_parameter(parameter)
        return parameter

    def get_referenced_schema(self, ref: Reference) -> Schema:
        """Get a schema (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        schemas = self._schemas_strict
        if ref_name not in schemas:
            raise ValueError(f"No schema found for {ref_name}")
        return schemas[ref_name]

    def _get_root_referenced_schema(self, ref: Reference) -> Schema:
        """Get the root reference or err."""
        schema = self.get_referenced_schema(ref)
        while isinstance(schema, Reference):
            schema = self.get_referenced_schema(schema)
        return schema

    def _get_referenced_request_body(
        self, ref: Reference
    ) -> Optional[Union[Reference, RequestBody]]:
        """Get a request body (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        request_bodies = self._request_bodies_strict
        if ref_name not in request_bodies:
            raise ValueError(f"No request body found for {ref_name}")
        return request_bodies[ref_name]

    def _get_root_referenced_request_body(
        self, ref: Reference
    ) -> Optional[RequestBody]:
        """Get the root request Body or err."""
        request_body = self._get_referenced_request_body(ref)
        while isinstance(request_body, Reference):
            request_body = self._get_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def _alert_unsupported_spec(obj: dict) -> None:
        """Alert if the spec is not supported."""
        warning_message = (
            " This may result in degraded performance."
            + " Convert your OpenAPI spec to 3.1.* spec"
            + " for better support."
        )
        swagger_version = obj.get("swagger")
        openapi_version = obj.get("openapi")
        if isinstance(openapi_version, str):
            if openapi_version != "3.1.0":
                logger.warning(
                    f"Attempting to load an OpenAPI {openapi_version}"
                    f" spec. {warning_message}"
                )
            else:
                pass
        elif isinstance(swagger_version, str):
            logger.warning(
                f"Attempting to load a Swagger {swagger_version}"
                f" spec. {warning_message}"
            )
        else:
            raise ValueError(
                "Attempting to load an unsupported spec:"
                f"\n\n{obj}\n{warning_message}"
            )

    @classmethod
    def parse_obj(cls, obj: dict) -> "OpenAPISpec":
        try:
            cls._alert_unsupported_spec(obj)
            return super().parse_obj(obj)
        except ValidationError as e:
            # We are handling possibly misconfigured specs and want to do a best-effort
            # job to get a reasonable interface out of it.
            new_obj = copy.deepcopy(obj)
            for error in e.errors():
                keys = error["loc"]
                item = new_obj
                for key in keys[:-1]:
                    item = item[key]
                item.pop(keys[-1], None)
            return cls.parse_obj(new_obj)

    @classmethod
    def from_spec_dict(cls, spec_dict: dict) -> "OpenAPISpec":
        """Get an OpenAPI spec from a dict."""
        return cls.parse_obj(spec_dict)

    @classmethod
    def from_text(cls, text: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a text."""
        try:
            spec_dict = json.loads(text)
        except json.JSONDecodeError:
            spec_dict = yaml.safe_load(text)
        return cls.from_spec_dict(spec_dict)

    @classmethod
    def from_file(cls, path: Union[str, Path]) -> "OpenAPISpec":
        """Get an OpenAPI spec from a file path."""
        path_ = path if isinstance(path, Path) else Path(path)
        if not path_.exists():
            raise FileNotFoundError(f"{path} does not exist")
        with path_.open("r") as f:
            return cls.from_text(f.read())

    @classmethod
    def from_url(cls, url: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a URL."""
        response = requests.get(url)
        return cls.from_text(response.text)

    @property
    def base_url(self) -> str:
        """Get the base url."""
        return self.servers[0].url

    def get_methods_for_path(self, path: str) -> List[str]:
        """Return a list of valid methods for the specified path."""
        path_item = self._get_path_strict(path)
        results = []
        for method in HTTPVerb:
            operation = getattr(path_item, method.value, None)
            if isinstance(operation, Operation):
                results.append(method.value)
        return results

    def get_operation(self, path: str, method: str) -> Operation:
        """Get the operation object for a given path and HTTP method."""
        path_item = self._get_path_strict(path)
        operation_obj = getattr(path_item, method, None)
        if not isinstance(operation_obj, Operation):
            raise ValueError(f"No {method} method found for {path}")
        return operation_obj

    def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
        """Get the components for a given operation."""
        parameters = []
        if operation.parameters:
            for parameter in operation.parameters:
                if isinstance(parameter, Reference):
                    parameter = self._get_root_referenced_parameter(parameter)
                parameters.append(parameter)
        return parameters

    def get_request_body_for_operation(
        self, operation: Operation
    ) -> Optional[RequestBody]:
        """Get the request body for a given operation."""
        request_body = operation.requestBody
        if isinstance(request_body, Reference):
            request_body = self._get_root_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
        """Get a cleaned operation id from an operation id."""
        operation_id = operation.operationId
        if operation_id is None:
            # Replace all punctuation of any kind with underscore
            path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
            operation_id = f"{path}_{method}"
        return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
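A minimal usage sketch for the helpers above. The spec URL and path are illustrative placeholders; any OpenAPI 3.x document reachable over HTTP should work, with the degraded-support warning from _alert_unsupported_spec firing for pre-3.1 specs.

# Hypothetical example; the petstore URL and "/pet" path are not part of this module.
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec

spec = OpenAPISpec.from_url("https://petstore3.swagger.io/api/v3/openapi.json")
print(spec.base_url)                      # first entry in the spec's `servers` list
print(spec.get_methods_for_path("/pet"))  # e.g. ['put', 'post']
operation = spec.get_operation("/pet", "post")
print(OpenAPISpec.get_cleaned_operation_id(operation, "/pet", "post"))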
Source code for langchain.tools.openapi.utils.api_models
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html

"""Pydantic models for parsing an OpenAPI spec."""
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union

from openapi_schema_pydantic import MediaType, Parameter, Reference, RequestBody, Schema
from pydantic import BaseModel, Field

from langchain.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec

logger = logging.getLogger(__name__)

PRIMITIVE_TYPES = {
    "integer": int,
    "number": float,
    "string": str,
    "boolean": bool,
    "array": List,
    "object": Dict,
    "null": None,
}


# See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn
# for more info.
class APIPropertyLocation(Enum):
    """The location of the property."""

    QUERY = "query"
    PATH = "path"
    HEADER = "header"
    COOKIE = "cookie"  # Not yet supported

    @classmethod
    def from_str(cls, location: str) -> "APIPropertyLocation":
        """Parse an APIPropertyLocation."""
        try:
            return cls(location)
        except ValueError:
            raise ValueError(
                f"Invalid APIPropertyLocation. Valid values are {cls.__members__}"
            )


_SUPPORTED_MEDIA_TYPES = ("application/json",)

SUPPORTED_LOCATIONS = {
    APIPropertyLocation.QUERY,
    APIPropertyLocation.PATH,
}
INVALID_LOCATION_TEMPL = (
    'Unsupported APIPropertyLocation "{location}"'
    " for parameter {name}. "
    + f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}"
)

SCHEMA_TYPE = Union[str, Type, tuple, None, Enum]


class APIPropertyBase(BaseModel):
    """Base model for an API property."""

    # The name of the parameter is required and is case sensitive.
    # If "in" is "path", the "name" field must correspond to a template expression
    # within the path field in the Paths Object.
    # If "in" is "header" and the "name" field is "Accept", "Content-Type",
    # or "Authorization", the parameter definition is ignored.
    # For all other cases, the "name" corresponds to the parameter
    # name used by the "in" property.
    name: str = Field(alias="name")
    """The name of the property."""

    required: bool = Field(alias="required")
    """Whether the property is required."""

    type: SCHEMA_TYPE = Field(alias="type")
    """The type of the property.

    Either a primitive type, a component/parameter type,
    or an array or 'object' (dict) of the above."""

    default: Optional[Any] = Field(alias="default", default=None)
    """The default value of the property."""

    description: Optional[str] = Field(alias="description", default=None)
    """The description of the property."""


class APIProperty(APIPropertyBase):
    """A model for a property in the query, path, header, or cookie params."""

    location: APIPropertyLocation = Field(alias="location")
    """The path/how it's being passed to the endpoint."""

    @staticmethod
    def _cast_schema_list_type(
        schema: Schema,
    ) -> Optional[Union[str, Tuple[str, ...]]]:
        type_ = schema.type
        if not isinstance(type_, list):
            return type_
        else:
            return tuple(type_)

    @staticmethod
    def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum:
        """Get the schema type when the parameter is an enum."""
        param_name = f"{parameter.name}Enum"
        return Enum(param_name, {str(v): v for v in schema.enum})

    @staticmethod
    def _get_schema_type_for_array(
        schema: Schema,
    ) -> Optional[Union[str, Tuple[str, ...]]]:
        items = schema.items
        if isinstance(items, Schema):
            schema_type = APIProperty._cast_schema_list_type(items)
        elif isinstance(items, Reference):
            ref_name = items.ref.split("/")[-1]
            schema_type = ref_name  # TODO: Add ref definitions to make this valid
        else:
            raise ValueError(f"Unsupported array items: {items}")

        if isinstance(schema_type, str):
            # TODO: recurse
            schema_type = (schema_type,)

        return schema_type

    @staticmethod
    def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE:
        if schema is None:
            return None
        schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema)
        if schema_type == "array":
            schema_type = APIProperty._get_schema_type_for_array(schema)
        elif schema_type == "object":
            # TODO: Resolve array and object types to components.
            raise NotImplementedError("Objects not yet supported")
        elif schema_type in PRIMITIVE_TYPES:
            if schema.enum:
                schema_type = APIProperty._get_schema_type_for_enum(parameter, schema)
            else:
                # Directly use the primitive type
                pass
        else:
            raise NotImplementedError(f"Unsupported type: {schema_type}")

        return schema_type

    @staticmethod
    def _validate_location(location: APIPropertyLocation, name: str) -> None:
        if location not in SUPPORTED_LOCATIONS:
            raise NotImplementedError(
                INVALID_LOCATION_TEMPL.format(location=location, name=name)
            )

    @staticmethod
    def _validate_content(content: Optional[Dict[str, MediaType]]) -> None:
        if content:
            raise ValueError(
                "API Properties with media content not supported. "
                "Media content only supported within APIRequestBodyProperty's"
            )

    @staticmethod
    def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]:
        schema = parameter.param_schema
        if isinstance(schema, Reference):
            schema = spec.get_referenced_schema(schema)
        elif schema is None:
            return None
        elif not isinstance(schema, Schema):
            raise ValueError(f"Error dereferencing schema: {schema}")

        return schema

    @staticmethod
    def is_supported_location(location: str) -> bool:
        """Return whether the provided location is supported."""
        try:
            return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
        except ValueError:
            return False

    @classmethod
    def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty":
        """Instantiate from an OpenAPI Parameter."""
        location = APIPropertyLocation.from_str(parameter.param_in)
        cls._validate_location(
            location,
            parameter.name,
        )
        cls._validate_content(parameter.content)
        schema = cls._get_schema(parameter, spec)
        schema_type = cls._get_schema_type(parameter, schema)
        default_val = schema.default if schema is not None else None
        return cls(
            name=parameter.name,
            location=location,
            default=default_val,
            description=parameter.description,
            required=parameter.required,
            type=schema_type,
        )


class APIRequestBodyProperty(APIPropertyBase):
    """A model for a request body property."""

    properties: List["APIRequestBodyProperty"] = Field(alias="properties")
    """The sub-properties of the property."""

    # This is useful for handling nested property cycles.
    # We can define separate types in that case.
    references_used: List[str] = Field(alias="references_used")
    """The references used by the property."""

    @classmethod
    def _process_object_schema(
        cls, schema: Schema, spec: OpenAPISpec, references_used: List[str]
    ) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]:
        properties = []
        required_props = schema.required or []
        if schema.properties is None:
            raise ValueError(
                f"No properties found when processing object schema: {schema}"
            )
        for prop_name, prop_schema in schema.properties.items():
            if isinstance(prop_schema, Reference):
                ref_name = prop_schema.ref.split("/")[-1]
                if ref_name not in references_used:
                    references_used.append(ref_name)
                    prop_schema = spec.get_referenced_schema(prop_schema)
                else:
                    continue
            properties.append(
                cls.from_schema(
                    schema=prop_schema,
                    name=prop_name,
                    required=prop_name in required_props,
                    spec=spec,
                    references_used=references_used,
                )
            )
        return schema.type, properties

    @classmethod
    def _process_array_schema(
        cls, schema: Schema, name: str, spec: OpenAPISpec, references_used: List[str]
    ) -> str:
        items = schema.items
        if items is not None:
            if isinstance(items, Reference):
                ref_name = items.ref.split("/")[-1]
                if ref_name not in references_used:
                    references_used.append(ref_name)
                    items = spec.get_referenced_schema(items)
                else:
                    pass
                return f"Array<{ref_name}>"
            else:
                pass

        if isinstance(items, Schema):
            array_type = cls.from_schema(
                schema=items,
                name=f"{name}Item",
                required=True,  # TODO: Add required
                spec=spec,
                references_used=references_used,
            )
            return f"Array<{array_type.type}>"

        return "array"

    @classmethod
    def from_schema(
        cls,
        schema: Schema,
        name: str,
        required: bool,
        spec: OpenAPISpec,
        references_used: Optional[List[str]] = None,
    ) -> "APIRequestBodyProperty":
        """Recursively populate from an OpenAPI Schema."""
        if references_used is None:
            references_used = []

        schema_type = schema.type
        properties: List[APIRequestBodyProperty] = []
        if schema_type == "object" and schema.properties:
            schema_type, properties = cls._process_object_schema(
                schema, spec, references_used
            )
        elif schema_type == "array":
            schema_type = cls._process_array_schema(
                schema, name, spec, references_used
            )
        elif schema_type in PRIMITIVE_TYPES:
            # Use the primitive type directly
            pass
        elif schema_type is None:
            # No typing specified/parsed. Will map to 'any'
            pass
        else:
            raise ValueError(f"Unsupported type: {schema_type}")

        return cls(
            name=name,
            required=required,
            type=schema_type,
            default=schema.default,
            description=schema.description,
            properties=properties,
            references_used=references_used,
        )


class APIRequestBody(BaseModel):
    """A model for a request body."""

    description: Optional[str] = Field(alias="description")
    """The description of the request body."""

    properties: List[APIRequestBodyProperty] = Field(alias="properties")

    # E.g., application/json - we only support JSON at the moment.
    media_type: str = Field(alias="media_type")
    """The media type of the request body."""

    @classmethod
    def _process_supported_media_type(
        cls,
        media_type_obj: MediaType,
        spec: OpenAPISpec,
    ) -> List[APIRequestBodyProperty]:
        """Process the media type of the request body."""
        references_used = []
        schema = media_type_obj.media_type_schema
        if isinstance(schema, Reference):
            references_used.append(schema.ref.split("/")[-1])
            schema = spec.get_referenced_schema(schema)
        if schema is None:
            raise ValueError(
                f"Could not resolve schema for media type: {media_type_obj}"
            )
        api_request_body_properties = []
        required_properties = schema.required or []
        if schema.type == "object" and schema.properties:
            for prop_name, prop_schema in schema.properties.items():
                if isinstance(prop_schema, Reference):
                    prop_schema = spec.get_referenced_schema(prop_schema)

                api_request_body_properties.append(
                    APIRequestBodyProperty.from_schema(
                        schema=prop_schema,
                        name=prop_name,
                        required=prop_name in required_properties,
                        spec=spec,
                    )
                )
        else:
            api_request_body_properties.append(
                APIRequestBodyProperty(
                    name="body",
                    required=True,
                    type=schema.type,
                    default=schema.default,
                    description=schema.description,
                    properties=[],
                    references_used=references_used,
                )
            )

        return api_request_body_properties

    @classmethod
    def from_request_body(
        cls, request_body: RequestBody, spec: OpenAPISpec
    ) -> "APIRequestBody":
        """Instantiate from an OpenAPI RequestBody."""
        properties = []
        for media_type, media_type_obj in request_body.content.items():
            if media_type not in _SUPPORTED_MEDIA_TYPES:
                continue
            api_request_body_properties = cls._process_supported_media_type(
                media_type_obj,
                spec,
            )
            properties.extend(api_request_body_properties)

        return cls(
            description=request_body.description,
            properties=properties,
            media_type=media_type,
        )


class APIOperation(BaseModel):
    """A model for a single API operation."""

    operation_id: str = Field(alias="operation_id")
    """The unique identifier of the operation."""

    description: Optional[str] = Field(alias="description")
    """The description of the operation."""

    base_url: str = Field(alias="base_url")
    """The base URL of the operation."""

    path: str = Field(alias="path")
    """The path of the operation."""

    method: HTTPVerb = Field(alias="method")
    """The HTTP method of the operation."""

    properties: Sequence[APIProperty] = Field(alias="properties")

    # TODO: Add parse in used components to be able to specify what type of
    # referenced object it is.
    # """The properties of the operation."""
    # components: Dict[str, BaseModel] = Field(alias="components")

    request_body: Optional[APIRequestBody] = Field(alias="request_body")
    """The request body of the operation."""

    @staticmethod
    def _get_properties_from_parameters(
        parameters: List[Parameter], spec: OpenAPISpec
    ) -> List[APIProperty]:
        """Get the properties of the operation."""
        properties = []
        for param in parameters:
            if APIProperty.is_supported_location(param.param_in):
                properties.append(APIProperty.from_parameter(param, spec))
            elif param.required:
                raise ValueError(
                    INVALID_LOCATION_TEMPL.format(
                        location=param.param_in, name=param.name
                    )
                )
            else:
                logger.warning(
                    INVALID_LOCATION_TEMPL.format(
                        location=param.param_in, name=param.name
                    )
                    + " Ignoring optional parameter"
                )
                pass
        return properties

    @classmethod
    def from_openapi_url(
        cls,
        spec_url: str,
        path: str,
        method: str,
    ) -> "APIOperation":
        """Create an APIOperation from an OpenAPI URL."""
        spec = OpenAPISpec.from_url(spec_url)
        return cls.from_openapi_spec(spec, path, method)

    @classmethod
    def from_openapi_spec(
        cls,
        spec: OpenAPISpec,
        path: str,
        method: str,
    ) -> "APIOperation":
        """Create an APIOperation from an OpenAPI spec."""
        operation = spec.get_operation(path, method)
        parameters = spec.get_parameters_for_operation(operation)
        properties = cls._get_properties_from_parameters(parameters, spec)
        operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method)
        request_body = spec.get_request_body_for_operation(operation)
        api_request_body = (
            APIRequestBody.from_request_body(request_body, spec)
            if request_body is not None
            else None
        )
        description = operation.description or operation.summary
        if not description and spec.paths is not None:
            description = spec.paths[path].description or spec.paths[path].summary

        return cls(
            operation_id=operation_id,
            description=description,
            base_url=spec.base_url,
            path=path,
            method=method,
            properties=properties,
            request_body=api_request_body,
        )

    @staticmethod
    def ts_type_from_python(type_: SCHEMA_TYPE) -> str:
        if type_ is None:
            # TODO: Handle Nones better. These often result when
            # parsing specs that are < v3
            return "any"
        elif isinstance(type_, str):
            return {
                "str": "string",
                "integer": "number",
                "float": "number",
                "date-time": "string",
            }.get(type_, type_)
        elif isinstance(type_, tuple):
            return f"Array<{APIOperation.ts_type_from_python(type_[0])}>"
        elif isinstance(type_, type) and issubclass(type_, Enum):
            return " | ".join([f"'{e.value}'" for e in type_])
        else:
            return str(type_)

    def _format_nested_properties(
        self, properties: List[APIRequestBodyProperty], indent: int = 2
    ) -> str:
        """Format nested properties."""
        formatted_props = []

        for prop in properties:
            prop_name = prop.name
            prop_type = self.ts_type_from_python(prop.type)
            prop_required = "" if prop.required else "?"
            prop_desc = f"/* {prop.description} */" if prop.description else ""

            if prop.properties:
                nested_props = self._format_nested_properties(
                    prop.properties, indent + 2
                )
                prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"

            formatted_props.append(
                f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type},"
            )

        return "\n".join(formatted_props)

    def to_typescript(self) -> str:
        """Get typescript string representation of the operation."""
        operation_name = self.operation_id
        params = []

        if self.request_body:
            formatted_request_body_props = self._format_nested_properties(
                self.request_body.properties
            )
            params.append(formatted_request_body_props)

        for prop in self.properties:
            prop_name = prop.name
            prop_type = self.ts_type_from_python(prop.type)
            prop_required = "" if prop.required else "?"
            prop_desc = f"/* {prop.description} */" if prop.description else ""
            params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},")

        formatted_params = "\n".join(params).strip()
        description_str = f"/* {self.description} */" if self.description else ""
        typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
        return typescript_definition.strip()

    @property
    def query_params(self) -> List[str]:
        return [
            property.name
            for property in self.properties
            if property.location == APIPropertyLocation.QUERY
        ]

    @property
    def path_params(self) -> List[str]:
        return [
            property.name
            for property in self.properties
            if property.location == APIPropertyLocation.PATH
        ]

    @property
    def body_params(self) -> List[str]:
        if self.request_body is None:
            return []
        return [prop.name for prop in self.request_body.properties]
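To see how these models fit together, here is a hedged sketch that parses one operation and renders it as TypeScript. The spec URL, path, and method are placeholders; the resulting type name comes from get_cleaned_operation_id.

# Hypothetical example; URL and "/pet" path are illustrative.
from langchain.tools.openapi.utils.api_models import APIOperation

op = APIOperation.from_openapi_url(
    "https://petstore3.swagger.io/api/v3/openapi.json", "/pet", "post"
)
print(op.query_params, op.path_params, op.body_params)
print(op.to_typescript())  # e.g. `type addPet = (_: { ... }) => any;`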
Source code for langchain.tools.wolfram_alpha.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/wolfram_alpha/tool.html

"""Tool for the Wolfram Alpha API."""
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper


class WolframAlphaQueryRun(BaseTool):
    """Tool that adds the capability to query using the Wolfram Alpha SDK."""

    name = "Wolfram Alpha"
    description = (
        "A wrapper around Wolfram Alpha. "
        "Useful for when you need to answer questions about Math, "
        "Science, Technology, Culture, Society and Everyday Life. "
        "Input should be a search query."
    )
    api_wrapper: WolframAlphaAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the WolframAlpha tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the WolframAlpha tool asynchronously."""
        raise NotImplementedError("WolframAlphaQueryRun does not support async")
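A minimal sketch, assuming the `wolframalpha` package is installed and the WOLFRAM_ALPHA_APPID environment variable (read by the wrapper) is set:

from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper

tool = WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper())
print(tool.run("What is the derivative of x^2?"))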
Source code for langchain.tools.metaphor_search.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/metaphor_search/tool.html

"""Tool for the Metaphor search API."""
from typing import Dict, List, Optional, Union

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper


class MetaphorSearchResults(BaseTool):
    """Tool that has capability to query the Metaphor Search API and get back json."""

    name = "Metaphor Search Results JSON"
    description = (
        "A wrapper around Metaphor Search. "
        "Input should be a Metaphor-optimized query. "
        "Output is a JSON array of the query results"
    )
    api_wrapper: MetaphorSearchAPIWrapper

    def _run(
        self,
        query: str,
        num_results: int,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool."""
        try:
            return self.api_wrapper.results(query, num_results)
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        query: str,
        num_results: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool asynchronously."""
        try:
            return await self.api_wrapper.results_async(query, num_results)
        except Exception as e:
            return repr(e)
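Because _run takes both `query` and `num_results`, the tool expects a dict input rather than a bare string. A sketch, assuming a Metaphor API key is configured for the wrapper (e.g. via METAPHOR_API_KEY):

from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper

tool = MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper())
results = tool.run({"query": "recent advances in fusion energy", "num_results": 5})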
Source code for langchain.tools.google_places.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/google_places/tool.html

"""Tool for the Google Places API."""
from typing import Optional

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_places_api import GooglePlacesAPIWrapper


class GooglePlacesSchema(BaseModel):
    query: str = Field(..., description="Query for Google Maps")


class GooglePlacesTool(BaseTool):
    """Tool that adds the capability to query the Google Places API."""

    name = "Google Places"
    description = (
        "A wrapper around Google Places. "
        "Useful for when you need to validate or "
        "discover addresses from ambiguous text. "
        "Input should be a search query."
    )
    api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GooglePlacesTool does not support async")
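A usage sketch; the api_wrapper is built by the default_factory, which (as an assumption here) needs the `googlemaps` package and a GPLACES_API_KEY in the environment:

from langchain.tools.google_places.tool import GooglePlacesTool

tool = GooglePlacesTool()  # api_wrapper built via default_factory
print(tool.run("coffee shops near Union Square, San Francisco"))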
Source code for langchain.tools.scenexplain.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/scenexplain/tool.html

"""Tool for the SceneXplain API."""
from typing import Optional

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.scenexplain import SceneXplainAPIWrapper


class SceneXplainInput(BaseModel):
    """Input for SceneXplain."""

    query: str = Field(..., description="The link to the image to explain")


class SceneXplainTool(BaseTool):
    """Tool that adds the capability to explain images."""

    name = "Image Explainer"
    description = (
        "An Image Captioning Tool: Use this tool to generate a detailed caption "
        "for an image. The input can be an image file of any format, and "
        "the output will be a text description that covers every detail of the image."
    )
    api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper)

    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("SceneXplainTool does not support async")
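A sketch; the image URL is a placeholder, and the assumption here is that the wrapper picks up a SceneXplain API key (e.g. SCENEX_API_KEY) from the environment:

from langchain.tools.scenexplain.tool import SceneXplainTool

tool = SceneXplainTool()  # api_wrapper built via default_factory
print(tool.run("https://example.com/image.png"))  # placeholder image link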
Source code for langchain.tools.google_search.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/google_search/tool.html

"""Tool for the Google search API."""
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_search import GoogleSearchAPIWrapper


class GoogleSearchRun(BaseTool):
    """Tool that adds the capability to query the Google search API."""

    name = "Google Search"
    description = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GoogleSearchRun does not support async")


class GoogleSearchResults(BaseTool):
    """Tool that has capability to query the Google Search API and get back json."""

    name = "Google Search Results JSON"
    description = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON array of the query results"
    )
    num_results: int = 4
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query, self.num_results))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GoogleSearchResults does not support async")
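A sketch showing both tool variants sharing one wrapper; the wrapper (as an assumption) reads GOOGLE_API_KEY and GOOGLE_CSE_ID from the environment:

from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.utilities.google_search import GoogleSearchAPIWrapper

wrapper = GoogleSearchAPIWrapper()
print(GoogleSearchRun(api_wrapper=wrapper).run("langchain"))           # snippet text
print(GoogleSearchResults(api_wrapper=wrapper, num_results=3).run("langchain"))  # JSON-ish list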
Source code for langchain.tools.wikipedia.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/wikipedia/tool.html

"""Tool for the Wikipedia API."""
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaQueryRun(BaseTool):
    """Tool that adds the capability to search using the Wikipedia API."""

    name = "Wikipedia"
    description = (
        "A wrapper around Wikipedia. "
        "Useful for when you need to answer general questions about "
        "people, places, companies, facts, historical events, or other subjects. "
        "Input should be a search query."
    )
    api_wrapper: WikipediaAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Wikipedia tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Wikipedia tool asynchronously."""
        raise NotImplementedError("WikipediaQueryRun does not support async")
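A minimal sketch; no API key is needed, though the `wikipedia` package must be installed for the wrapper:

from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.utilities.wikipedia import WikipediaAPIWrapper

tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
print(tool.run("Alan Turing"))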
Source code for langchain.tools.azure_cognitive_services.image_analysis
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/image_analysis.html

from __future__ import annotations

import logging
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsImageAnalysisTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Image Analysis API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    vision_service: Any  #: :meta private:
    analysis_options: Any  #: :meta private:

    name = "Azure Cognitive Services Image Analysis"
    description = (
        "A wrapper around Azure Cognitive Services Image Analysis. "
        "Useful for when you need to analyze images. "
        "Input should be a url to an image."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            import azure.ai.vision as sdk

            values["vision_service"] = sdk.VisionServiceOptions(
                endpoint=azure_cogs_endpoint, key=azure_cogs_key
            )

            values["analysis_options"] = sdk.ImageAnalysisOptions()
            values["analysis_options"].features = (
                sdk.ImageAnalysisFeature.CAPTION
                | sdk.ImageAnalysisFeature.OBJECTS
                | sdk.ImageAnalysisFeature.TAGS
                | sdk.ImageAnalysisFeature.TEXT
            )
        except ImportError:
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )

        return values

    def _image_analysis(self, image_path: str) -> Dict:
        try:
            import azure.ai.vision as sdk
        except ImportError:
            # The import cannot fail here: validate_environment already
            # verified that azure-ai-vision is installed.
            pass

        image_src_type = detect_file_src_type(image_path)
        if image_src_type == "local":
            vision_source = sdk.VisionSource(filename=image_path)
        elif image_src_type == "remote":
            vision_source = sdk.VisionSource(url=image_path)
        else:
            raise ValueError(f"Invalid image path: {image_path}")

        image_analyzer = sdk.ImageAnalyzer(
            self.vision_service, vision_source, self.analysis_options
        )
        result = image_analyzer.analyze()

        res_dict = {}
        if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
            if result.caption is not None:
                res_dict["caption"] = result.caption.content
            if result.objects is not None:
                res_dict["objects"] = [obj.name for obj in result.objects]
            if result.tags is not None:
                res_dict["tags"] = [tag.name for tag in result.tags]
            if result.text is not None:
                res_dict["text"] = [line.content for line in result.text.lines]
        else:
            error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
            raise RuntimeError(
                f"Image analysis failed.\n"
                f"Reason: {error_details.reason}\n"
                f"Details: {error_details.message}"
            )

        return res_dict

    def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
        formatted_result = []
        if "caption" in image_analysis_result:
            formatted_result.append("Caption: " + image_analysis_result["caption"])

        if (
            "objects" in image_analysis_result
            and len(image_analysis_result["objects"]) > 0
        ):
            formatted_result.append(
                "Objects: " + ", ".join(image_analysis_result["objects"])
            )

        if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
            formatted_result.append(
                "Tags: " + ", ".join(image_analysis_result["tags"])
            )

        if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
            formatted_result.append(
                "Text: " + ", ".join(image_analysis_result["text"])
            )

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            image_analysis_result = self._image_analysis(query)
            if not image_analysis_result:
                return "No good image analysis result was found"

            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsImageAnalysisTool does not support async")
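A sketch; the credentials are read by validate_environment from AZURE_COGS_KEY and AZURE_COGS_ENDPOINT (as in the code above), and the image URL is a placeholder:

from langchain.tools.azure_cognitive_services.image_analysis import (
    AzureCogsImageAnalysisTool,
)

tool = AzureCogsImageAnalysisTool()  # reads AZURE_COGS_KEY / AZURE_COGS_ENDPOINT
print(tool.run("https://example.com/photo.jpg"))  # placeholder image URL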
Source code for langchain.tools.azure_cognitive_services.form_recognizer
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html

from __future__ import annotations

import logging
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsFormRecognizerTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Form Recognizer API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    doc_analysis_client: Any  #: :meta private:

    name = "Azure Cognitive Services Form Recognizer"
    description = (
        "A wrapper around Azure Cognitive Services Form Recognizer. "
        "Useful for when you need to "
        "extract text, tables, and key-value pairs from documents. "
        "Input should be a url to a document."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            from azure.ai.formrecognizer import DocumentAnalysisClient
            from azure.core.credentials import AzureKeyCredential

            values["doc_analysis_client"] = DocumentAnalysisClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-formrecognizer is not installed. "
                "Run `pip install azure-ai-formrecognizer` to install."
            )

        return values

    def _parse_tables(self, tables: List[Any]) -> List[Any]:
        result = []
        for table in tables:
            rc, cc = table.row_count, table.column_count
            _table = [["" for _ in range(cc)] for _ in range(rc)]
            for cell in table.cells:
                _table[cell.row_index][cell.column_index] = cell.content
            result.append(_table)
        return result

    def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
        result = []
        for kv_pair in kv_pairs:
            key = kv_pair.key.content if kv_pair.key else ""
            value = kv_pair.value.content if kv_pair.value else ""
            result.append((key, value))
        return result

    def _document_analysis(self, document_path: str) -> Dict:
        document_src_type = detect_file_src_type(document_path)
        if document_src_type == "local":
            with open(document_path, "rb") as document:
                poller = self.doc_analysis_client.begin_analyze_document(
                    "prebuilt-document", document
                )
        elif document_src_type == "remote":
            poller = self.doc_analysis_client.begin_analyze_document_from_url(
                "prebuilt-document", document_path
            )
        else:
            raise ValueError(f"Invalid document path: {document_path}")

        result = poller.result()
        res_dict = {}

        if result.content is not None:
            res_dict["content"] = result.content
        if result.tables is not None:
            res_dict["tables"] = self._parse_tables(result.tables)
        if result.key_value_pairs is not None:
            res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)

        return res_dict

    def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
        formatted_result = []
        if "content" in document_analysis_result:
            formatted_result.append(
                f"Content: {document_analysis_result['content']}".replace("\n", " ")
            )
        if "tables" in document_analysis_result:
            for i, table in enumerate(document_analysis_result["tables"]):
                formatted_result.append(f"Table {i}: {table}".replace("\n", " "))
        if "key_value_pairs" in document_analysis_result:
            for kv_pair in document_analysis_result["key_value_pairs"]:
                formatted_result.append(
                    f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
                )
        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"

            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsFormRecognizerTool does not support async")
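A sketch mirroring the image-analysis tool; credentials come from AZURE_COGS_KEY and AZURE_COGS_ENDPOINT, and the document URL is a placeholder:

from langchain.tools.azure_cognitive_services.form_recognizer import (
    AzureCogsFormRecognizerTool,
)

tool = AzureCogsFormRecognizerTool()
print(tool.run("https://example.com/invoice.pdf"))  # placeholder document URL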
Source code for langchain.tools.azure_cognitive_services.speech2text
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/speech2text.html

from __future__ import annotations

import logging
import time
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import (
    detect_file_src_type,
    download_audio_from_url,
)
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsSpeech2TextTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Speech2Text API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name = "Azure Cognitive Services Speech2Text"
    description = (
        "A wrapper around Azure Cognitive Services Speech2Text. "
        "Useful for when you need to transcribe audio to text. "
        "Input should be a url to an audio file."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _continuous_recognize(self, speech_recognizer: Any) -> str:
        done = False
        text = ""

        def stop_cb(evt: Any) -> None:
            """Callback that stops continuous recognition."""
            speech_recognizer.stop_continuous_recognition_async()
            nonlocal done
            done = True

        def retrieve_cb(evt: Any) -> None:
            """Callback that retrieves the intermediate recognition results."""
            nonlocal text
            text += evt.result.text

        # retrieve text on recognized events
        speech_recognizer.recognized.connect(retrieve_cb)
        # stop continuous recognition on either session stopped or canceled events
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)
        # Start continuous speech recognition
        speech_recognizer.start_continuous_recognition_async()
        while not done:
            time.sleep(0.5)
        return text

    def _speech2text(self, audio_path: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # The import cannot fail here: validate_environment already
            # verified that azure-cognitiveservices-speech is installed.
            pass

        audio_src_type = detect_file_src_type(audio_path)
        if audio_src_type == "local":
            audio_config = speechsdk.AudioConfig(filename=audio_path)
        elif audio_src_type == "remote":
            tmp_audio_path = download_audio_from_url(audio_path)
            audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
        else:
            raise ValueError(f"Invalid audio path: {audio_path}")

        self.speech_config.speech_recognition_language = speech_language
        speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
        return self._continuous_recognize(speech_recognizer)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text = self._speech2text(query, self.speech_language)
            return text
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsSpeech2TextTool does not support async")
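A sketch; credentials come from AZURE_COGS_KEY and AZURE_COGS_REGION per the validator above, and the audio URL is a placeholder:

from langchain.tools.azure_cognitive_services.speech2text import (
    AzureCogsSpeech2TextTool,
)

tool = AzureCogsSpeech2TextTool()  # speech_language defaults to "en-US"
print(tool.run("https://example.com/audio.wav"))  # placeholder audio URL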
Source code for langchain.tools.azure_cognitive_services.text2speech
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/text2speech.html

from __future__ import annotations

import logging
import tempfile
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsText2SpeechTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Text2Speech API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name = "Azure Cognitive Services Text2Speech"
    description = (
        "A wrapper around Azure Cognitive Services Text2Speech. "
        "Useful for when you need to convert text to speech. "
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _text2speech(self, text: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # The import cannot fail here: validate_environment already
            # verified that azure-cognitiveservices-speech is installed.
            pass

        self.speech_config.speech_synthesis_language = speech_language
        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=self.speech_config, audio_config=None
        )
        result = speech_synthesizer.speak_text(text)

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            stream = speechsdk.AudioDataStream(result)
            with tempfile.NamedTemporaryFile(
                mode="wb", suffix=".wav", delete=False
            ) as f:
                stream.save_to_wav_file(f.name)
            return f.name

        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                raise RuntimeError(
                    f"Speech synthesis error: {cancellation_details.error_details}"
                )
            return "Speech synthesis canceled."

        else:
            return f"Speech synthesis failed: {result.reason}"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            speech_file = self._text2speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsText2SpeechTool does not support async")
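A sketch; on success the tool returns the path of a temporary .wav file written by _text2speech:

from langchain.tools.azure_cognitive_services.text2speech import (
    AzureCogsText2SpeechTool,
)

tool = AzureCogsText2SpeechTool()  # reads AZURE_COGS_KEY / AZURE_COGS_REGION
wav_path = tool.run("Hello from LangChain")  # path to a temp .wav file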
Source code for langchain.tools.youtube.search
https://python.langchain.com/en/latest/_modules/langchain/tools/youtube/search.html

"""
Adapted from https://github.com/venuv/langchain_yt_tools

CustomYTSearchTool searches YouTube videos related to a person
and returns a specified number of video URLs.
Input to this tool should be a comma separated list,
 - the first part contains a person name
 - and the second (optional) a number that is the maximum number of video results to return
"""
import json
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools import BaseTool


class YouTubeSearchTool(BaseTool):
    name = "YouTubeSearch"
    description = (
        "search for youtube videos associated with a person. "
        "the input to this tool should be a comma separated list, "
        "the first part contains a person name and the second a "
        "number that is the maximum number of video results "
        "to return aka num_results. the second part is optional"
    )

    def _search(self, person: str, num_results: int) -> str:
        from youtube_search import YoutubeSearch

        results = YoutubeSearch(person, num_results).to_json()
        data = json.loads(results)
        url_suffix_list = [video["url_suffix"] for video in data["videos"]]
        return str(url_suffix_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        values = query.split(",")
        person = values[0]
        if len(values) > 1:
            num_results = int(values[1])
        else:
            num_results = 2
        return self._search(person, num_results)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("YouTubeSearchTool does not yet support async")
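A sketch of the comma-separated input format documented above; the `youtube_search` package must be installed:

from langchain.tools.youtube.search import YouTubeSearchTool

tool = YouTubeSearchTool()
print(tool.run("lex fridman,3"))   # person name, optional max results
print(tool.run("lex fridman"))     # num_results defaults to 2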
Source code for langchain.tools.shell.tool
https://python.langchain.com/en/latest/_modules/langchain/tools/shell/tool.html

import asyncio
import platform
import warnings
from typing import List, Optional, Type, Union

from pydantic import BaseModel, Field, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.bash import BashProcess


class ShellInput(BaseModel):
    """Commands for the Bash Shell tool."""

    commands: Union[str, List[str]] = Field(
        ...,
        description="List of shell commands to run. Deserialized using json.loads",
    )
    """List of shell commands to run."""

    @root_validator
    def _validate_commands(cls, values: dict) -> dict:
        """Validate commands."""
        # TODO: Add real validators
        commands = values.get("commands")
        if not isinstance(commands, list):
            values["commands"] = [commands]
        # Warn that the bash tool is not safe
        warnings.warn(
            "The shell tool has no safeguards by default. Use at your own risk."
        )
        return values


def _get_default_bash_process() -> BashProcess:
    """Get the default BashProcess."""
    return BashProcess(return_err_output=True)


def _get_platform() -> str:
    """Get platform."""
    system = platform.system()
    if system == "Darwin":
        return "MacOS"
    return system


class ShellTool(BaseTool):
    """Tool to run shell commands."""

    process: BashProcess = Field(default_factory=_get_default_bash_process)
    """Bash process to run commands."""

    name: str = "terminal"
    """Name of tool."""

    description: str = f"Run shell commands on this {_get_platform()} machine."
    """Description of tool."""

    args_schema: Type[BaseModel] = ShellInput
    """Schema for input arguments."""

    def _run(
        self,
        commands: Union[str, List[str]],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run commands and return final output."""
        return self.process.run(commands)

    async def _arun(
        self,
        commands: Union[str, List[str]],
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Run commands asynchronously and return final output."""
        return await asyncio.get_event_loop().run_in_executor(
            None, self.process.run, commands
        )
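A sketch; per ShellInput, the tool takes a dict with a `commands` key (a string or list), and the validator emits the no-safeguards warning on every run:

from langchain.tools.shell.tool import ShellTool

shell_tool = ShellTool()
print(shell_tool.run({"commands": ["echo 'Hello World!'", "uname -a"]}))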
ffe2e6ab562f-0
Source code for langchain.tools.human.tool """Tool for asking human input.""" from typing import Callable, Optional from pydantic import Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool def _print_func(text: str) -> None: print("\n") print(text) [docs]class HumanInputRun(BaseTool): """Tool that adds the capability to ask user for input.""" name = "Human" description = ( "You can ask a human for guidance when you think you " "got stuck or you are not sure what to do next. " "The input should be a question for the human." ) prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func) input_func: Callable = Field(default_factory=lambda: input) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Human input tool.""" self.prompt_func(query) return self.input_func() async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Human tool asynchronously.""" raise NotImplementedError("Human tool does not support async") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/human/tool.html
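Because `prompt_func` and `input_func` are plain pydantic fields, they can be swapped out, e.g. to drive the tool from a test harness instead of stdin. A small sketch with a canned answer, purely for illustration:

```python
from langchain.tools.human.tool import HumanInputRun

def fake_input() -> str:
    # Stands in for a real user typing at the console.
    return "Try narrowing the search query."

tool = HumanInputRun(input_func=fake_input)
print(tool.run("I'm stuck - what should I do next?"))  # -> "Try narrowing the search query."
```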
e42146565dd3-0
Source code for langchain.tools.google_serper.tool """Tool for the Serper.dev Google Search API.""" from typing import Optional from pydantic.fields import Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.google_serper import GoogleSerperAPIWrapper [docs]class GoogleSerperRun(BaseTool): """Tool that adds the capability to query the Serper.dev Google Search API.""" name = "Google Serper" description = ( "A low-cost Google Search API. " "Useful for when you need to answer questions about current events. " "Input should be a search query." ) api_wrapper: GoogleSerperAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.run(query)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" return (await self.api_wrapper.arun(query)).__str__() [docs]class GoogleSerperResults(BaseTool): """Tool that has the capability to query the Serper.dev Google Search API and get back json.""" name = "Google Serper Results JSON" description = ( "A low-cost Google Search API. " "Useful for when you need to answer questions about current events. " "Input should be a search query. Output is a JSON object of the query results" )
https://python.langchain.com/en/latest/_modules/langchain/tools/google_serper/tool.html
e42146565dd3-1
api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.results(query)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" return (await self.api_wrapper.aresults(query)).__str__()
https://python.langchain.com/en/latest/_modules/langchain/tools/google_serper/tool.html
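A usage sketch, assuming a Serper.dev key is available; the wrapper reads it from the SERPER_API_KEY environment variable, and the query is illustrative:

```python
import os

from langchain.tools.google_serper.tool import GoogleSerperRun
from langchain.utilities.google_serper import GoogleSerperAPIWrapper

os.environ["SERPER_API_KEY"] = os.environ.get("SERPER_API_KEY", "")

search = GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper())
print(search.run("Who won the 2022 FIFA World Cup?"))
```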
74faef60f13c-0
Source code for langchain.tools.zapier.tool """## Zapier Natural Language Actions API \ Full docs here: https://nla.zapier.com/api/v1/docs **Zapier Natural Language Actions** gives you access to the 5k+ apps and 20k+ actions on Zapier's platform through a natural language API interface. NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs. The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API. NLA offers both API Key and OAuth for signing NLA API requests. 1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com) 2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/docs) or reach out to [email protected] for user-facing oauth developer support. Typically you'd use SequentialChain; here's a basic example: 1. Use NLA to find an email in Gmail 2. Use LLMChain to generate a draft reply to (1)
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
74faef60f13c-1
3. Use NLA to send the draft reply (2) to someone in Slack via direct message In code, below: ```python import os # get from https://platform.openai.com/ os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "") # get from https://nla.zapier.com/demo/provider/debug # (under User Information, after logging in): os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "") from langchain.llms import OpenAI from langchain.agents import AgentType, initialize_agent from langchain.agents.agent_toolkits import ZapierToolkit from langchain.utilities.zapier import ZapierNLAWrapper ## step 0. expose gmail 'find email' and slack 'send channel message' actions # first go here, log in, expose (enable) the two actions: # https://nla.zapier.com/demo/start # -- for this example, can leave all fields "Have AI guess" # in an oauth scenario, you'd get your own <provider> id (instead of 'demo') # which you route your users through first llm = OpenAI(temperature=0) zapier = ZapierNLAWrapper() ## To leverage a nla_oauth_access_token you may pass the value to the ZapierNLAWrapper ## If you do this there is no need to initialize the ZAPIER_NLA_API_KEY env variable # zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE") toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
74faef60f13c-2
agent = initialize_agent( toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent.run(("Summarize the last email I received regarding Silicon Valley Bank. " "Send the summary to the #test-zapier channel in slack.")) ``` """ from typing import Any, Dict, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT from langchain.utilities.zapier import ZapierNLAWrapper [docs]class ZapierNLARunAction(BaseTool): """ Args: action_id: a specific action ID (from list actions) of the action to execute (the set api_key must be associated with the action owner) instructions: a natural language instruction string for using the action (eg. "get the latest email from Mike Knoop" for "Gmail: find email" action) params: a dict, optional. Any params provided will *override* AI guesses from `instructions` (see "understanding the AI guessing flow" here: https://nla.zapier.com/api/v1/docs) """ api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) action_id: str params: Optional[dict] = None base_prompt: str = BASE_ZAPIER_TOOL_PROMPT zapier_description: str params_schema: Dict[str, str] = Field(default_factory=dict) name = "" description = "" @root_validator
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
74faef60f13c-3
name = "" description = "" @root_validator def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]: zapier_description = values["zapier_description"] params_schema = values["params_schema"] if "instructions" in params_schema: del params_schema["instructions"] # Ensure base prompt (if overrided) contains necessary input fields necessary_fields = {"{zapier_description}", "{params}"} if not all(field in values["base_prompt"] for field in necessary_fields): raise ValueError( "Your custom base Zapier prompt must contain input fields for " "{zapier_description} and {params}." ) values["name"] = zapier_description values["description"] = values["base_prompt"].format( zapier_description=zapier_description, params=str(list(params_schema.keys())), ) return values def _run( self, instructions: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.run_as_str(self.action_id, instructions, self.params) async def _arun( self, _: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" raise NotImplementedError("ZapierNLAListActions does not support async") ZapierNLARunAction.__doc__ = ( ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore )
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
74faef60f13c-4
# other useful actions [docs]class ZapierNLAListActions(BaseTool): """ Args: None """ name = "Zapier NLA: List Actions" description = BASE_ZAPIER_TOOL_PROMPT + ( "This tool returns a list of the user's exposed actions." ) api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) def _run( self, _: str = "", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.list_as_str() async def _arun( self, _: str = "", run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" raise NotImplementedError("ZapierNLAListActions does not support async") ZapierNLAListActions.__doc__ = ( ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore )
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
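Beyond the agent flow in the module docstring, the two tools can also be driven directly. A sketch of listing exposed actions and then running one; the action id is a placeholder you would take from the list output, and ZAPIER_NLA_API_KEY is assumed to be set:

```python
from langchain.tools.zapier.tool import ZapierNLAListActions, ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper

wrapper = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment

print(ZapierNLAListActions(api_wrapper=wrapper).run(""))

# ZapierNLARunAction needs zapier_description (and optionally params_schema)
# so its root validator can build the tool name and description.
run_action = ZapierNLARunAction(
    api_wrapper=wrapper,
    action_id="<action-id-from-list>",  # placeholder
    zapier_description="Gmail: Find Email",
)
print(run_action.run("Find the latest email from my manager"))
```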
4f14cc24fe48-0
Source code for langchain.tools.playwright.click from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class ClickToolInput(BaseModel): """Input for ClickTool.""" selector: str = Field(..., description="CSS selector for the element to click") [docs]class ClickTool(BaseBrowserTool): name: str = "click_element" description: str = "Click on an element with the given CSS selector" args_schema: Type[BaseModel] = ClickToolInput visible_only: bool = True """Whether to consider only visible elements.""" playwright_strict: bool = False """Whether to employ Playwright's strict mode when clicking on elements.""" playwright_timeout: float = 1_000 """Timeout (in ms) for Playwright to wait for element to be ready.""" def _selector_effective(self, selector: str) -> str: if not self.visible_only: return selector return f"{selector} >> visible=1" def _run( self, selector: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
4f14cc24fe48-1
selector_effective = self._selector_effective(selector=selector) from playwright.sync_api import TimeoutError as PlaywrightTimeoutError try: page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" async def _arun( self, selector: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.async_api import TimeoutError as PlaywrightTimeoutError try: await page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'"
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
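The browser tools all hang off a shared Playwright browser. A sketch using the sync helper from the same package (requires `pip install playwright` plus `playwright install`); the CSS selector is hypothetical:

```python
from langchain.tools.playwright.click import ClickTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
click = ClickTool(sync_browser=browser)
# Point the browser at a page first (see NavigateTool below), then:
print(click.run({"selector": "button#submit"}))  # hypothetical selector
```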
566eb0e2e4c2-0
Source code for langchain.tools.playwright.current_page from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class CurrentWebPageTool(BaseBrowserTool): name: str = "current_webpage" description: str = "Returns the URL of the current page" args_schema: Type[BaseModel] = BaseModel def _run( self, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) return str(page.url) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) return str(page.url)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/current_page.html
540978eb5c8b-0
Source code for langchain.tools.playwright.navigate_back from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) [docs]class NavigateBackTool(BaseBrowserTool): """Navigate back to the previous page in the browser history.""" name: str = "previous_webpage" description: str = "Navigate back to the previous page in the browser history" args_schema: Type[BaseModel] = BaseModel def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history" async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.go_back() if response: return (
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
540978eb5c8b-1
f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history"
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
c5c75868f82c-0
Source code for langchain.tools.playwright.extract_text from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class ExtractTextTool(BaseBrowserTool): name: str = "extract_text" description: str = "Extract all the text on the current webpage" args_schema: Type[BaseModel] = BaseModel @root_validator def check_bs_import(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
c5c75868f82c-1
) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup page = await aget_current_page(self.async_browser) html_content = await page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
3c597f1beb9f-0
Source code for langchain.tools.playwright.navigate from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class NavigateToolInput(BaseModel): """Input for NavigateToolInput.""" url: str = Field(..., description="url to navigate to") [docs]class NavigateTool(BaseBrowserTool): name: str = "navigate_browser" description: str = "Navigate a browser to the specified URL" args_schema: Type[BaseModel] = NavigateToolInput def _run( self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}" async def _arun( self, url: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.goto(url)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
3c597f1beb9f-1
status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}"
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
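A sketch chaining NavigateTool with the ExtractTextTool listed earlier over one shared sync browser; the helper name and the URL are taken as given from langchain.tools.playwright.utils and this documentation site, and the async variants mirror this via `arun` with an async browser:

```python
from langchain.tools.playwright.extract_text import ExtractTextTool
from langchain.tools.playwright.navigate import NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
navigate = NavigateTool(sync_browser=browser)
extract = ExtractTextTool(sync_browser=browser)

print(navigate.run({"url": "https://python.langchain.com"}))
print(extract.run({})[:200])  # first 200 characters of the page text
```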
1f110de81197-0
Source code for langchain.tools.playwright.get_elements from __future__ import annotations import json from typing import TYPE_CHECKING, List, Optional, Sequence, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: from playwright.async_api import Page as AsyncPage from playwright.sync_api import Page as SyncPage class GetElementsToolInput(BaseModel): """Input for GetElementsTool.""" selector: str = Field( ..., description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname", ) attributes: List[str] = Field( default_factory=lambda: ["innerText"], description="Set of attributes to retrieve for each element", ) async def _aget_elements( page: AsyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]: """Get elements matching the given CSS selector.""" elements = await page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = await element.inner_text() else: val = await element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results def _get_elements( page: SyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]:
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
1f110de81197-1
"""Get elements matching the given CSS selector.""" elements = page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = element.inner_text() else: val = element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results [docs]class GetElementsTool(BaseBrowserTool): name: str = "get_elements" description: str = ( "Retrieve elements in the current web page matching the given CSS selector" ) args_schema: Type[BaseModel] = GetElementsToolInput def _run( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool results = _get_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) async def _arun( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}")
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
1f110de81197-2
raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool results = await _aget_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
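A sketch pulling the text and href of every link on the current page; the output is the JSON string built by `_get_elements` above:

```python
from langchain.tools.playwright.get_elements import GetElementsTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
get_elements = GetElementsTool(sync_browser=browser)
# After navigating the browser to a page:
print(get_elements.run({"selector": "a", "attributes": ["innerText", "href"]}))
```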
7f095163482a-0
Source code for langchain.tools.playwright.extract_hyperlinks from __future__ import annotations import json from typing import TYPE_CHECKING, Any, Optional, Type from pydantic import BaseModel, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: pass class ExtractHyperlinksToolInput(BaseModel): """Input for ExtractHyperlinksTool.""" absolute_urls: bool = Field( default=False, description="Return absolute URLs instead of relative URLs", ) [docs]class ExtractHyperlinksTool(BaseBrowserTool): """Extract all hyperlinks on the page.""" name: str = "extract_hyperlinks" description: str = "Extract all hyperlinks on the current webpage" args_schema: Type[BaseModel] = ExtractHyperlinksToolInput @root_validator def check_bs_import(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values [docs] @staticmethod def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str: from urllib.parse import urljoin from bs4 import BeautifulSoup # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") # Find all the anchor elements and extract their href attributes
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
7f095163482a-1
anchors = soup.find_all("a") if absolute_urls: base_url = page.url links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors] else: links = [anchor.get("href", "") for anchor in anchors] # Return the list of links as a JSON string return json.dumps(links) def _run( self, absolute_urls: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() return self.scrape_page(page, html_content, absolute_urls) async def _arun( self, absolute_urls: bool = False, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) html_content = await page.content() return self.scrape_page(page, html_content, absolute_urls)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
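Because `scrape_page` is a plain static method and only touches `page.url` (and only when `absolute_urls=True`), the link extraction can be exercised without a browser. A sketch with a stub page and canned HTML (requires beautifulsoup4 and lxml):

```python
from types import SimpleNamespace

from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool

html = '<a href="/docs">Docs</a><a href="https://example.com">External</a>'
page_stub = SimpleNamespace(url="https://host.test")  # stands in for a Playwright page
print(ExtractHyperlinksTool.scrape_page(page_stub, html, absolute_urls=True))
# -> ["https://host.test/docs", "https://example.com"]
```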
2240086ae979-0
Source code for langchain.tools.steamship_image_generation.tool """This tool allows agents to generate images using Steamship. Steamship offers access to different third party image generation APIs using a single API key. Today the following models are supported: - Dall-E - Stable Diffusion To use this tool, you must first set the STEAMSHIP_API_KEY environment variable. """ from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Dict, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools import BaseTool from langchain.tools.steamship_image_generation.utils import make_image_public from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: pass class ModelName(str, Enum): """Supported Image Models for generation.""" DALL_E = "dall-e" STABLE_DIFFUSION = "stable-diffusion" SUPPORTED_IMAGE_SIZES = { ModelName.DALL_E: ("256x256", "512x512", "1024x1024"), ModelName.STABLE_DIFFUSION: ("512x512", "768x768"), } [docs]class SteamshipImageGenerationTool(BaseTool): """Tool used to generate images from a text-prompt.""" try: from steamship import Steamship except ImportError: pass model_name: ModelName size: Optional[str] = "512x512" steamship: Steamship return_urls: Optional[bool] = False name = "GenerateImage" description = ( "Useful for when you need to generate an image."
https://python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
2240086ae979-1
"Input: A detailed text-2-image prompt describing an image" "Output: the UUID of a generated image" ) @root_validator(pre=True) def validate_size(cls, values: Dict) -> Dict: if "size" in values: size = values["size"] model_name = values["model_name"] if size not in SUPPORTED_IMAGE_SIZES[model_name]: raise RuntimeError(f"size {size} is not supported by {model_name}") return values @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that the api key and python package exist in the environment.""" steamship_api_key = get_from_dict_or_env( values, "steamship_api_key", "STEAMSHIP_API_KEY" ) try: from steamship import Steamship except ImportError: raise ImportError( "steamship is not installed. " "Please install it with `pip install steamship`" ) steamship = Steamship( api_key=steamship_api_key, ) values["steamship"] = steamship if "steamship_api_key" in values: del values["steamship_api_key"] return values def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" image_generator = self.steamship.use_plugin( plugin_handle=self.model_name.value, config={"n": 1, "size": self.size} )
https://python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
2240086ae979-2
task = image_generator.generate(text=query, append_output_to_file=True) task.wait() blocks = task.output.blocks if len(blocks) > 0: if self.return_urls: return make_image_public(self.steamship, blocks[0]) else: return blocks[0].id raise RuntimeError(f"[{self.name}] Tool unable to generate image!") async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("GenerateImageTool does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
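A construction sketch: `validate_environment` builds the Steamship client from STEAMSHIP_API_KEY, so only the model (and optionally size) needs to be passed; the prompt is illustrative:

```python
import os

from langchain.tools.steamship_image_generation.tool import (
    ModelName,
    SteamshipImageGenerationTool,
)

os.environ["STEAMSHIP_API_KEY"] = os.environ.get("STEAMSHIP_API_KEY", "")

tool = SteamshipImageGenerationTool(model_name=ModelName.DALL_E, size="512x512")
print(tool.run("A watercolor painting of a lighthouse at dawn"))
# Returns the block UUID; set return_urls=True to get a public URL instead.
```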
a5a2ea34b30e-0
Source code for langchain.tools.ddg_search.tool """Tool for the DuckDuckGo search API.""" import warnings from typing import Any, Optional from pydantic import Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper [docs]class DuckDuckGoSearchRun(BaseTool): """Tool that adds the capability to query the DuckDuckGo search API.""" name = "DuckDuckGo Search" description = ( "A wrapper around DuckDuckGo Search. " "Useful for when you need to answer questions about current events. " "Input should be a search query." ) api_wrapper: DuckDuckGoSearchAPIWrapper = Field( default_factory=DuckDuckGoSearchAPIWrapper ) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return self.api_wrapper.run(query) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("DuckDuckGoSearch does not support async") [docs]class DuckDuckGoSearchResults(BaseTool): """Tool that queries the Duck Duck Go Search API and gets back json.""" name = "DuckDuckGo Results JSON" description = ( "A wrapper around Duck Duck Go Search. "
https://python.langchain.com/en/latest/_modules/langchain/tools/ddg_search/tool.html
a5a2ea34b30e-1
"Useful for when you need to answer questions about current events. " "Input should be a search query. Output is a JSON array of the query results" ) num_results: int = 4 api_wrapper: DuckDuckGoSearchAPIWrapper = Field( default_factory=DuckDuckGoSearchAPIWrapper ) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.results(query, self.num_results)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("DuckDuckGoSearchResults does not support async") def DuckDuckGoSearchTool(*args: Any, **kwargs: Any) -> DuckDuckGoSearchRun: warnings.warn( "DuckDuckGoSearchTool will be deprecated in the future. " "Please use DuckDuckGoSearchRun instead.", DeprecationWarning, ) return DuckDuckGoSearchRun(*args, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/tools/ddg_search/tool.html
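A usage sketch (needs the duckduckgo-search package; both tools build their own wrapper via default_factory, so no API key is required):

```python
from langchain.tools.ddg_search.tool import DuckDuckGoSearchResults, DuckDuckGoSearchRun

search = DuckDuckGoSearchRun()
print(search.run("LangChain"))

# The Results variant returns the stringified hit list; num_results caps it.
results = DuckDuckGoSearchResults(num_results=2)
print(results.run("LangChain"))
```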
1aa3cb562ebc-0
Source code for langchain.tools.file_management.copy import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileCopyInput(BaseModel): """Input for CopyFileTool.""" source_path: str = Field(..., description="Path of the file to copy") destination_path: str = Field(..., description="Path to save the copied file") [docs]class CopyFileTool(BaseFileToolMixin, BaseTool): name: str = "copy_file" args_schema: Type[BaseModel] = FileCopyInput description: str = "Create a copy of a file in a specified location" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path", value=destination_path ) try: shutil.copy2(source_path_, destination_path_, follow_symlinks=False) return f"File copied successfully from {source_path} to {destination_path}." except Exception as e:
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/copy.html
1aa3cb562ebc-1
return "Error: " + str(e) async def _arun( self, source_path: str, destination_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/copy.html
da55655d7386-0
Source code for langchain.tools.file_management.write from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class WriteFileInput(BaseModel): """Input for WriteFileTool.""" file_path: str = Field(..., description="name of file") text: str = Field(..., description="text to write to file") append: bool = Field( default=False, description="Whether to append to an existing file." ) [docs]class WriteFileTool(BaseFileToolMixin, BaseTool): name: str = "write_file" args_schema: Type[BaseModel] = WriteFileInput description: str = "Write file to disk" def _run( self, file_path: str, text: str, append: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = "a" if append else "w" with write_path.open(mode, encoding="utf-8") as f: f.write(text) return f"File written successfully to {file_path}." except Exception as e: return "Error: " + str(e)
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/write.html
da55655d7386-1
async def _arun( self, file_path: str, text: str, append: bool = False, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/write.html
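The file tools inherit a `root_dir` field from BaseFileToolMixin that sandboxes all paths. A write-then-read sketch under an assumed scratch directory, pairing this tool with the ReadFileTool listed further below:

```python
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool

root = "/tmp/langchain-scratch"  # illustrative sandbox directory
write_tool = WriteFileTool(root_dir=root)
read_tool = ReadFileTool(root_dir=root)

print(write_tool.run({"file_path": "notes.txt", "text": "hello"}))
print(read_tool.run({"file_path": "notes.txt"}))  # -> "hello"
```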
4f3a073246fb-0
Source code for langchain.tools.file_management.delete import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileDeleteInput(BaseModel): """Input for DeleteFileTool.""" file_path: str = Field(..., description="Path of the file to delete") [docs]class DeleteFileTool(BaseFileToolMixin, BaseTool): name: str = "file_delete" args_schema: Type[BaseModel] = FileDeleteInput description: str = "Delete a file" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: file_path_ = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not file_path_.exists(): return f"Error: no such file or directory: {file_path}" try: os.remove(file_path_) return f"File deleted successfully: {file_path}." except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/delete.html
9a5aef0c8088-0
Source code for langchain.tools.file_management.file_search import fnmatch import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileSearchInput(BaseModel): """Input for FileSearchTool.""" dir_path: str = Field( default=".", description="Subdirectory to search in.", ) pattern: str = Field( ..., description="Unix shell regex, where * matches everything.", ) [docs]class FileSearchTool(BaseFileToolMixin, BaseTool): name: str = "file_search" args_schema: Type[BaseModel] = FileSearchInput description: str = ( "Recursively search for files in a subdirectory that match the regex pattern" ) def _run( self, pattern: str, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) matches = [] try: for root, _, filenames in os.walk(dir_path_): for filename in fnmatch.filter(filenames, pattern): absolute_path = os.path.join(root, filename) relative_path = os.path.relpath(absolute_path, dir_path_) matches.append(relative_path) if matches:
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/file_search.html
9a5aef0c8088-1
return "\n".join(matches) else: return f"No files found for pattern {pattern} in directory {dir_path}" except Exception as e: return "Error: " + str(e) async def _arun( self, dir_path: str, pattern: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/file_search.html
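Note that despite the word "regex" in the tool description, matching goes through `fnmatch`, i.e. shell-style globs. A sketch:

```python
from langchain.tools.file_management.file_search import FileSearchTool

search_tool = FileSearchTool(root_dir="/tmp/langchain-scratch")  # illustrative sandbox
print(search_tool.run({"pattern": "*.txt", "dir_path": "."}))
```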
49f0dce14fea-0
Source code for langchain.tools.file_management.read from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class ReadFileInput(BaseModel): """Input for ReadFileTool.""" file_path: str = Field(..., description="name of file") [docs]class ReadFileTool(BaseFileToolMixin, BaseTool): name: str = "read_file" args_schema: Type[BaseModel] = ReadFileInput description: str = "Read file from disk" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: read_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not read_path.exists(): return f"Error: no such file or directory: {file_path}" try: with read_path.open("r", encoding="utf-8") as f: content = f.read() return content except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/read.html
64aa5a3abd8c-0
Source code for langchain.tools.file_management.move import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileMoveInput(BaseModel): """Input for MoveFileTool.""" source_path: str = Field(..., description="Path of the file to move") destination_path: str = Field(..., description="New path for the moved file") [docs]class MoveFileTool(BaseFileToolMixin, BaseTool): name: str = "move_file" args_schema: Type[BaseModel] = FileMoveInput description: str = "Move or rename a file from one location to another" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path", value=destination_path ) if not source_path_.exists(): return f"Error: no such file or directory {source_path}" try: # shutil.move expects str args in 3.8 shutil.move(str(source_path_), destination_path_)
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/move.html
64aa5a3abd8c-1
return f"File moved successfully from {source_path} to {destination_path}." except Exception as e: return "Error: " + str(e) async def _arun( self, source_path: str, destination_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/move.html
c1e20842d284-0
Source code for langchain.tools.file_management.list_dir import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class DirectoryListingInput(BaseModel): """Input for ListDirectoryTool.""" dir_path: str = Field(default=".", description="Subdirectory to list.") [docs]class ListDirectoryTool(BaseFileToolMixin, BaseTool): name: str = "list_directory" args_schema: Type[BaseModel] = DirectoryListingInput description: str = "List files and directories in a specified folder" def _run( self, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) try: entries = os.listdir(dir_path_) if entries: return "\n".join(entries) else: return f"No files found in directory {dir_path}" except Exception as e: return "Error: " + str(e) async def _arun( self, dir_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/list_dir.html
ec8be752ce16-0
Source code for langchain.vectorstores.atlas """Wrapper around Atlas by Nomic.""" from __future__ import annotations import logging import uuid from typing import Any, Iterable, List, Optional, Type import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger(__name__) [docs]class AtlasDB(VectorStore): """Wrapper around Atlas: Nomic's neural database and rhizomatic instrument. To use, you should have the ``nomic`` python package installed. Example: .. code-block:: python from langchain.vectorstores import AtlasDB from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = AtlasDB("my_project", embeddings.embed_query) """ _ATLAS_DEFAULT_ID_FIELD = "atlas_id" def __init__( self, name: str, embedding_function: Optional[Embeddings] = None, api_key: Optional[str] = None, description: str = "A description for your project", is_public: bool = True, reset_project_if_exists: bool = False, ) -> None: """ Initialize the Atlas Client Args: name (str): The name of your project. If the project already exists, it will be loaded. embedding_function (Optional[Callable]): An optional function used for embedding your data. If None, data will be embedded with Nomic's embed model. api_key (str): Your nomic API key description (str): A description for your project. is_public (bool): Whether your project is publicly accessible.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-1
True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. """ try: import nomic from nomic import AtlasProject except ImportError: raise ValueError( "Could not import nomic python package. " "Please install it with `pip install nomic`." ) if api_key is None: raise ValueError("No API key provided. Sign up at atlas.nomic.ai!") nomic.login(api_key) self._embedding_function = embedding_function modality = "text" if self._embedding_function is not None: modality = "embedding" # Check if the project exists, create it if not self.project = AtlasProject( name=name, description=description, modality=modality, is_public=is_public, reset_project_if_exists=reset_project_if_exists, unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, ) self.project._latest_project_state() [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh: bool = True, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-2
ids (Optional[List[str]]): An optional list of ids. refresh(bool): Whether or not to refresh indices with the updated data. Default True. Returns: List[str]: List of IDs of the added texts. """ if ( metadatas is not None and len(metadatas) > 0 and "text" in metadatas[0].keys() ): raise ValueError("Cannot accept key text in metadata!") texts = list(texts) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] # Embedding upload case if self._embedding_function is not None: _embeddings = self._embedding_function.embed_documents(texts) embeddings = np.stack(_embeddings) if metadatas is None: data = [ {AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]} for i, _ in enumerate(texts) ] else: for i in range(len(metadatas)): metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i] metadatas[i]["text"] = texts[i] data = metadatas self.project._validate_map_data_inputs( [], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data ) with self.project.wait_for_project_lock(): self.project.add_embeddings(embeddings=embeddings, data=data) # Text upload case else: if metadatas is None: data = [
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-3
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]} for i, text in enumerate(texts) ] else: for i, text in enumerate(texts): metadatas[i]["text"] = text metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i] data = metadatas self.project._validate_map_data_inputs( [], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data ) with self.project.wait_for_project_lock(): self.project.add_text(data) if refresh: if len(self.project.indices) > 0: with self.project.wait_for_project_lock(): self.project.rebuild_maps() return ids [docs] def create_index(self, **kwargs: Any) -> Any: """Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail. """ with self.project.wait_for_project_lock(): return self.project.create_index(**kwargs) [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """Run similarity search with AtlasDB Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. Returns: List[Document]: List of documents most similar to the query text. """ if self._embedding_function is None: raise NotImplementedError(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-4
""" if self._embedding_function is None: raise NotImplementedError( "AtlasDB requires an embedding_function for text similarity search!" ) _embedding = self._embedding_function.embed_documents([query])[0] embedding = np.array(_embedding).reshape(1, -1) with self.project.wait_for_project_lock(): neighbors, _ = self.project.projections[0].vector_search( queries=embedding, k=k ) datas = self.project.get_data(ids=neighbors[0]) docs = [ Document(page_content=datas[i]["text"], metadata=datas[i]) for i, neighbor in enumerate(neighbors) ] return docs [docs] @classmethod def from_texts( cls: Type[AtlasDB], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, description: str = "A description for your project", is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any, ) -> AtlasDB: """Create an AtlasDB vectorstore from a raw documents. Args: texts (List[str]): The list of texts to ingest. name (str): Name of the project to create. api_key (str): Your nomic API key, embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-5
ids (Optional[List[str]]): Optional list of document IDs. If None, ids will be auto created description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]): Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns: AtlasDB: Nomic's neural database and finest rhizomatic instrument """ if name is None or api_key is None: raise ValueError("`name` and `api_key` cannot be None.") # Inject relevant kwargs all_index_kwargs = {"name": name + "_index", "indexed_field": "text"} if index_kwargs is not None: for k, v in index_kwargs.items(): all_index_kwargs[k] = v # Build project atlasDB = cls( name, embedding_function=embedding, api_key=api_key, description=description, is_public=is_public, reset_project_if_exists=reset_project_if_exists, ) with atlasDB.project.wait_for_project_lock(): atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids) atlasDB.create_index(**all_index_kwargs) return atlasDB [docs] @classmethod def from_documents( cls: Type[AtlasDB], documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-6
name: Optional[str] = None, api_key: Optional[str] = None, persist_directory: Optional[str] = None, description: str = "A description for your project", is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any, ) -> AtlasDB: """Create an AtlasDB vectorstore from a list of documents. Args: name (str): Name of the collection to create. api_key (str): Your nomic API key, documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. ids (Optional[List[str]]): Optional list of document IDs. If None, ids will be auto created description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]): Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns: AtlasDB: Nomic's neural database and finest rhizomatic instrument """ if name is None or api_key is None: raise ValueError("`name` and `api_key` cannot be None.") texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( name=name, api_key=api_key,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
ec8be752ce16-7
texts=texts, embedding=embedding, metadatas=metadatas, ids=ids, description=description, is_public=is_public, reset_project_if_exists=reset_project_if_exists, index_kwargs=index_kwargs, )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
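An end-to-end sketch of the classmethod above; the project name and key are placeholders, and OpenAIEmbeddings is just one possible embedding_function:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AtlasDB

db = AtlasDB.from_texts(
    texts=["LangChain ships many tools.", "Atlas maps embeddings."],
    embedding=OpenAIEmbeddings(),
    name="my_langchain_project",   # placeholder project name
    api_key="NOMIC_API_KEY_HERE",  # placeholder key
)
print(db.similarity_search("What tools does LangChain ship?", k=1))
```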
d0b6daf2adb4-0
Source code for langchain.vectorstores.supabase from __future__ import annotations from itertools import repeat from typing import ( TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type, Union, ) import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import supabase [docs]class SupabaseVectorStore(VectorStore): """VectorStore for a Supabase postgres database. Assumes you have the `pgvector` extension installed and a `match_documents` (or similar) function. For more details: https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase You can implement your own `match_documents` function in order to limit the search space to a subset of documents based on your own authorization or business logic. Note that the Supabase Python client does not yet support async operations. If you'd like to use `max_marginal_relevance_search`, please review the instructions below on modifying the `match_documents` function to return matched embeddings. """ _client: supabase.client.Client # This is the embedding function. Don't confuse with the embedding vectors. # We should perhaps rename the underlying Embedding base class to EmbeddingFunction # or something _embedding: Embeddings table_name: str query_name: str def __init__( self, client: supabase.client.Client, embedding: Embeddings, table_name: str,
        embedding: Embeddings,
        table_name: str,
        query_name: Union[str, None] = None,
    ) -> None:
        """Initialize with supabase client."""
        try:
            import supabase  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import supabase python package. "
                "Please install it with `pip install supabase`."
            )
        self._client = client
        self._embedding: Embeddings = embedding
        self.table_name = table_name or "documents"
        self.query_name = query_name or "match_documents"

[docs]    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> List[str]:
        docs = self._texts_to_documents(texts, metadatas)
        vectors = self._embedding.embed_documents(list(texts))
        return self.add_vectors(vectors, docs)

[docs]    @classmethod
    def from_texts(
        cls: Type["SupabaseVectorStore"],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        client: Optional[supabase.client.Client] = None,
        table_name: Optional[str] = "documents",
        query_name: Union[str, None] = "match_documents",
        **kwargs: Any,
    ) -> "SupabaseVectorStore":
        """Return VectorStore initialized from texts and embeddings."""
        if not client:
            raise ValueError("Supabase client is required.")
        if not table_name:
            raise ValueError("Supabase document table_name is required.")

        embeddings = embedding.embed_documents(texts)
        docs = cls._texts_to_documents(texts, metadatas)
        _ids = cls._add_vectors(client, table_name, embeddings, docs)
        return cls(
            client=client,
            embedding=embedding,
            table_name=table_name,
            query_name=query_name,
        )

[docs]    def add_vectors(
        self, vectors: List[List[float]], documents: List[Document]
    ) -> List[str]:
        return self._add_vectors(self._client, self.table_name, vectors, documents)

[docs]    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector(vectors[0], k)

[docs]    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
        documents = [doc for doc, _ in result]
        return documents

[docs]    def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)

[docs]    def similarity_search_by_vector_with_relevance_scores(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

[docs]    def similarity_search_by_vector_returning_embeddings(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
                # This is a hack to convert the string to a numpy array.
                np.fromstring(
                    search.get("embedding", "").strip("[]"), np.float32, sep=","
                ),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    @staticmethod
    def _texts_to_documents(
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict[Any, Any]]] = None,
    ) -> List[Document]:
        """Return list of Documents from list of texts and metadatas."""
        if metadatas is None:
            metadatas = repeat({})

        docs = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in zip(texts, metadatas)
        ]

        return docs

    @staticmethod
    def _add_vectors(
        client: supabase.client.Client,
        table_name: str,
        vectors: List[List[float]],
        documents: List[Document],
    ) -> List[str]:
        """Add vectors to Supabase table."""
        rows: List[dict[str, Any]] = [
            {
                "content": documents[idx].page_content,
                "embedding": embedding,
                "metadata": documents[idx].metadata,  # type: ignore
            }
            for idx, embedding in enumerate(vectors)
        ]

        # According to the SupabaseVectorStore JS implementation, the best chunk
        # size is 500.
        chunk_size = 500
        id_list: List[str] = []
        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

            result = client.from_(table_name).insert(chunk).execute()  # type: ignore

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")

            # VectorStore.add_vectors returns ids as strings
            ids = [str(i.get("id")) for i in result.data if i.get("id")]

            id_list.extend(ids)
        return id_list

[docs]    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        result = self.similarity_search_by_vector_returning_embeddings(
            embedding, fetch_k
        )

        matched_documents = [doc_tuple[0] for doc_tuple in result]
        matched_embeddings = [doc_tuple[2] for doc_tuple in result]

        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            matched_embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )

        filtered_documents = [matched_documents[i] for i in mmr_selected]

        return filtered_documents

[docs]    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.

        `max_marginal_relevance_search` requires that `query_name` returns
        matched embeddings alongside the matched documents. The following
        function demonstrates how to do this:

        ```sql
        CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                                   match_count int)
            RETURNS TABLE(
                id bigint,
                content text,
                metadata jsonb,
                embedding vector(1536),
                similarity float)
            LANGUAGE plpgsql
            AS $$
            #variable_conflict use_column
        BEGIN
            RETURN query
            SELECT
                id,
                content,
                metadata,
                embedding,
                1 - (docstore.embedding <=> query_embedding) AS similarity
            FROM
                docstore
            ORDER BY
                docstore.embedding <=> query_embedding
            LIMIT match_count;
        END;
        $$;
        ```
        """
        embedding = self._embedding.embed_documents([query])
        docs = self.max_marginal_relevance_search_by_vector(
            embedding[0], k, fetch_k, lambda_mult=lambda_mult
        )
        return docs
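Putting the pieces together, a minimal usage sketch, assuming a Supabase project with the `pgvector` extension, a `documents` table with an `embedding` column, and a `match_documents` function as described in the guide linked from the class docstring. The environment variable names are placeholders:

```python
import os

from supabase import create_client

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore

# Placeholder environment variables; use your own project's URL and service key.
supabase_client = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)

vector_store = SupabaseVectorStore.from_texts(
    texts=["hello world", "goodbye world"],
    embedding=OpenAIEmbeddings(),
    client=supabase_client,
    table_name="documents",        # table with a pgvector `embedding` column
    query_name="match_documents",  # the SQL function your database exposes
)

docs = vector_store.similarity_search("hello", k=2)
```

If `query_name` points at a function that also returns embeddings (like `match_documents_embeddings` in the docstring above), `max_marginal_relevance_search` becomes usable as well.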
Source code for langchain.vectorstores.tair

"""Wrapper around Tair Vector."""
from __future__ import annotations

import json
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger(__name__)

def _uuid_key() -> str:
    return uuid.uuid4().hex

[docs]class Tair(VectorStore):
    def __init__(
        self,
        embedding_function: Embeddings,
        url: str,
        index_name: str,
        content_key: str = "content",
        metadata_key: str = "metadata",
        search_params: Optional[dict] = None,
        **kwargs: Any,
    ):
        self.embedding_function = embedding_function
        self.index_name = index_name
        try:
            from tair import Tair as TairClient
        except ImportError:
            raise ValueError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        try:
            # connect to tair from url
            client = TairClient.from_url(url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Tair failed to connect: {e}")
        self.client = client
        self.content_key = content_key
        self.metadata_key = metadata_key
        self.search_params = search_params

[docs]    def create_index_if_not_exist(
        self,
        dim: int,
        distance_type: str,
        index_type: str,
        data_type: str,
        **kwargs: Any,
    ) -> bool:
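A minimal sketch of constructing the wrapper defined above, assuming a reachable Tair instance. The URL, index name, and index parameters below are placeholders; consult TairVector's documentation for the values valid in your deployment:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.tair import Tair

# Placeholder URL; Tair speaks the Redis protocol, so a redis:// URL works.
vector_store = Tair(
    embedding_function=OpenAIEmbeddings(),
    url="redis://localhost:6379",
    index_name="langchain_demo",
)

# Hypothetical index parameters for a 1536-dimensional embedding space.
vector_store.create_index_if_not_exist(
    dim=1536, distance_type="L2", index_type="HNSW", data_type="FLOAT32"
)
```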