id | text | source
---|---|---|
78bfa57d15bc-0 | Source code for langchain.document_loaders.figma
"""Loader that loads Figma files json dump."""
import json
import urllib.request
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict
[docs]class FigmaFileLoader(BaseLoader):
"""Loader that loads Figma file json."""
def __init__(self, access_token: str, ids: str, key: str):
"""Initialize with access token, ids, and key."""
self.access_token = access_token
self.ids = ids
self.key = key
def _construct_figma_api_url(self) -> str:
api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
self.key,
self.ids,
)
return api_url
def _get_figma_file(self) -> Any:
"""Get Figma file from Figma REST API."""
headers = {"X-Figma-Token": self.access_token}
request = urllib.request.Request(
self._construct_figma_api_url(), headers=headers
)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
[docs] def load(self) -> List[Document]:
"""Load file"""
data = self._get_figma_file()
text = stringify_dict(data)
metadata = {"source": self._construct_figma_api_url()}
return [Document(page_content=text, metadata=metadata)]
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/figma.html |
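A minimal usage sketch (the token, node ids, and file key below are placeholders, not real values):

from langchain.document_loaders.figma import FigmaFileLoader

loader = FigmaFileLoader(
    access_token="<figma-access-token>",  # placeholder
    ids="1:2,1:3",                        # comma-separated node ids
    key="<figma-file-key>",               # taken from the file URL
)
docs = loader.load()  # a single Document holding the stringified node JSON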
c9a66137cdca-0 | Source code for langchain.document_loaders.srt
"""Loader for .srt (subtitle) files."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class SRTLoader(BaseLoader):
"""Loader for .srt (subtitle) files."""
def __init__(self, file_path: str):
"""Initialize with file path."""
try:
import pysrt # noqa:F401
except ImportError:
raise ImportError(
"package `pysrt` not found, please install it with `pip install pysrt`"
)
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load using pysrt file."""
import pysrt
parsed_info = pysrt.open(self.file_path)
text = " ".join([t.text for t in parsed_info])
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/srt.html |
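A short usage sketch (assumes `pysrt` is installed; the file name is a placeholder):

from langchain.document_loaders.srt import SRTLoader

docs = SRTLoader("sample.srt").load()  # subtitle texts joined into one Document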
9649f6d70c86-0 | Source code for langchain.document_loaders.discord
"""Load from Discord chat dump"""
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import pandas as pd
[docs]class DiscordChatLoader(BaseLoader):
"""Load Discord chat logs."""
def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"):
"""Initialize with a Pandas DataFrame containing chat logs."""
if not isinstance(chat_log, pd.DataFrame):
raise ValueError(
f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}"
)
self.chat_log = chat_log
self.user_id_col = user_id_col
[docs] def load(self) -> List[Document]:
"""Load all chat messages."""
result = []
for _, row in self.chat_log.iterrows():
user_id = row[self.user_id_col]
metadata = row.to_dict()
metadata.pop(self.user_id_col)
result.append(Document(page_content=user_id, metadata=metadata))
return result
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/discord.html |
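A minimal sketch of the expected input (the columns are illustrative; only the user-id column name must match `user_id_col`):

import pandas as pd
from langchain.document_loaders.discord import DiscordChatLoader

chat_log = pd.DataFrame({"ID": ["user1", "user2"], "Content": ["hi", "hello"]})
loader = DiscordChatLoader(chat_log, user_id_col="ID")
docs = loader.load()  # one Document per row; the other columns become metadata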
c7d903d9e8b9-0 | Source code for langchain.document_loaders.dataframe
"""Load from Dataframe object"""
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DataFrameLoader(BaseLoader):
"""Load Pandas DataFrames."""
def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object."""
import pandas as pd
if not isinstance(data_frame, pd.DataFrame):
raise ValueError(
f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
)
self.data_frame = data_frame
self.page_content_column = page_content_column
[docs] def load(self) -> List[Document]:
"""Load from the dataframe."""
result = []
# For very large dataframes, this needs to yield instead of building a list
# but that would require changing the return type to a generator for BaseLoader
# and all its subclasses, which is a bigger refactor. Marking as future TODO.
# This change will allow us to extend this to Spark and Dask dataframes.
for _, row in self.data_frame.iterrows():
text = row[self.page_content_column]
metadata = row.to_dict()
metadata.pop(self.page_content_column)
result.append(Document(page_content=text, metadata=metadata))
return result
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html |
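A quick usage sketch (column names are arbitrary, but `page_content_column` must name an existing column):

import pandas as pd
from langchain.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["doc one", "doc two"], "author": ["a", "b"]})
docs = DataFrameLoader(df, page_content_column="text").load()
# page_content comes from "text"; the remaining columns land in metadata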
e6661aa831fc-0 | Source code for langchain.document_loaders.html
"""Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load HTML files."""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs)
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/html.html |
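Usage follows the common unstructured-loader pattern (assumes the `unstructured` package is installed; the file name is a placeholder):

from langchain.document_loaders.html import UnstructuredHTMLLoader

docs = UnstructuredHTMLLoader("page.html").load()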
888be621d9e9-0 | Source code for langchain.document_loaders.reddit
"""Reddit document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import praw
def _dependable_praw_import() -> praw:
try:
import praw
except ImportError:
raise ValueError(
"praw package not found, please install it with `pip install praw`"
)
return praw
[docs]class RedditPostsLoader(BaseLoader):
"""Reddit posts loader.
Read posts on a subreddit.
First you need to go to
https://www.reddit.com/prefs/apps/
and create your application
"""
def __init__(
self,
client_id: str,
client_secret: str,
user_agent: str,
search_queries: Sequence[str],
mode: str,
categories: Sequence[str] = ["new"],
number_posts: Optional[int] = 10,
):
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts
[docs] def load(self) -> List[Document]:
"""Load reddits."""
praw = _dependable_praw_import()
reddit = praw.Reddit(
client_id=self.client_id,
client_secret=self.client_secret,
user_agent=self.user_agent,
)
results: List[Document] = []
if self.mode == "subreddit":
for search_query in self.search_queries:
for category in self.categories:
docs = self._subreddit_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
elif self.mode == "username":
for search_query in self.search_queries:
for category in self.categories:
docs = self._user_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
else:
raise ValueError(
"mode not correct, please enter 'username' or 'subreddit' as mode"
)
return results
def _subreddit_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
subreddit = reddit.subreddit(search_query)
method = getattr(subreddit, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
def _user_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/reddit.html |
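A usage sketch (the credentials are placeholders; `mode` picks between the subreddit and username branches of `load` above):

from langchain.document_loaders.reddit import RedditPostsLoader

loader = RedditPostsLoader(
    client_id="<client-id>",          # placeholder
    client_secret="<client-secret>",  # placeholder
    user_agent="my-app/0.1",
    search_queries=["langchain"],
    mode="subreddit",                 # or "username"
    categories=["new", "hot"],
    number_posts=10,
)
docs = loader.load()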
80a19e9a566b-0 | Source code for langchain.document_loaders.duckdb_loader
from typing import Dict, List, Optional, cast
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DuckDBLoader(BaseLoader):
"""Loads a query result from DuckDB into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
database: str = ":memory:",
read_only: bool = False,
config: Optional[Dict[str, str]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
[docs] def load(self) -> List[Document]:
try:
import duckdb
except ImportError:
raise ImportError(
"Could not import duckdb python package. "
"Please install it with `pip install duckdb`."
)
docs = []
with duckdb.connect(
database=self.database, read_only=self.read_only, config=self.config
) as con:
query_result = con.execute(self.query)
results = query_result.fetchall()
description = cast(list, query_result.description)
field_names = [c[0] for c in description]
if self.page_content_columns is None:
page_content_columns = field_names
else:
page_content_columns = self.page_content_columns
if self.metadata_columns is None:
metadata_columns = []
else:
metadata_columns = self.metadata_columns
for result in results:
page_content = "\n".join(
f"{column}: {result[field_names.index(column)]}"
for column in page_content_columns
)
metadata = {
column: result[field_names.index(column)]
for column in metadata_columns
}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/duckdb_loader.html |
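A self-contained sketch against the default in-memory database, so it needs no external file:

from langchain.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    query="SELECT 1 AS id, 'hello' AS text",
    page_content_columns=["text"],
    metadata_columns=["id"],
)
docs = loader.load()  # page_content "text: hello", metadata {"id": 1}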
02af49231f7e-0 | Source code for langchain.document_loaders.python
import tokenize
from langchain.document_loaders.text import TextLoader
[docs]class PythonLoader(TextLoader):
"""
Load Python files, respecting any non-default encoding if specified.
"""
def __init__(self, file_path: str):
with open(file_path, "rb") as f:
encoding, _ = tokenize.detect_encoding(f.readline)
super().__init__(file_path=file_path, encoding=encoding)
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/python.html |
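A usage sketch (the path is a placeholder; the file's encoding cookie, if any, is detected automatically):

from langchain.document_loaders.python import PythonLoader

docs = PythonLoader("example.py").load()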
0af245e9aea1-0 | Source code for langchain.document_loaders.blockchain
import os
import re
import time
from enum import Enum
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BlockchainType(Enum):
ETH_MAINNET = "eth-mainnet"
ETH_GOERLI = "eth-goerli"
POLYGON_MAINNET = "polygon-mainnet"
POLYGON_MUMBAI = "polygon-mumbai"
[docs]class BlockchainDocumentLoader(BaseLoader):
"""Loads elements from a blockchain smart contract into Langchain documents.
The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
Polygon mainnet, and Polygon Mumbai testnet.
If no BlockchainType is specified, the default is Ethereum mainnet.
The Loader uses the Alchemy API to interact with the blockchain.
ALCHEMY_API_KEY environment variable must be set to use this loader.
The API returns 100 NFTs per request and can be paginated using the
startToken parameter.
If get_all_tokens is set to True, the loader will get all tokens
on the contract. Note that for contracts with a large number of tokens,
this may take a long time (e.g. 10k tokens is 100 requests).
Default value is false for this reason.
The max_execution_time (sec) can be set to limit the execution time
of the loader.
Future versions of this loader can:
- Support additional Alchemy APIs (e.g. getTransactions, etc.)
- Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
"""
def __init__(
self,
contract_address: str,
blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
api_key: str = "docs-demo",
startToken: str = "",
get_all_tokens: bool = False,
max_execution_time: Optional[int] = None,
):
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
self.startToken = startToken
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError("Alchemy API key not provided.")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
raise ValueError(f"Invalid contract address {self.contract_address}")
[docs] def load(self) -> List[Document]:
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
f"{self.api_key}/getNFTsForCollection?withMetadata="
f"True&contractAddress={self.contract_address}"
f"&startToken={current_start_token}"
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["nfts"]
if not items:
break
for item in items:
content = str(item)
tokenId = item["id"]["tokenId"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": tokenId,
}
result.append(Document(page_content=content, metadata=metadata))
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
# get the start token for the next API call from the last item in array
current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
if not result:
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
return result
# add one to the tokenId, ensuring the correct tokenId format is used
def _get_next_tokenId(self, tokenId: str) -> str:
value_type = self._detect_value_type(tokenId)
if value_type == "hex_0x":
value_int = int(tokenId, 16)
elif value_type == "hex_0xbf":
value_int = int(tokenId[2:], 16)
else:
value_int = int(tokenId)
result = value_int + 1
if value_type == "hex_0x":
return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
elif value_type == "hex_0xbf":
return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
else:
return str(result)
# A smart contract can use different formats for the tokenId
@staticmethod
def _detect_value_type(tokenId: str) -> str:
if isinstance(tokenId, int):
return "int"
# check the more specific "0xbf" prefix first; after a plain "0x" test it would be unreachable
elif tokenId.startswith("0xbf"):
return "hex_0xbf"
elif tokenId.startswith("0x"):
return "hex_0x"
else:
return "int"
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html |
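A usage sketch (the contract address is a zero-valued placeholder that merely satisfies the format check; a real run needs ALCHEMY_API_KEY or the rate-limited demo key):

from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0x0000000000000000000000000000000000000000",  # placeholder
    blockchainType=BlockchainType.ETH_MAINNET,
    get_all_tokens=False,  # stop after the first page of 100 NFTs
)
docs = loader.load()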
129623f3f7bd-0 | Source code for langchain.document_loaders.azure_blob_storage_container
"""Loading logic for loading documents from an Azure Blob Storage container."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader
[docs]class AzureBlobStorageContainerLoader(BaseLoader):
"""Loading logic for loading documents from Azure Blob Storage."""
def __init__(self, conn_str: str, container: str, prefix: str = ""):
"""Initialize with connection string, container and blob prefix."""
self.conn_str = conn_str
self.container = container
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ValueError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
container = ContainerClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container
)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(
self.conn_str, self.container, blob.name # type: ignore
)
docs.extend(loader.load())
return docs
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_container.html |
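A usage sketch (connection string and container name are placeholders):

from langchain.document_loaders.azure_blob_storage_container import (
    AzureBlobStorageContainerLoader,
)

loader = AzureBlobStorageContainerLoader(
    conn_str="<azure-connection-string>",  # placeholder
    container="my-container",              # placeholder
    prefix="reports/",                     # only load blobs under this prefix
)
docs = loader.load()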
1395d09fda06-0 | Source code for langchain.document_loaders.bilibili
import json
import re
import warnings
from typing import List, Tuple
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class BiliBiliLoader(BaseLoader):
"""Loader that loads bilibili transcripts."""
def __init__(self, video_urls: List[str]):
"""Initialize with bilibili url."""
self.video_urls = video_urls
[docs] def load(self) -> List[Document]:
"""Load from bilibili url."""
results = []
for url in self.video_urls:
transcript, video_info = self._get_bilibili_subs_and_info(url)
doc = Document(page_content=transcript, metadata=video_info)
results.append(doc)
return results
def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
try:
from bilibili_api import sync, video
except ImportError:
raise ValueError(
"requests package not found, please install it with "
"`pip install bilibili-api-python`"
)
bvid = re.search(r"BV\w+", url)
if bvid is not None:
v = video.Video(bvid=bvid.group())
else:
aid = re.search(r"av[0-9]+", url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]))
except AttributeError:
raise ValueError(f"{url} is not bilibili url.")
else:
raise ValueError(f"{url} is not bilibili url.")
video_info = sync(v.get_info())
video_info.update({"url": url})
# Get subtitle url
subtitle = video_info.pop("subtitle")
sub_list = subtitle["list"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
raw_transcript_with_meta_info = (
f"Video Title: {video_info['title']},"
f"description: {video_info['desc']}\n\n"
f"Transcript: {raw_transcript}"
)
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ""
warnings.warn(
f"""
No subtitles found for video: {url}.
Returning an empty transcript.
"""
)
return raw_transcript, video_info
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/bilibili.html |
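A usage sketch (the URL is a placeholder in the expected BV-id form):

from langchain.document_loaders.bilibili import BiliBiliLoader

loader = BiliBiliLoader(["https://www.bilibili.com/video/BV1xx411c7mD"])
docs = loader.load()  # transcript plus video info metadata, when subtitles exist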
6caf23a890b9-0 | Source code for langchain.document_loaders.docugami
"""Loader that loads processed documents from Docugami."""
import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
[docs]class DocugamiLoader(BaseLoader, BaseModel):
"""Loader that loads processed docs from Docugami.
To use, you should have the ``lxml`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
docset_id: Optional[str]
document_ids: Optional[Sequence[str]]
file_paths: Optional[Sequence[Union[Path, str]]]
min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID."""
if values.get("file_paths") and values.get("docset_id"): | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
6caf23a890b9-1 | if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
# helpers
def _xpath_qname_for_chunk(chunk: Any) -> str:
"""Get the xpath qname for a chunk."""
qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"
parent = chunk.getparent()
if parent is not None:
doppelgangers = [x for x in parent if x.tag == chunk.tag]
if len(doppelgangers) > 1:
idx_of_self = doppelgangers.index(chunk)
qname = f"{qname}[{idx_of_self + 1}]"
return qname
def _xpath_for_chunk(chunk: Any) -> str:
"""Get the xpath for a chunk."""
ancestor_chain = chunk.xpath("ancestor-or-self::*")
return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)
def _structure_value(node: Any) -> str:
"""Get the structure value for a node."""
structure = (
"table"
if node.tag == TABLE_NAME
else node.attrib["structure"]
if "structure" in node.attrib
else None
)
return structure
def _is_structural(node: Any) -> bool:
"""Check if a node is structural."""
return _structure_value(node) is not None
def _is_heading(node: Any) -> bool:
"""Check if a node is a heading."""
structure = _structure_value(node)
return structure is not None and structure.lower().startswith("h")
def _get_text(node: Any) -> str:
"""Get the text of a node."""
return " ".join(node.itertext()).strip()
def _has_structural_descendant(node: Any) -> bool:
"""Check if a node has a structural descendant."""
for child in node:
if _is_structural(child) or _has_structural_descendant(child):
return True
return False
def _leaf_structural_nodes(node: Any) -> List:
"""Get the leaf structural nodes of a node."""
if _is_structural(node) and not _has_structural_descendant(node):
return [node]
else:
leaf_nodes = []
for child in node:
leaf_nodes.extend(_leaf_structural_nodes(child))
return leaf_nodes
def _create_doc(node: Any, text: str) -> Document:
"""Create a Document from a node and text.""" | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
6caf23a890b9-3 | """Create a Document from a node and text."""
metadata = {
XPATH_KEY: _xpath_for_chunk(node),
DOCUMENT_ID_KEY: document["id"],
DOCUMENT_NAME_KEY: document["name"],
STRUCTURE_KEY: node.attrib.get("structure", ""),
TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
}
if doc_metadata:
metadata.update(doc_metadata)
return Document(
page_content=text,
metadata=metadata,
)
# parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
chunks: List[Document] = []
prev_small_chunk_text = None
for node in _leaf_structural_nodes(root):
text = _get_text(node)
if prev_small_chunk_text:
text = prev_small_chunk_text + " " + text
prev_small_chunk_text = None
if _is_heading(node) or len(text) < self.min_chunk_size:
# Save headings or other small chunks to be appended to the next chunk
prev_small_chunk_text = text
else:
chunks.append(_create_doc(node, text))
if prev_small_chunk_text and len(chunks) > 0:
# small chunk at the end left over, just append to last chunk
chunks[-1].page_content += " " + prev_small_chunk_text
return chunks
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url, | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
6caf23a890b9-4 | while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get("id")
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
per_file_metadata = {}
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == f"{project_id}.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc["id"]
metadata: Dict = {}
# the evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//wp:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./wp:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./wp:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value
per_file_metadata[doc_id] = metadata
else:
raise Exception( | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
6caf23a890b9-6 | per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
) -> List[Document]:
"""Load chunks for a document."""
document_id = document["id"]
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(document, response.content, doc_metadata)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
[docs] def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d["id"] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata = {}
if _project_details:
# if there are any projects for this docset, load project metadata
for project in _project_details:
metadata = self._metadata_for_project(project) | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
6caf23a890b9-7 | for project in _project_details:
metadata = self._metadata_for_project(project)
combined_project_metadata.update(metadata)
for doc in _document_details:
doc_metadata = combined_project_metadata.get(doc["id"])
chunks += self._load_chunks_for_document(
self.docset_id, doc, doc_metadata
)
elif self.file_paths:
# local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
{
DOCUMENT_ID_KEY: path.name,
DOCUMENT_NAME_KEY: path.name,
},
file.read(),
)
return chunks
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html |
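A usage sketch for remote mode (the docset id is a placeholder; DOCUGAMI_API_KEY must be set, as the validator above enforces):

from langchain.document_loaders.docugami import DocugamiLoader

loader = DocugamiLoader(docset_id="<docset-id>")  # placeholder
chunks = loader.load()  # one Document per structural chunk, with xpath metadata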
1d6b119288bc-0 | Source code for langchain.document_loaders.s3_file
"""Loading logic for loading documents from an s3 file."""
import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class S3FileLoader(BaseLoader):
"""Loading logic for loading documents from s3."""
def __init__(self, bucket: str, key: str):
"""Initialize with bucket and key name."""
self.bucket = bucket
self.key = key
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
"Could not import `boto3` python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.client("s3")
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
s3.download_file(self.bucket, self.key, file_path)
loader = UnstructuredFileLoader(file_path)
return loader.load()
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_file.html |
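A usage sketch (bucket and key are placeholders; boto3 resolves credentials from the environment):

from langchain.document_loaders.s3_file import S3FileLoader

docs = S3FileLoader(bucket="my-bucket", key="docs/report.pdf").load()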
509ec1e4afc2-0 | Source code for langchain.document_loaders.directory
"""Loading logic for loading documents from a directory."""
import concurrent
import logging
from pathlib import Path
from typing import Any, List, Optional, Type, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[
Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
[docs]class DirectoryLoader(BaseLoader):
"""Loading logic for loading documents from a directory."""
def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
loader_kwargs: Union[dict, None] = None,
recursive: bool = False,
show_progress: bool = False,
use_multithreading: bool = False,
max_concurrency: int = 4,
):
"""Initialize with path to directory and how to glob over it."""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
[docs] def load_file(
self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
) -> None:
if item.is_file():
if _is_visible(item.relative_to(path)) or self.load_hidden:
try:
sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
finally:
if pbar:
pbar.update(1)
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
if not p.exists():
raise FileNotFoundError(f"Directory not found: '{self.path}'")
if not p.is_dir():
raise ValueError(f"Expected directory, got file: '{self.path}'")
docs: List[Document] = []
items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))
pbar = None
if self.show_progress:
try:
from tqdm import tqdm
pbar = tqdm(total=len(items))
except ImportError as e:
logger.warning(
"To log the progress of DirectoryLoader you need to install tqdm, "
"`pip install tqdm`"
)
if self.silent_errors:
logger.warning(e)
else:
raise e
if self.use_multithreading:
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_concurrency
) as executor:
executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
else:
for i in items:
self.load_file(i, p, docs, pbar)
if pbar:
pbar.close()
return docs
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/directory.html |
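A usage sketch loading Markdown files with TextLoader instead of the unstructured default (path and glob are illustrative):

from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.text import TextLoader

loader = DirectoryLoader(
    "docs/",             # hypothetical directory
    glob="**/*.md",
    loader_cls=TextLoader,
    recursive=True,
    show_progress=True,  # requires tqdm
)
docs = loader.load()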
87fd5e8a03ad-0 | Source code for langchain.document_loaders.psychic
"""Loader that loads documents from Psychic.dev."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class PsychicLoader(BaseLoader):
"""Loader that loads documents from Psychic.dev."""
def __init__(self, api_key: str, connector_id: str, connection_id: str):
"""Initialize with API key, connector id, and connection id."""
try:
from psychicapi import ConnectorId, Psychic # noqa: F401
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
self.psychic = Psychic(secret_key=api_key)
self.connector_id = ConnectorId(connector_id)
self.connection_id = connection_id
[docs] def load(self) -> List[Document]:
"""Load documents."""
psychic_docs = self.psychic.get_documents(self.connector_id, self.connection_id)
return [
Document(
page_content=doc["content"],
metadata={"title": doc["title"], "source": doc["uri"]},
)
for doc in psychic_docs
]
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html |
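A usage sketch (all three values are placeholders; `connector_id` must be a valid `psychicapi.ConnectorId` value):

from langchain.document_loaders.psychic import PsychicLoader

loader = PsychicLoader(
    api_key="<psychic-secret-key>",   # placeholder
    connector_id="notion",            # illustrative connector id
    connection_id="<connection-id>",  # placeholder
)
docs = loader.load()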
a758edb01970-0 | Source code for langchain.document_loaders.csv_loader
import csv
from typing import Any, Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
Each document represents one row of the CSV file. Every row is converted into a
key/value pair and outputted to a new line in the document's page_content.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
):
self.file_path = file_path
self.source_column = source_column
self.encoding = encoding
self.csv_args = csv_args or {}
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
docs = []
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv_reader):
content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
try:
source = (
row[self.source_column]
if self.source_column is not None
else self.file_path
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
metadata = {"source": source, "row": i}
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
[docs]class UnstructuredCSVLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load CSV files."""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.8")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv
return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/csv_loader.html |
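A usage sketch (file and column names are illustrative; `csv_args` is forwarded to `csv.DictReader`):

from langchain.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(
    "data.csv",                   # hypothetical file
    source_column="url",          # use this column as each row's source
    csv_args={"delimiter": ";"},
)
docs = loader.load()  # one Document per row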
17366eba7f1d-0 | Source code for langchain.document_loaders.tomarkdown
"""Loader that loads HTML to markdown using 2markdown."""
from __future__ import annotations
from typing import Iterator, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ToMarkdownLoader(BaseLoader):
"""Loader that loads HTML to markdown using 2markdown."""
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://2markdown.com/api/2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load file."""
return list(self.lazy_load())
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/tomarkdown.html |
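A usage sketch (URL and API key are placeholders):

from langchain.document_loaders.tomarkdown import ToMarkdownLoader

loader = ToMarkdownLoader(url="https://example.com", api_key="<2markdown-api-key>")
docs = loader.load()  # the page converted to markdown as one Document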
c21bda986cb7-0 | Source code for langchain.document_loaders.snowflake_loader
from __future__ import annotations
from typing import Any, Dict, Iterator, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class SnowflakeLoader(BaseLoader):
"""Loads a query result from Snowflake into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
user: str,
password: str,
account: str,
warehouse: str,
role: str,
database: str,
schema: str,
parameters: Optional[Dict[str, Any]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
"""Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
"""
self.query = query
self.user = user
self.password = password
self.account = account
self.warehouse = warehouse
self.role = role
self.database = database
self.schema = schema
self.parameters = parameters
self.page_content_columns = (
page_content_columns if page_content_columns is not None else ["*"]
)
self.metadata_columns = metadata_columns if metadata_columns is not None else []
def _execute_query(self) -> List[Dict[str, Any]]:
try:
import snowflake.connector
except ImportError as ex:
raise ValueError(
"Could not import snowflake-connector-python package. "
"Please install it with `pip install snowflake-connector-python`."
) from ex
conn = snowflake.connector.connect(
user=self.user,
password=self.password,
account=self.account,
warehouse=self.warehouse,
role=self.role,
database=self.database,
schema=self.schema,
parameters=self.parameters,
)
try:
cur = conn.cursor()
cur.execute("USE DATABASE " + self.database)
cur.execute("USE SCHEMA " + self.schema)
cur.execute(self.query, self.parameters)
query_result = cur.fetchall()
column_names = [column[0] for column in cur.description]
query_result = [dict(zip(column_names, row)) for row in query_result]
except Exception as e:
print(f"An error occurred: {e}")
query_result = []
finally:
cur.close()
return query_result
def _get_columns(
self, query_result: List[Dict[str, Any]]
) -> Tuple[List[str], List[str]]:
page_content_columns = (
self.page_content_columns if self.page_content_columns else []
)
metadata_columns = self.metadata_columns if self.metadata_columns else []
if page_content_columns is None and query_result:
page_content_columns = list(query_result[0].keys())
if metadata_columns is None:
metadata_columns = []
return page_content_columns or [], metadata_columns
[docs] def lazy_load(self) -> Iterator[Document]:
query_result = self._execute_query()
if isinstance(query_result, Exception):
print(f"An error occurred during the query: {query_result}")
return []
page_content_columns, metadata_columns = self._get_columns(query_result)
if "*" in page_content_columns:
page_content_columns = list(query_result[0].keys())
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
yield doc
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
return list(self.lazy_load())
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/snowflake_loader.html |
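A usage sketch (every connection value is a placeholder):

from langchain.document_loaders.snowflake_loader import SnowflakeLoader

loader = SnowflakeLoader(
    query="SELECT * FROM my_table LIMIT 10",  # illustrative query
    user="<user>",
    password="<password>",
    account="<account>",
    warehouse="<warehouse>",
    role="<role>",
    database="<database>",
    schema="<schema>",
    metadata_columns=["ID"],
)
docs = loader.load()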
3d8c857a005e-0 | Source code for langchain.document_loaders.whatsapp_chat
import re
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(date: str, sender: str, text: str) -> str:
"""Combine message information in a readable format ready to be used."""
return f"{sender} on {date}: {text}\n\n"
[docs]class WhatsAppChatLoader(BaseLoader):
"""Loader that loads WhatsApp messages text file."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
text_content = ""
with open(p, encoding="utf8") as f:
lines = f.readlines()
message_line_regex = r"""
\[?
(
\d{1,2}
[\/.]
\d{1,2}
[\/.]
\d{2,4}
,\s
\d{1,2}
:\d{2}
(?:
:\d{2}
)?
(?:[ _](?:AM|PM))?
)
\]?
[\s-]*
([~\w\s]+)
[:]+
\s
(.+)
"""
for line in lines:
result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE)
if result:
date, sender, text = result.groups()
text_content += concatenate_rows(date, sender, text)
metadata = {"source": str(p)}
return [Document(page_content=text_content, metadata=metadata)]
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/whatsapp_chat.html |
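A usage sketch (the path is a placeholder for an exported WhatsApp chat text file):

from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader

docs = WhatsAppChatLoader("whatsapp_export.txt").load()  # one combined Document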
3d3b3951834a-0 | Source code for langchain.document_loaders.readthedocs
"""Loader that loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ReadTheDocsLoader(BaseLoader):
"""Loader that loads ReadTheDocs documentation directory dump."""
def __init__(
self,
path: Union[str, Path],
encoding: Optional[str] = None,
errors: Optional[str] = None,
custom_html_tag: Optional[Tuple[str, dict]] = None,
**kwargs: Optional[Any]
):
"""
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extract the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specifies how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.bs_kwargs = kwargs
[docs] def load(self) -> List[Document]:
"""Load documents."""
docs = []
for p in self.file_path.rglob("*"):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
def _clean_data(self, data: str) -> str:
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, **self.bs_kwargs)
# default tags
html_tags = [
("div", {"role": "main"}),
("main", {"id": "main-content"}),
]
if self.custom_html_tag is not None:
html_tags.append(self.custom_html_tag)
text = None
# reversed order. check the custom one first
for tag, attrs in html_tags[::-1]:
text = soup.find(tag, attrs)
# if found, break
if text is not None:
break
if text is not None:
text = text.get_text()
else:
text = ""
# trim empty lines
return "\n".join([t for t in text.split("\n") if t])
| https://python.langchain.com/en/latest/_modules/langchain/document_loaders/readthedocs.html |
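A usage sketch (assumes the site was mirrored locally first, e.g. with `wget -r`; extra kwargs go to BeautifulSoup):

from langchain.document_loaders.readthedocs import ReadTheDocsLoader

loader = ReadTheDocsLoader("rtdocs/", features="html.parser")
docs = loader.load()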
93a65e460a62-0 | Source code for langchain.document_loaders.telegram
"""Loader that loads Telegram chat json dump."""
from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
if TYPE_CHECKING:
import pandas as pd
from telethon.hints import EntityLike
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
date = row["date"]
sender = row["from"]
text = row["text"]
return f"{sender} on {date}: {text}\n\n"
[docs]class TelegramChatFileLoader(BaseLoader):
"""Loader that loads Telegram chat json directory dump."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message["type"] == "message" and isinstance(message["text"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
"""Converts a string or list of strings to a list of Documents with metadata."""
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=20,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
# Add sources as metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
[docs]class TelegramChatApiLoader(BaseLoader):
"""Loader that loads Telegram chat json directory dump."""
def __init__(
self,
chat_entity: Optional[EntityLike] = None,
api_id: Optional[int] = None,
api_hash: Optional[str] = None,
username: Optional[str] = None,
file_path: str = "telegram_data.json",
):
"""Initialize with API parameters."""
self.chat_entity = chat_entity
self.api_id = api_id
self.api_hash = api_hash
self.username = username
self.file_path = file_path
[docs] async def fetch_data_from_telegram(self) -> None:
"""Fetch data from Telegram API and save it as a JSON file."""
from telethon.sync import TelegramClient
data = []
async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
async for message in client.iter_messages(self.chat_entity):
is_reply = message.reply_to is not None
reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
data.append(
{
"sender_id": message.sender_id,
"text": message.text,
"date": message.date.isoformat(),
"message.id": message.id,
"is_reply": is_reply,
"reply_to_id": reply_to_id,
}
)
with open(self.file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def _get_message_threads(self, data: pd.DataFrame) -> dict:
"""Create a dictionary of message threads from the given data.
Args:
data (pd.DataFrame): A DataFrame containing the conversation \
data with columns:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
dict: A dictionary where the key is the parent message ID and \
the value is a list of message IDs in ascending order.
"""
def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
# Find direct replies to the parent message ID
direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
"message.id"
].tolist()
# Recursively find replies to the direct replies
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies
# Filter out parent messages
parent_messages = data[~data["is_reply"]]
# Filter out reply messages and drop rows with NaN in 'reply_to_id'
reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])
# Convert 'reply_to_id' to integer
reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)
# Create a dictionary of message threads with parent message IDs as keys and \
# lists of reply message IDs as values
message_threads = {
parent_id: [parent_id] + find_replies(parent_id, reply_messages)
for parent_id in parent_messages["message.id"]
}
return message_threads
def _combine_message_texts(
self, message_threads: Dict[int, List[int]], data: pd.DataFrame
) -> str:
"""
Combine the message texts for each parent message ID based \
on the list of message threads.
Args:
message_threads (dict): A dictionary where the key is the parent message \
ID and the value is a list of message IDs in ascending order.
data (pd.DataFrame): A DataFrame containing the conversation data:
- sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
str: A combined string of message texts sorted by date.
"""
combined_text = ""
# Iterate through sorted parent message IDs
for parent_id, message_ids in message_threads.items():
# Get the message texts for the message IDs and sort them by date
message_texts = (
data[data["message.id"].isin(message_ids)]
.sort_values(by="date")["text"]
.tolist()
)
message_texts = [str(elem) for elem in message_texts]
# Combine the message texts
combined_text += " ".join(message_texts) + ".\n"
return combined_text.strip()
[docs] def load(self) -> List[Document]:
"""Load documents."""
if self.chat_entity is not None:
try:
import nest_asyncio
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ImportError(
"""`nest_asyncio` package not found.
please install with `pip install nest_asyncio`
"""
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
try:
import pandas as pd
except ImportError:
raise ImportError(
"""`pandas` package not found.
please install with `pip install pandas`
""" | https://python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html |
93a65e460a62-5 | please install with `pip install pandas`
"""
)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
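A minimal usage sketch (assuming this class is the TelegramChatApiLoader exported from langchain.document_loaders; the chat entity and credentials below are placeholders):

.. code-block:: python

    from langchain.document_loaders import TelegramChatApiLoader  # assumed export

    loader = TelegramChatApiLoader(
        chat_entity="<link.to.chat>",      # placeholder: any Telethon EntityLike
        api_id=12345,                      # placeholder API credentials
        api_hash="0123456789abcdef",
        username="session_name",
    )
    docs = loader.load()  # fetches via Telethon, dumps JSON, builds one Document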
Source code for langchain.document_loaders.arxiv
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.arxiv import ArxivAPIWrapper
[docs]class ArxivLoader(BaseLoader):
"""Loads a query result from arxiv.org into a list of Documents.
Each Document corresponds to one arXiv paper returned by the query.
The loader converts the original PDF into plain text.
"""
def __init__(
self,
query: str,
load_max_docs: Optional[int] = 100,
load_all_available_meta: Optional[bool] = False,
):
self.query = query
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
[docs] def load(self) -> List[Document]:
arxiv_client = ArxivAPIWrapper(
load_max_docs=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
)
docs = arxiv_client.load(self.query)
return docs
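A short usage sketch (assuming the loader is exported from langchain.document_loaders; the query string is illustrative):

.. code-block:: python

    from langchain.document_loaders import ArxivLoader

    # Requires the `arxiv` package; loads up to two matching papers.
    docs = ArxivLoader(query="quantum computing", load_max_docs=2).load()
    print(docs[0].metadata)  # e.g. title, authors, summary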
Source code for langchain.document_loaders.trello
"""Loader that loads cards from Trello"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env
if TYPE_CHECKING:
from trello import Board, Card, TrelloClient
[docs]class TrelloLoader(BaseLoader):
"""Trello loader. Reads all cards from a Trello board."""
def __init__(
self,
client: TrelloClient,
board_name: str,
*,
include_card_name: bool = True,
include_comments: bool = True,
include_checklist: bool = True,
card_filter: Literal["closed", "open", "all"] = "all",
extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
):
"""Initialize Trello loader.
Args:
client: Trello API client.
board_name: The name of the Trello board.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
self.client = client
self.board_name = board_name
self.include_card_name = include_card_name
self.include_comments = include_comments
self.include_checklist = include_checklist
self.extra_metadata = extra_metadata
self.card_filter = card_filter
[docs] @classmethod
def from_credentials(
cls,
board_name: str,
*,
api_key: Optional[str] = None,
token: Optional[str] = None,
**kwargs: Any,
) -> TrelloLoader:
"""Convenience constructor that builds TrelloClient init param for you.
Args:
board_name: The name of the Trello board.
api_key: Trello API key. Can also be specified as environment variable
TRELLO_API_KEY.
token: Trello token. Can also be specified as environment variable
TRELLO_TOKEN.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
try:
from trello import TrelloClient # type: ignore
except ImportError as ex:
raise ImportError(
"Could not import trello python package. "
"Please install it with `pip install py-trello`."
) from ex
api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
token = token or get_from_env("token", "TRELLO_TOKEN")
client = TrelloClient(api_key=api_key, token=token)
return cls(client, board_name, **kwargs)
[docs] def load(self) -> List[Document]:
"""Loads all cards from the specified Trello board.
You can filter the cards, metadata and text included by using the optional
parameters.
Returns:
A list of documents, one for each card in the board.
"""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError as ex:
raise ImportError(
"`beautifulsoup4` package not found, please run"
" `pip install beautifulsoup4`"
) from ex
board = self._get_board()
# Create a dictionary with the list IDs as keys and the list names as values
list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
# Get Cards on the board
cards = board.get_cards(card_filter=self.card_filter)
return [self._card_to_doc(card, list_dict) for card in cards]
def _get_board(self) -> Board:
# Find the first board with a matching name
board = next(
(b for b in self.client.list_boards() if b.name == self.board_name), None
)
if not board:
raise ValueError(f"Board `{self.board_name}` not found.")
return board
def _card_to_doc(self, card: Card, list_dict: dict) -> Document:
from bs4 import BeautifulSoup # type: ignore
text_content = ""
if self.include_card_name:
text_content = card.name + "\n"
if card.description.strip():
text_content += BeautifulSoup(card.description, "lxml").get_text()
if self.include_checklist:
# Get all the checklist items on the card
for checklist in card.checklists:
if checklist.items:
items = [
f"{item['name']}:{item['state']}" for item in checklist.items
]
text_content += f"\n{checklist.name}\n" + "\n".join(items)
if self.include_comments:
# Get all the comments on the card
comments = [
BeautifulSoup(comment["data"]["text"], "lxml").get_text()
for comment in card.comments
]
text_content += "Comments:" + "\n".join(comments)
# Default metadata fields
metadata = {
"title": card.name,
"id": card.id,
"url": card.url,
}
# Extra metadata fields. Card object is not subscriptable.
if "labels" in self.extra_metadata:
metadata["labels"] = [label.name for label in card.labels]
if "list" in self.extra_metadata:
if card.list_id in list_dict:
metadata["list"] = list_dict[card.list_id]
if "closed" in self.extra_metadata:
metadata["closed"] = card.closed
if "due_date" in self.extra_metadata:
metadata["due_date"] = card.due_date
return Document(page_content=text_content, metadata=metadata)
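A usage sketch (the board name is hypothetical; credentials may also come from the TRELLO_API_KEY and TRELLO_TOKEN environment variables):

.. code-block:: python

    from langchain.document_loaders import TrelloLoader  # assumed export

    loader = TrelloLoader.from_credentials(
        "Awesome Board",           # hypothetical board name
        card_filter="open",
        extra_metadata=("list",),
    )
    docs = loader.load()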
Source code for langchain.document_loaders.bigquery
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from google.auth.credentials import Credentials
[docs]class BigQueryLoader(BaseLoader):
"""Loads a query result from BigQuery into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
project: Optional[str] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
credentials: Optional[Credentials] = None,
):
"""Initialize BigQuery document loader.
Args:
query: The query to run in BigQuery.
project: Optional. The project to run the query in.
page_content_columns: Optional. The columns to write into the `page_content`
of the document.
metadata_columns: Optional. The columns to write into the `metadata` of the
document.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
(`google.auth.compute_engine.Credentials`) or Service Account
(`google.oauth2.service_account.Credentials`) credentials directly.
"""
self.query = query
self.project = project
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
self.credentials = credentials
[docs] def load(self) -> List[Document]:
try:
from google.cloud import bigquery
except ImportError as ex:
raise ValueError(
"Could not import google-cloud-bigquery python package. "
"Please install it with `pip install google-cloud-bigquery`."
) from ex
bq_client = bigquery.Client(credentials=self.credentials, project=self.project)
query_result = bq_client.query(self.query).result()
docs: List[Document] = []
page_content_columns = self.page_content_columns
metadata_columns = self.metadata_columns
if page_content_columns is None:
page_content_columns = [column.name for column in query_result.schema]
if metadata_columns is None:
metadata_columns = []
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
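A usage sketch (the table and query are hypothetical; requires `google-cloud-bigquery` and valid Google credentials):

.. code-block:: python

    from langchain.document_loaders import BigQueryLoader  # assumed export

    query = "SELECT title, abstract FROM `my_project.my_dataset.papers` LIMIT 10"
    loader = BigQueryLoader(
        query,
        page_content_columns=["abstract"],  # becomes the Document text
        metadata_columns=["title"],         # becomes Document metadata
    )
    docs = loader.load()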
Source code for langchain.document_loaders.excel
"""Loader that loads Microsoft Excel files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class UnstructuredExcelLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load Microsoft Excel files."""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
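A usage sketch (the file name is a placeholder; requires `unstructured>=0.6.7`):

.. code-block:: python

    from langchain.document_loaders import UnstructuredExcelLoader  # assumed export

    # mode="elements" keeps one Document per partitioned element instead of
    # a single Document for the whole file.
    loader = UnstructuredExcelLoader("example.xlsx", mode="elements")
    docs = loader.load()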
Source code for langchain.document_loaders.html_bs
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class BSHTMLLoader(BaseLoader):
"""Loader that uses beautiful soup to parse HTML files."""
def __init__(
self,
file_path: str,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""Initialise with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object."""
try:
import bs4 # noqa:F401
except ImportError:
raise ValueError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
[docs] def load(self) -> List[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
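A usage sketch (the file path is a placeholder; requires `beautifulsoup4` and, with the default bs_kwargs, `lxml`):

.. code-block:: python

    from langchain.document_loaders import BSHTMLLoader  # assumed export

    loader = BSHTMLLoader("example.html")
    docs = loader.load()
    print(docs[0].metadata["title"])  # the page <title>, or "" when absent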
Source code for langchain.document_loaders.bibtex
import logging
import re
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.bibtex import BibtexparserWrapper
logger = logging.getLogger(__name__)
[docs]class BibtexLoader(BaseLoader):
"""Loads a bibtex file into a list of Documents.
Each document represents one entry from the bibtex file.
If a PDF file is present in the `file` bibtex field, the original PDF
is loaded into the document text. If no such file entry is present,
the `abstract` field is used instead.
"""
def __init__(
self,
file_path: str,
*,
parser: Optional[BibtexparserWrapper] = None,
max_docs: Optional[int] = None,
max_content_chars: Optional[int] = 4_000,
load_extra_metadata: bool = False,
file_pattern: str = r"[^:]+\.pdf",
):
"""Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
max_docs: Max number of associated documents to load. Defaults to None,
meaning no limit.
"""
self.file_path = file_path
self.parser = parser or BibtexparserWrapper()
self.max_docs = max_docs
self.max_content_chars = max_content_chars
self.load_extra_metadata = load_extra_metadata
self.file_regex = re.compile(file_pattern)
def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]:
import fitz
parent_dir = Path(self.file_path).parent
# regex is useful for Zotero flavor bibtex files
file_names = self.file_regex.findall(entry.get("file", ""))
if not file_names:
return None
texts: List[str] = []
for file_name in file_names:
try:
with fitz.open(parent_dir / file_name) as f:
texts.extend(page.get_text() for page in f)
except FileNotFoundError as e:
logger.debug(e)
content = "\n".join(texts) or entry.get("abstract", "")
if self.max_content_chars:
content = content[: self.max_content_chars]
metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata)
return Document(
page_content=content,
metadata=metadata,
)
[docs] def lazy_load(self) -> Iterator[Document]:
"""Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
try:
import fitz # noqa: F401
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
entries = self.parser.load_bibtex_entries(self.file_path)
if self.max_docs:
entries = entries[: self.max_docs]
for entry in entries:
doc = self._load_entry(entry)
if doc:
yield doc
[docs] def load(self) -> List[Document]:
"""Load bibtex file documents from the given bibtex file path.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
return list(self.lazy_load())
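A usage sketch (the file path is a placeholder; requires `bibtexparser`, plus `pymupdf` to read any linked PDFs):

.. code-block:: python

    from langchain.document_loaders import BibtexLoader  # assumed export

    # PDF paths in each entry's `file` field are resolved relative to the
    # directory containing the .bib file.
    loader = BibtexLoader("library.bib", max_docs=5)
    docs = loader.load()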
Source code for langchain.tools.plugin
from __future__ import annotations
import json
from typing import Optional, Type
import requests
import yaml
from pydantic import BaseModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
class ApiConfig(BaseModel):
type: str
url: str
has_user_authentication: Optional[bool] = False
class AIPlugin(BaseModel):
"""AI Plugin Definition."""
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: Optional[dict] = None
api: ApiConfig
logo_url: Optional[str]
contact_email: Optional[str]
legal_info_url: Optional[str]
@classmethod
def from_url(cls, url: str) -> AIPlugin:
"""Instantiate AIPlugin from a URL."""
response = requests.get(url).json()
return cls(**response)
def marshal_spec(txt: str) -> dict:
"""Convert the yaml or json serialized spec to a dict."""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
class AIPluginToolSchema(BaseModel):
"""AIPLuginToolSchema."""
tool_input: Optional[str] = ""
[docs]class AIPluginTool(BaseTool):
plugin: AIPlugin
api_spec: str
args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema
[docs] @classmethod
def from_plugin_url(cls, url: str) -> AIPluginTool:
plugin = AIPlugin.from_url(url)
description = (
f"Call this tool to get the OpenAPI spec (and usage guide) "
f"for interacting with the {plugin.name_for_human} API. "
f"You should only call this ONCE! What is the "
f"{plugin.name_for_human} API useful for? "
) + plugin.description_for_human
open_api_spec_str = requests.get(plugin.api.url).text
open_api_spec = marshal_spec(open_api_spec_str)
api_spec = (
f"Usage Guide: {plugin.description_for_model}\n\n"
f"OpenAPI Spec: {open_api_spec}"
)
return cls(
name=plugin.name_for_model,
description=description,
plugin=plugin,
api_spec=api_spec,
)
def _run(
self,
tool_input: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_spec
async def _arun(
self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return self.api_spec
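A usage sketch (the manifest URL below is a hypothetical example of the .well-known/ai-plugin.json convention):

.. code-block:: python

    from langchain.tools.plugin import AIPluginTool

    tool = AIPluginTool.from_plugin_url(
        "https://example.com/.well-known/ai-plugin.json"  # hypothetical URL
    )
    spec = tool.run("")  # returns the usage guide plus the OpenAPI spec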
Source code for langchain.tools.ifttt
"""From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services.
# Creating a webhook
- Go to https://ifttt.com/create
# Configuring the "If This"
- Click on the "If This" button in the IFTTT interface.
- Search for "Webhooks" in the search bar.
- Choose the first option for "Receive a web request with a JSON payload."
- Choose an Event Name that is specific to the service you plan to connect to.
This will make it easier for you to manage the webhook URL.
For example, if you're connecting to Spotify, you could use "Spotify" as your
Event Name.
- Click the "Create Trigger" button to save your settings and create your webhook.
# Configuring the "Then That"
- Tap on the "Then That" button in the IFTTT interface.
- Search for the service you want to connect, such as Spotify.
- Choose an action from the service, such as "Add track to a playlist".
- Configure the action by specifying the necessary details, such as the playlist name,
e.g., "Songs from AI".
- Reference the JSON Payload received by the Webhook in your action. For the Spotify
scenario, choose "{{JsonPayload}}" as your search query.
- Tap the "Create Action" button to save your action settings.
- Once you have finished configuring your action, click the "Finish" button to
complete the setup.
- Congratulations! You have successfully connected the Webhook to the desired
service, and you're ready to start receiving data and triggering actions 🎉
# Finishing up
- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
- Copy the IFTTT key value from there. The URL is of the form
https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value.
"""
from typing import Optional
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
[docs]class IFTTTWebhook(BaseTool):
"""IFTTT Webhook.
Args:
name: name of the tool
description: description of the tool
url: url to hit with the json event.
"""
url: str
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
body = {"this": tool_input}
response = requests.post(self.url, data=body)
return response.text
async def _arun(
self,
tool_input: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError("Not implemented.")
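A usage sketch (the event name and key are placeholders; the trigger-URL shape is an assumption based on IFTTT's JSON-payload webhooks):

.. code-block:: python

    from langchain.tools.ifttt import IFTTTWebhook

    key = "YOUR_IFTTT_KEY"  # placeholder
    url = f"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}"
    tool = IFTTTWebhook(
        name="Spotify",
        description="Add a track to a 'Songs from AI' playlist",
        url=url,
    )
    tool.run("taylor swift")  # posted as {"this": "taylor swift"}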
Source code for langchain.tools.base
"""Base implementation for tools or skills."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Type, Union
from pydantic import (
BaseModel,
Extra,
Field,
create_model,
root_validator,
validate_arguments,
)
from pydantic.main import ModelMetaclass
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForToolRun,
CallbackManager,
CallbackManagerForToolRun,
Callbacks,
)
class SchemaAnnotationError(TypeError):
"""Raised when 'args_schema' is missing or has an incorrect type annotation."""
class ToolMetaclass(ModelMetaclass):
"""Metaclass for BaseTool to ensure the provided args_schema
isn't silently ignored."""
def __new__(
cls: Type[ToolMetaclass], name: str, bases: Tuple[Type, ...], dct: dict
) -> ToolMetaclass:
"""Create the definition of the new tool class."""
schema_type: Optional[Type[BaseModel]] = dct.get("args_schema")
if schema_type is not None:
schema_annotations = dct.get("__annotations__", {})
args_schema_type = schema_annotations.get("args_schema", None)
if args_schema_type is None or args_schema_type == BaseModel:
# Throw errors for common mis-annotations.
# TODO: Use get_args / get_origin and fully
# specify valid annotations.
typehint_mandate = """
class ChildTool(BaseTool):
...
args_schema: Type[BaseModel] = SchemaClass
...""" | https://python.langchain.com/en/latest/_modules/langchain/tools/base.html |
46860caba4ff-1 | ...
args_schema: Type[BaseModel] = SchemaClass
..."""
raise SchemaAnnotationError(
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
f"Expected annotation of 'Type[BaseModel]'"
f" but got '{args_schema_type}'.\n"
f"Expected class looks like:\n"
f"{typehint_mandate}"
)
# Pass through to Pydantic's metaclass
return super().__new__(cls, name, bases, dct)
def _create_subset_model(
name: str, model: BaseModel, field_names: list
) -> Type[BaseModel]:
"""Create a pydantic model with only a subset of model's fields."""
fields = {
field_name: (
model.__fields__[field_name].type_,
model.__fields__[field_name].default,
)
for field_name in field_names
if field_name in model.__fields__
}
return create_model(name, **fields) # type: ignore
def get_filtered_args(
inferred_model: Type[BaseModel],
func: Callable,
) -> dict:
"""Get the arguments from a function's signature."""
schema = inferred_model.schema()["properties"]
valid_keys = signature(func).parameters
return {k: schema[k] for k in valid_keys if k != "run_manager"}
class _SchemaConfig:
"""Configuration for the pydantic model."""
extra = Extra.forbid
arbitrary_types_allowed = True
def create_schema_from_function(
model_name: str,
func: Callable,
) -> Type[BaseModel]:
"""Create a pydantic schema from a function's signature."""
validated = validate_arguments(func, config=_SchemaConfig) # type: ignore
inferred_model = validated.model # type: ignore
if "run_manager" in inferred_model.__fields__:
del inferred_model.__fields__["run_manager"]
# Pydantic adds placeholder virtual fields we need to strip
filtered_args = get_filtered_args(inferred_model, func)
return _create_subset_model(
f"{model_name}Schema", inferred_model, list(filtered_args)
)
class ToolException(Exception):
"""An optional exception that tool throws when execution error occurs.
When this exception is thrown, the agent will not stop working,
but will handle the exception according to the handle_tool_error
variable of the tool, and the processing result will be returned
to the agent as observation, and printed in red on the console.
"""
pass
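A self-contained sketch of how this interacts with handle_tool_error on BaseTool.run (the tool below is illustrative):

.. code-block:: python

    from langchain.tools.base import ToolException, tool

    @tool
    def flaky_search(query: str) -> str:
        """Search a backend that is sometimes unavailable."""
        raise ToolException("The search backend is down.")

    # With handling enabled, run() returns the exception message as the
    # observation instead of re-raising.
    flaky_search.handle_tool_error = True
    print(flaky_search.run("langchain"))  # -> The search backend is down.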
[docs]class BaseTool(ABC, BaseModel, metaclass=ToolMetaclass):
"""Interface LangChain tools must implement."""
name: str
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
"""
args_schema: Optional[Type[BaseModel]] = None
"""Pydantic model class to validate and parse the tool's input arguments."""
return_direct: bool = False
"""Whether to return the tool's output directly. Setting this to True means
that after the tool is called, the AgentExecutor will stop looping.
"""
verbose: bool = False
"""Whether to log the tool's progress."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to be called during tool execution."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Deprecated. Please use callbacks instead."""
handle_tool_error: Optional[
Union[bool, str, Callable[[ToolException], str]]
] = False
"""Handle the content of the ToolException thrown."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
keys = {k for k in self.args if k != "kwargs"}
return len(keys) == 1
@property
def args(self) -> dict:
if self.args_schema is not None:
return self.args_schema.schema()["properties"]
else:
schema = create_schema_from_function(self.name, self._run)
return schema.schema()["properties"]
def _parse_input(
self,
tool_input: Union[str, Dict],
) -> Union[str, Dict[str, Any]]:
"""Convert tool input to pydantic model."""
input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
key_ = next(iter(input_args.__fields__.keys()))
input_args.validate({key_: tool_input})
return tool_input
else:
if input_args is not None:
result = input_args.parse_obj(tool_input)
return {k: v for k, v in result.dict().items() if k in tool_input}
return tool_input
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@abstractmethod
def _run(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool.
Add run_manager: Optional[CallbackManagerForToolRun] = None
to child implementations to enable tracing.
"""
@abstractmethod
async def _arun(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool asynchronously.
Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None
to child implementations to enable tracing.
"""
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:
# For backwards compatibility, if run_input is a string,
# pass as a positional argument.
if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
[docs] def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool."""
parsed_input = self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, verbose=verbose_
)
# TODO: maybe also pass through run_manager is _run supports kwargs
new_arg_supported = signature(self._run).parameters.get("run_manager")
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = (
self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else self._run(*tool_args, **tool_kwargs)
)
except ToolException as e:
if not self.handle_tool_error:
run_manager.on_tool_error(e)
raise e
elif isinstance(self.handle_tool_error, bool):
if e.args:
observation = e.args[0]
else:
observation = "Tool execution error"
elif isinstance(self.handle_tool_error, str):
observation = self.handle_tool_error
elif callable(self.handle_tool_error):
observation = self.handle_tool_error(e)
else:
raise ValueError(
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {self.handle_tool_error}"
)
run_manager.on_tool_end(
str(observation), color="red", name=self.name, **kwargs
)
return observation
except (Exception, KeyboardInterrupt) as e:
run_manager.on_tool_error(e)
raise e
else:
run_manager.on_tool_end(
str(observation), color=color, name=self.name, **kwargs
)
return observation
[docs] async def arun(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool asynchronously."""
parsed_input = self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, verbose=verbose_
)
new_arg_supported = signature(self._arun).parameters.get("run_manager")
run_manager = await callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
# We then call the tool on the tool input to get an observation
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = (
await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else await self._arun(*tool_args, **tool_kwargs)
)
except ToolException as e:
if not self.handle_tool_error:
await run_manager.on_tool_error(e)
raise e
elif isinstance(self.handle_tool_error, bool):
if e.args:
observation = e.args[0]
else:
observation = "Tool execution error"
elif isinstance(self.handle_tool_error, str):
observation = self.handle_tool_error
elif callable(self.handle_tool_error):
observation = self.handle_tool_error(e)
else:
raise ValueError(
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {self.handle_tool_error}"
)
await run_manager.on_tool_end(
str(observation), color="red", name=self.name, **kwargs
)
return observation
except (Exception, KeyboardInterrupt) as e:
await run_manager.on_tool_error(e)
raise e
else:
await run_manager.on_tool_end(
str(observation), color=color, name=self.name, **kwargs
)
return observation
def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str:
"""Make tool callable."""
return self.run(tool_input, callbacks=callbacks)
[docs]class Tool(BaseTool):
"""Tool that takes in function or coroutine directly.""" | https://python.langchain.com/en/latest/_modules/langchain/tools/base.html |
46860caba4ff-8 | """Tool that takes in function or coroutine directly."""
description: str = ""
func: Callable[..., str]
"""The function to run when the tool is called."""
coroutine: Optional[Callable[..., Awaitable[str]]] = None
"""The asynchronous version of the function."""
@property
def args(self) -> dict:
"""The tool's input arguments."""
if self.args_schema is not None:
return self.args_schema.schema()["properties"]
# For backwards compatibility, if the function signature is ambiguous,
# assume it takes a single string input.
return {"tool_input": {"type": "string"}}
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:
"""Convert tool input to pydantic model."""
args, kwargs = super()._to_args_and_kwargs(tool_input)
# For backwards compatibility. The tool must be run with a single input
all_args = list(args) + list(kwargs.values())
if len(all_args) != 1:
raise ValueError(
f"Too many arguments to single-input tool {self.name}."
f" Args: {all_args}"
)
return tuple(all_args), {}
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> Any:
"""Use the tool."""
new_argument_supported = signature(self.func).parameters.get("callbacks")
return (
self.func(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else self.func(*args, **kwargs)
)
async def _arun(
self,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> Any:
"""Use the tool asynchronously."""
if self.coroutine:
new_argument_supported = signature(self.coroutine).parameters.get(
"callbacks"
)
return (
await self.coroutine(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else await self.coroutine(*args, **kwargs)
)
raise NotImplementedError("Tool does not support async")
# TODO: this is for backwards compatibility, remove in future
def __init__(
self, name: str, func: Callable, description: str, **kwargs: Any
) -> None:
"""Initialize tool."""
super(Tool, self).__init__(
name=name, func=func, description=description, **kwargs
)
[docs] @classmethod
def from_function(
cls,
func: Callable,
name: str, # We keep these required to support backwards compatibility
description: str,
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
**kwargs: Any,
) -> Tool:
"""Initialize tool from a function."""
return cls(
name=name,
func=func,
description=description,
return_direct=return_direct,
args_schema=args_schema,
**kwargs,
)
[docs]class StructuredTool(BaseTool):
"""Tool that can operate on any number of inputs."""
description: str = ""
args_schema: Type[BaseModel] = Field(..., description="The tool schema.")
"""The input arguments' schema."""
func: Callable[..., Any]
"""The function to run when the tool is called."""
coroutine: Optional[Callable[..., Awaitable[Any]]] = None
"""The asynchronous version of the function."""
@property
def args(self) -> dict:
"""The tool's input arguments."""
return self.args_schema.schema()["properties"]
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> Any:
"""Use the tool."""
new_argument_supported = signature(self.func).parameters.get("callbacks")
return (
self.func(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else self.func(*args, **kwargs)
)
async def _arun(
self,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool asynchronously."""
if self.coroutine:
new_argument_supported = signature(self.coroutine).parameters.get(
"callbacks"
)
return (
await self.coroutine(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else await self.coroutine(*args, **kwargs)
)
raise NotImplementedError("Tool does not support async")
[docs] @classmethod
def from_function(
cls,
func: Callable,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
infer_schema: bool = True,
**kwargs: Any,
) -> StructuredTool:
name = name or func.__name__
description = description or func.__doc__
assert (
description is not None
), "Function must have a docstring if description not provided."
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{name}{signature(func)} - {description.strip()}"
_args_schema = args_schema
if _args_schema is None and infer_schema:
_args_schema = create_schema_from_function(f"{name}Schema", func)
return cls(
name=name,
func=func,
args_schema=_args_schema,
description=description,
return_direct=return_direct,
**kwargs,
)
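For instance, a minimal sketch of building a multi-input tool from a plain function, with the schema inferred from the signature:

.. code-block:: python

    from langchain.tools.base import StructuredTool

    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b

    calculator = StructuredTool.from_function(multiply)
    calculator.run({"a": 6, "b": 7})  # -> 42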
[docs]def tool(
*args: Union[str, Callable],
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
infer_schema: bool = True,
) -> Callable:
"""Make tools out of functions, can be used with or without arguments.
Args:
*args: The arguments to the tool.
return_direct: Whether to return directly from the tool rather
than continuing the agent loop.
args_schema: optional argument schema for user to specify
infer_schema: Whether to infer the schema of the arguments from
the function's signature. This also makes the resultant tool
accept a dictionary input to its `run()` function.
Requires:
- Function must be of type (str) -> str
- Function must have a docstring
Examples:
.. code-block:: python
@tool
def search_api(query: str) -> str:
# Searches the API for the query.
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
# Searches the API for the query.
return
"""
def _make_with_name(tool_name: str) -> Callable:
def _make_tool(func: Callable) -> BaseTool:
if infer_schema or args_schema is not None:
return StructuredTool.from_function(
func,
name=tool_name,
return_direct=return_direct,
args_schema=args_schema,
infer_schema=infer_schema,
)
# If someone doesn't want a schema applied, we must treat it as
# a simple string->string function
assert func.__doc__ is not None, "Function must have a docstring"
return Tool(
name=tool_name,
func=func,
description=f"{tool_name} tool",
return_direct=return_direct,
)
return _make_tool
if len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name
# Example usage: @tool("search", return_direct=True)
return _make_with_name(args[0])
elif len(args) == 1 and callable(args[0]):
# if the argument is a function, then we use the function name as the tool name
# Example usage: @tool
return _make_with_name(args[0].__name__)(args[0])
elif len(args) == 0:
# if there are no arguments, then we use the function name as the tool name
# Example usage: @tool(return_direct=True)
def _partial(func: Callable[[str], str]) -> BaseTool:
return _make_with_name(func.__name__)(func)
return _partial
else:
raise ValueError("Too many arguments for tool decorator")
Source code for langchain.tools.playwright.click
from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
"""Input for ClickTool."""
selector: str = Field(..., description="CSS selector for the element to click")
[docs]class ClickTool(BaseBrowserTool):
name: str = "click_element"
description: str = "Click on an element with the given CSS selector"
args_schema: Type[BaseModel] = ClickToolInput
visible_only: bool = True
"""Whether to consider only visible elements."""
playwright_strict: bool = False
"""Whether to employ Playwright's strict mode when clicking on elements."""
playwright_timeout: float = 1_000
"""Timeout (in ms) for Playwright to wait for element to be ready."""
def _selector_effective(self, selector: str) -> str:
if not self.visible_only:
return selector
return f"{selector} >> visible=1"
def _run(
self,
selector: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
try:
page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
async def _arun(
self,
selector: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
try:
await page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
Source code for langchain.tools.playwright.get_elements
from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
"""Input for GetElementsTool."""
selector: str = Field(
...,
description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
)
attributes: List[str] = Field(
default_factory=lambda: ["innerText"],
description="Set of attributes to retrieve for each element",
)
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
[docs]class GetElementsTool(BaseBrowserTool):
name: str = "get_elements"
description: str = (
"Retrieve elements in the current web page matching the given CSS selector"
)
args_schema: Type[BaseModel] = GetElementsToolInput
def _run(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
async def _arun(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}") | https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html |
61af0d0587da-2 | raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
results = await _aget_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
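A short sketch, with the same browser setup as in the click example above:

.. code-block:: python

    from langchain.tools.playwright.get_elements import GetElementsTool
    from langchain.tools.playwright.navigate import NavigateTool
    from langchain.tools.playwright.utils import create_sync_playwright_browser

    browser = create_sync_playwright_browser()
    NavigateTool(sync_browser=browser).run({"url": "https://example.com"})
    tool = GetElementsTool(sync_browser=browser)
    # JSON list such as [{"innerText": "Example Domain"}] for every <h1>.
    print(tool.run({"selector": "h1", "attributes": ["innerText"]}))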
Source code for langchain.tools.playwright.current_page
from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
[docs]class CurrentWebPageTool(BaseBrowserTool):
name: str = "current_webpage"
description: str = "Returns the URL of the current page"
args_schema: Type[BaseModel] = BaseModel
def _run(
self,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
return str(page.url)
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
return str(page.url)
Source code for langchain.tools.playwright.extract_hyperlinks
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Optional, Type
from pydantic import BaseModel, Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
if TYPE_CHECKING:
pass
class ExtractHyperlinksToolInput(BaseModel):
"""Input for ExtractHyperlinksTool."""
absolute_urls: bool = Field(
default=False,
description="Return absolute URLs instead of relative URLs",
)
[docs]class ExtractHyperlinksTool(BaseBrowserTool):
"""Extract all hyperlinks on the page."""
name: str = "extract_hyperlinks"
description: str = "Extract all hyperlinks on the current webpage"
args_schema: Type[BaseModel] = ExtractHyperlinksToolInput
@root_validator
def check_bs_import(cls, values: dict) -> dict:
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
[docs] @staticmethod
def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str:
from urllib.parse import urljoin
from bs4 import BeautifulSoup
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
# Find all the anchor elements and extract their href attributes
anchors = soup.find_all("a")
if absolute_urls:
base_url = page.url
links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors]
else:
links = [anchor.get("href", "") for anchor in anchors]
# Return the list of links as a JSON string
return json.dumps(links)
def _run(
self,
absolute_urls: bool = False,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
return self.scrape_page(page, html_content, absolute_urls)
async def _arun(
self,
absolute_urls: bool = False,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
html_content = await page.content()
return self.scrape_page(page, html_content, absolute_urls)
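A short sketch with the same browser setup (absolute_urls resolves relative hrefs against the page URL):

.. code-block:: python

    from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool
    from langchain.tools.playwright.navigate import NavigateTool
    from langchain.tools.playwright.utils import create_sync_playwright_browser

    browser = create_sync_playwright_browser()
    NavigateTool(sync_browser=browser).run({"url": "https://example.com"})
    links = ExtractHyperlinksTool(sync_browser=browser).run({"absolute_urls": True})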
Source code for langchain.tools.playwright.navigate_back
from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
[docs]class NavigateBackTool(BaseBrowserTool):
"""Navigate back to the previous page in the browser history."""
name: str = "previous_webpage"
description: str = "Navigate back to the previous page in the browser history"
args_schema: Type[BaseModel] = BaseModel
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
Source code for langchain.tools.playwright.navigate
from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateToolInput(BaseModel):
"""Input for NavigateToolInput."""
url: str = Field(..., description="url to navigate to")
[docs]class NavigateTool(BaseBrowserTool):
name: str = "navigate_browser"
description: str = "Navigate a browser to the specified URL"
args_schema: Type[BaseModel] = NavigateToolInput
def _run(
self,
url: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
async def _arun(
self,
url: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
Source: https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
Source code for langchain.tools.playwright.extract_text
from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
[docs]class ExtractTextTool(BaseBrowserTool):
"""Extract all the text on the current webpage."""
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = BaseModel
@root_validator
def check_bs_import(cls, values: dict) -> dict:
"""Check that the beautifulsoup4 package is installed."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
Source: https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
Source code for langchain.tools.azure_cognitive_services.text2speech
from __future__ import annotations
import logging
import tempfile
from typing import Any, Dict, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class AzureCogsText2SpeechTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Text2Speech API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_region: str = "" #: :meta private:
speech_language: str = "en-US" #: :meta private:
speech_config: Any #: :meta private:
name = "Azure Cognitive Services Text2Speech"
description = (
"A wrapper around Azure Cognitive Services Text2Speech. "
"Useful for when you need to convert text to speech. "
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_region = get_from_dict_or_env(
values, "azure_cogs_region", "AZURE_COGS_REGION"
)
try:
import azure.cognitiveservices.speech as speechsdk
values["speech_config"] = speechsdk.SpeechConfig(
subscription=azure_cogs_key, region=azure_cogs_region
)
except ImportError:
raise ImportError(
"azure-cognitiveservices-speech is not installed. "
"Run `pip install azure-cognitiveservices-speech` to install."
)
return values
def _text2speech(self, text: str, speech_language: str) -> str:
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
raise ImportError(
"azure-cognitiveservices-speech is not installed. "
"Run `pip install azure-cognitiveservices-speech` to install."
)
self.speech_config.speech_synthesis_language = speech_language
speech_synthesizer = speechsdk.SpeechSynthesizer(
speech_config=self.speech_config, audio_config=None
)
result = speech_synthesizer.speak_text(text)
if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
stream = speechsdk.AudioDataStream(result)
with tempfile.NamedTemporaryFile(
mode="wb", suffix=".wav", delete=False
) as f:
stream.save_to_wav_file(f.name)
return f.name
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
if cancellation_details.reason == speechsdk.CancellationReason.Error:
raise RuntimeError(
f"Speech synthesis error: {cancellation_details.error_details}"
)
return "Speech synthesis canceled."
else:
return f"Speech synthesis failed: {result.reason}"
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
speech_file = self._text2speech(query, self.speech_language)
return speech_file
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("AzureCogsText2SpeechTool does not support async")
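Example usage (a minimal sketch; assumes azure-cognitiveservices-speech is installed and that AZURE_COGS_KEY and AZURE_COGS_REGION hold valid Azure credentials — the values below are placeholders you must supply):

import os
from langchain.tools.azure_cognitive_services.text2speech import AzureCogsText2SpeechTool

os.environ["AZURE_COGS_KEY"] = "<your-key>"        # placeholder
os.environ["AZURE_COGS_REGION"] = "<your-region>"  # placeholder
tool = AzureCogsText2SpeechTool()
wav_path = tool.run("Hello from LangChain!")
print(wav_path)  # path to a temporary .wav file with the synthesized audio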
Source: https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/text2speech.html
Source code for langchain.tools.azure_cognitive_services.form_recognizer
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class AzureCogsFormRecognizerTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Form Recognizer API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_endpoint: str = "" #: :meta private:
doc_analysis_client: Any #: :meta private:
name = "Azure Cognitive Services Form Recognizer"
description = (
"A wrapper around Azure Cognitive Services Form Recognizer. "
"Useful for when you need to "
"extract text, tables, and key-value pairs from documents. "
"Input should be a url to a document."
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_endpoint = get_from_dict_or_env(
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
)
try:
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
values["doc_analysis_client"] = DocumentAnalysisClient(
endpoint=azure_cogs_endpoint,
credential=AzureKeyCredential(azure_cogs_key),
)
except ImportError:
raise ImportError(
"azure-ai-formrecognizer is not installed. "
"Run `pip install azure-ai-formrecognizer` to install."
)
return values
def _parse_tables(self, tables: List[Any]) -> List[Any]:
result = []
for table in tables:
rc, cc = table.row_count, table.column_count
_table = [["" for _ in range(cc)] for _ in range(rc)]
for cell in table.cells:
_table[cell.row_index][cell.column_index] = cell.content
result.append(_table)
return result
def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
result = []
for kv_pair in kv_pairs:
key = kv_pair.key.content if kv_pair.key else ""
value = kv_pair.value.content if kv_pair.value else ""
result.append((key, value))
return result
def _document_analysis(self, document_path: str) -> Dict:
document_src_type = detect_file_src_type(document_path)
if document_src_type == "local":
with open(document_path, "rb") as document:
poller = self.doc_analysis_client.begin_analyze_document(
"prebuilt-document", document
)
elif document_src_type == "remote":
poller = self.doc_analysis_client.begin_analyze_document_from_url(
"prebuilt-document", document_path
)
else:
raise ValueError(f"Invalid document path: {document_path}")
result = poller.result()
res_dict = {}
if result.content is not None:
res_dict["content"] = result.content
if result.tables is not None:
res_dict["tables"] = self._parse_tables(result.tables)
if result.key_value_pairs is not None:
res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)
return res_dict
def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
formatted_result = []
if "content" in document_analysis_result:
formatted_result.append(
f"Content: {document_analysis_result['content']}".replace("\n", " ")
)
if "tables" in document_analysis_result:
for i, table in enumerate(document_analysis_result["tables"]):
formatted_result.append(f"Table {i}: {table}".replace("\n", " "))
if "key_value_pairs" in document_analysis_result:
for kv_pair in document_analysis_result["key_value_pairs"]:
formatted_result.append(
f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
)
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
document_analysis_result = self._document_analysis(query)
if not document_analysis_result:
return "No good document analysis result was found"
return self._format_document_analysis_result(document_analysis_result)
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("AzureCogsFormRecognizerTool does not support async")
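Example usage (a minimal sketch; assumes azure-ai-formrecognizer is installed and that AZURE_COGS_KEY and AZURE_COGS_ENDPOINT are set to valid credentials; the document URL is a placeholder):

from langchain.tools.azure_cognitive_services.form_recognizer import (
    AzureCogsFormRecognizerTool,
)

tool = AzureCogsFormRecognizerTool()
result = tool.run("https://example.com/sample-invoice.pdf")  # placeholder URL
print(result)  # newline-separated content, tables, and key-value pairs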
Source: https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html