Source code for langchain.document_loaders.github (fragment)

        return query_params

    @property
    def url(self) -> str:
        return f"https://api.github.com/repos/{self.repo}/issues?{self.query_params}"

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/github.html
Source code for langchain.document_loaders.onedrive_file

from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, List

from pydantic import BaseModel, Field

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

if TYPE_CHECKING:
    from O365.drive import File

CHUNK_SIZE = 1024 * 1024 * 5


class OneDriveFileLoader(BaseLoader, BaseModel):
    file: File = Field(...)

    class Config:
        arbitrary_types_allowed = True

    def load(self) -> List[Document]:
        """Load Documents"""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.file.name}"
            self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive_file.html
Source code for langchain.document_loaders.twitter

"""Twitter document loader."""
from __future__ import annotations

from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    import tweepy
    from tweepy import OAuth2BearerHandler, OAuthHandler


def _dependable_tweepy_import() -> tweepy:
    try:
        import tweepy
    except ImportError:
        raise ImportError(
            "tweepy package not found, please install it with `pip install tweepy`"
        )
    return tweepy


class TwitterTweetLoader(BaseLoader):
    """Twitter tweets loader.

    Reads tweets posted by the given Twitter handles.

    First you need to go to
    `https://developer.twitter.com/en/docs/twitter-api
    /getting-started/getting-access-to-the-twitter-api`
    to get your token. And create a v2 version of the app.
    """

    def __init__(
        self,
        auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
        twitter_users: Sequence[str],
        number_tweets: Optional[int] = 100,
    ):
        self.auth = auth_handler
        self.twitter_users = twitter_users
        self.number_tweets = number_tweets

    def load(self) -> List[Document]:
        """Load tweets."""
        tweepy = _dependable_tweepy_import()
        api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser())

        results: List[Document] = []
        for username in self.twitter_users:
            tweets = api.user_timeline(screen_name=username, count=self.number_tweets)
            user = api.get_user(screen_name=username)
            docs = self._format_tweets(tweets, user)
            results.extend(docs)
        return results

    def _format_tweets(
        self, tweets: List[Dict[str, Any]], user_info: dict
    ) -> Iterable[Document]:
        """Format tweets into a string."""
        for tweet in tweets:
            metadata = {
                "created_at": tweet["created_at"],
                "user_info": user_info,
            }
            yield Document(
                page_content=tweet["text"],
                metadata=metadata,
            )

    @classmethod
    def from_bearer_token(
        cls,
        oauth2_bearer_token: str,
        twitter_users: Sequence[str],
        number_tweets: Optional[int] = 100,
    ) -> TwitterTweetLoader:
        """Create a TwitterTweetLoader from OAuth2 bearer token."""
        tweepy = _dependable_tweepy_import()
        auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
        return cls(
            auth_handler=auth,
            twitter_users=twitter_users,
            number_tweets=number_tweets,
        )

    @classmethod
    def from_secrets(
        cls,
        access_token: str,
        access_token_secret: str,
        consumer_key: str,
        consumer_secret: str,
        twitter_users: Sequence[str],
        number_tweets: Optional[int] = 100,
    ) -> TwitterTweetLoader:
        """Create a TwitterTweetLoader from access tokens and secrets."""
        tweepy = _dependable_tweepy_import()
        auth = tweepy.OAuthHandler(
            access_token=access_token,
            access_token_secret=access_token_secret,
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
        )
        return cls(
            auth_handler=auth,
            twitter_users=twitter_users,
            number_tweets=number_tweets,
        )

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/twitter.html
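A minimal usage sketch of the bearer-token path above; the token and handle are placeholders, not real credentials:

# Hypothetical usage of TwitterTweetLoader.from_bearer_token; the token and
# handle below are placeholders.
from langchain.document_loaders.twitter import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # placeholder credential
    twitter_users=["some_handle"],            # placeholder handle
    number_tweets=50,
)
docs = loader.load()  # one Document per tweet, with created_at/user_info metadata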
Source code for langchain.document_loaders.rtf

"""Loader that loads rich text files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredRTFLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load rtf files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        min_unstructured_version = "0.5.12"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning rtf files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.rtf import partition_rtf

        return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/rtf.html
Source code for langchain.document_loaders.directory

"""Loading logic for loading documents from a directory."""
import concurrent.futures
import logging
from pathlib import Path
from typing import Any, List, Optional, Type, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

FILE_LOADER_TYPE = Union[
    Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)


def _is_visible(p: Path) -> bool:
    parts = p.parts
    for _p in parts:
        if _p.startswith("."):
            return False
    return True


class DirectoryLoader(BaseLoader):
    """Loading logic for loading documents from a directory."""

    def __init__(
        self,
        path: str,
        glob: str = "**/[!.]*",
        silent_errors: bool = False,
        load_hidden: bool = False,
        loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
        loader_kwargs: Union[dict, None] = None,
        recursive: bool = False,
        show_progress: bool = False,
        use_multithreading: bool = False,
        max_concurrency: int = 4,
    ):
        """Initialize with path to directory and how to glob over it."""
        if loader_kwargs is None:
            loader_kwargs = {}
        self.path = path
        self.glob = glob
        self.load_hidden = load_hidden
        self.loader_cls = loader_cls
        self.loader_kwargs = loader_kwargs
        self.silent_errors = silent_errors
        self.recursive = recursive
        self.show_progress = show_progress
        self.use_multithreading = use_multithreading
        self.max_concurrency = max_concurrency

    def load_file(
        self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
    ) -> None:
        if item.is_file():
            if _is_visible(item.relative_to(path)) or self.load_hidden:
                try:
                    sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
                    docs.extend(sub_docs)
                except Exception as e:
                    if self.silent_errors:
                        logger.warning(e)
                    else:
                        raise e
                finally:
                    if pbar:
                        pbar.update(1)

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.path)
        if not p.exists():
            raise FileNotFoundError(f"Directory not found: '{self.path}'")
        if not p.is_dir():
            raise ValueError(f"Expected directory, got file: '{self.path}'")

        docs: List[Document] = []
        items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))

        pbar = None
        if self.show_progress:
            try:
                from tqdm import tqdm

                pbar = tqdm(total=len(items))
            except ImportError as e:
                logger.warning(
                    "To log the progress of DirectoryLoader you need to install tqdm, "
                    "`pip install tqdm`"
                )
                if self.silent_errors:
                    logger.warning(e)
                else:
                    raise e

        if self.use_multithreading:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=self.max_concurrency
            ) as executor:
                executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
        else:
            for i in items:
                self.load_file(i, p, docs, pbar)

        if pbar:
            pbar.close()

        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/directory.html
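A minimal usage sketch; the directory path is a placeholder, and TextLoader is chosen here only to avoid the unstructured dependency:

# Hypothetical usage of DirectoryLoader; "docs/" is a placeholder path.
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.text import TextLoader

loader = DirectoryLoader(
    "docs/",                # placeholder directory
    glob="**/*.md",         # only markdown files
    loader_cls=TextLoader,  # each matched file is passed to this loader class
    recursive=True,
    use_multithreading=True,
    max_concurrency=8,
)
docs = loader.load()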
Source code for langchain.document_loaders.blockchain

import os
import re
import time
from enum import Enum
from typing import List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class BlockchainType(Enum):
    ETH_MAINNET = "eth-mainnet"
    ETH_GOERLI = "eth-goerli"
    POLYGON_MAINNET = "polygon-mainnet"
    POLYGON_MUMBAI = "polygon-mumbai"


class BlockchainDocumentLoader(BaseLoader):
    """Loads elements from a blockchain smart contract into Langchain documents.

    The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
    Polygon mainnet, and Polygon Mumbai testnet.

    If no BlockchainType is specified, the default is Ethereum mainnet.

    The Loader uses the Alchemy API to interact with the blockchain.
    The ALCHEMY_API_KEY environment variable must be set to use this loader.

    The API returns 100 NFTs per request and can be paginated using the
    startToken parameter.

    If get_all_tokens is set to True, the loader will get all tokens
    on the contract. Note that for contracts with a large number of tokens,
    this may take a long time (e.g. 10k tokens is 100 requests).
    The default value is False for this reason.

    The max_execution_time (sec) can be set to limit the execution time
    of the loader.

    Future versions of this loader can:
      - Support additional Alchemy APIs (e.g. getTransactions, etc.)
      - Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
    """

    def __init__(
        self,
        contract_address: str,
        blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
        api_key: str = "docs-demo",
        startToken: str = "",
        get_all_tokens: bool = False,
        max_execution_time: Optional[int] = None,
    ):
        self.contract_address = contract_address
        self.blockchainType = blockchainType.value
        self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
        self.startToken = startToken
        self.get_all_tokens = get_all_tokens
        self.max_execution_time = max_execution_time

        if not self.api_key:
            raise ValueError("Alchemy API key not provided.")

        if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
            raise ValueError(f"Invalid contract address {self.contract_address}")

    def load(self) -> List[Document]:
        result = []

        current_start_token = self.startToken
        start_time = time.time()

        while True:
            url = (
                f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
                f"{self.api_key}/getNFTsForCollection?withMetadata="
                f"True&contractAddress={self.contract_address}"
                f"&startToken={current_start_token}"
            )

            response = requests.get(url)

            if response.status_code != 200:
                raise ValueError(
                    f"Request failed with status code {response.status_code}"
                )

            items = response.json()["nfts"]

            if not items:
                break

            for item in items:
                content = str(item)
                tokenId = item["id"]["tokenId"]
                metadata = {
                    "source": self.contract_address,
                    "blockchain": self.blockchainType,
                    "tokenId": tokenId,
                }
                result.append(Document(page_content=content, metadata=metadata))

            # exit after the first API call if get_all_tokens is False
            if not self.get_all_tokens:
                break

            # get the start token for the next API call from the last item in array
            current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])

            if (
                self.max_execution_time is not None
                and (time.time() - start_time) > self.max_execution_time
            ):
                raise RuntimeError("Execution time exceeded the allowed time limit.")

        if not result:
            raise ValueError(
                f"No NFTs found for contract address {self.contract_address}"
            )

        return result

    # add one to the tokenId, ensuring the correct tokenId format is used
    def _get_next_tokenId(self, tokenId: str) -> str:
        value_type = self._detect_value_type(tokenId)

        if value_type == "hex_0x":
            value_int = int(tokenId, 16)
        elif value_type == "hex_0xbf":
            value_int = int(tokenId[2:], 16)
        else:
            value_int = int(tokenId)

        result = value_int + 1

        if value_type == "hex_0x":
            return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
        elif value_type == "hex_0xbf":
            return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
        else:
            return str(result)

    # A smart contract can use different formats for the tokenId
    @staticmethod
    def _detect_value_type(tokenId: str) -> str:
        if isinstance(tokenId, int):
            return "int"
        # Check the longer "0xbf" prefix before the generic "0x" prefix so this
        # branch is reachable (in the original ordering it never was).
        elif tokenId.startswith("0xbf"):
            return "hex_0xbf"
        elif tokenId.startswith("0x"):
            return "hex_0x"
        else:
            return "hex_0xbf"

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html
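A hypothetical usage sketch, assuming ALCHEMY_API_KEY is set in the environment; the contract address below is the widely cited BAYC collection and serves only as an example:

# Hypothetical usage of BlockchainDocumentLoader; assumes ALCHEMY_API_KEY is set.
from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d",  # example address
    blockchainType=BlockchainType.ETH_MAINNET,
    get_all_tokens=False,    # single request: at most 100 NFTs
    max_execution_time=60,   # abort pagination after ~60 seconds
)
nft_docs = loader.load()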
Source code for langchain.document_loaders.dataframe

"""Load from Dataframe object"""
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DataFrameLoader(BaseLoader):
    """Load Pandas DataFrames."""

    def __init__(self, data_frame: Any, page_content_column: str = "text"):
        """Initialize with dataframe object."""
        import pandas as pd

        if not isinstance(data_frame, pd.DataFrame):
            raise ValueError(
                f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
            )
        self.data_frame = data_frame
        self.page_content_column = page_content_column

    def load(self) -> List[Document]:
        """Load from the dataframe."""
        result = []
        # For very large dataframes, this needs to yield instead of building a list,
        # but that would require changing the return type to a generator for BaseLoader
        # and all its subclasses, which is a bigger refactor. Marking as future TODO.
        # This change will allow us to extend this to Spark and Dask dataframes.
        for _, row in self.data_frame.iterrows():
            text = row[self.page_content_column]
            metadata = row.to_dict()
            metadata.pop(self.page_content_column)
            result.append(Document(page_content=text, metadata=metadata))
        return result

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html
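A minimal sketch of the behavior above: every column other than page_content_column becomes Document metadata.

import pandas as pd

from langchain.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["hello", "world"], "author": ["alice", "bob"]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()
# [Document(page_content="hello", metadata={"author": "alice"}), ...]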
Source code for langchain.document_loaders.slack_directory

"""Loader for documents from a Slack export."""
import json
import zipfile
from pathlib import Path
from typing import Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SlackDirectoryLoader(BaseLoader):
    """Loader for loading documents from a Slack directory dump."""

    def __init__(self, zip_path: str, workspace_url: Optional[str] = None):
        """Initialize the SlackDirectoryLoader.

        Args:
            zip_path (str): The path to the Slack directory dump zip file.
            workspace_url (Optional[str]): The Slack workspace URL.
                Including the URL will turn sources into links. Defaults to None.
        """
        self.zip_path = Path(zip_path)
        self.workspace_url = workspace_url
        self.channel_id_map = self._get_channel_id_map(self.zip_path)

    @staticmethod
    def _get_channel_id_map(zip_path: Path) -> Dict[str, str]:
        """Get a dictionary mapping channel names to their respective IDs."""
        with zipfile.ZipFile(zip_path, "r") as zip_file:
            try:
                with zip_file.open("channels.json", "r") as f:
                    channels = json.load(f)
                return {channel["name"]: channel["id"] for channel in channels}
            except KeyError:
                return {}

    def load(self) -> List[Document]:
        """Load and return documents from the Slack directory dump."""
        docs = []
        with zipfile.ZipFile(self.zip_path, "r") as zip_file:
            for channel_path in zip_file.namelist():
                channel_name = Path(channel_path).parent.name
                if not channel_name:
                    continue
                if channel_path.endswith(".json"):
                    messages = self._read_json(zip_file, channel_path)
                    for message in messages:
                        document = self._convert_message_to_document(
                            message, channel_name
                        )
                        docs.append(document)
        return docs

    def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
        """Read JSON data from a zip subfile."""
        with zip_file.open(file_path, "r") as f:
            data = json.load(f)
        return data

    def _convert_message_to_document(
        self, message: dict, channel_name: str
    ) -> Document:
        """
        Convert a message to a Document object.

        Args:
            message (dict): A message in the form of a dictionary.
            channel_name (str): The name of the channel the message belongs to.

        Returns:
            Document: A Document object representing the message.
        """
        text = message.get("text", "")
        metadata = self._get_message_metadata(message, channel_name)
        return Document(
            page_content=text,
            metadata=metadata,
        )

    def _get_message_metadata(self, message: dict, channel_name: str) -> dict:
        """Create and return metadata for a given message and channel."""
        timestamp = message.get("ts", "")
        user = message.get("user", "")
        source = self._get_message_source(channel_name, user, timestamp)
        return {
            "source": source,
            "channel": channel_name,
            "timestamp": timestamp,
            "user": user,
        }

    def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str:
        """
        Get the message source as a string.

        Args:
            channel_name (str): The name of the channel the message belongs to.
            user (str): The user ID who sent the message.
            timestamp (str): The timestamp of the message.

        Returns:
            str: The message source.
        """
        if self.workspace_url:
            channel_id = self.channel_id_map.get(channel_name, "")
            return (
                f"{self.workspace_url}/archives/{channel_id}"
                + f"/p{timestamp.replace('.', '')}"
            )
        else:
            return f"{channel_name} - {user} - {timestamp}"

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/slack_directory.html
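A minimal usage sketch; the export path and workspace URL are placeholders:

# Hypothetical usage of SlackDirectoryLoader; zip path and workspace URL
# below are placeholders.
from langchain.document_loaders.slack_directory import SlackDirectoryLoader

loader = SlackDirectoryLoader(
    zip_path="slack_export.zip",                     # placeholder export file
    workspace_url="https://my-workspace.slack.com",  # placeholder; turns sources into links
)
docs = loader.load()  # one Document per message, with channel/user/timestamp metadata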
Source code for langchain.document_loaders.googledrive

"""Loader that loads data from Google Drive."""

# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
#    https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
#    https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application  # noqa: E501
# 4. For service accounts visit
#    https://cloud.google.com/iam/docs/service-accounts-create

from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from pydantic import BaseModel, root_validator, validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]


class GoogleDriveLoader(BaseLoader, BaseModel):
    """Loader that loads Google Docs from Google Drive."""

    service_account_key: Path = Path.home() / ".credentials" / "keys.json"
    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"
    folder_id: Optional[str] = None
    document_ids: Optional[List[str]] = None
    file_ids: Optional[List[str]] = None
    recursive: bool = False
    file_types: Optional[Sequence[str]] = None
    load_trashed_files: bool = False

    @root_validator
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either folder_id or document_ids is set, but not both."""
        if values.get("folder_id") and (
            values.get("document_ids") or values.get("file_ids")
        ):
            raise ValueError(
                "Cannot specify both folder_id and document_ids nor "
                "folder_id and file_ids"
            )
        if (
            not values.get("folder_id")
            and not values.get("document_ids")
            and not values.get("file_ids")
        ):
            raise ValueError("Must specify either folder_id, document_ids, or file_ids")

        file_types = values.get("file_types")
        if file_types:
            if values.get("document_ids") or values.get("file_ids"):
                raise ValueError(
                    "file_types can only be given when folder_id is given,"
                    " (not when document_ids or file_ids are given)."
                )
            type_mapping = {
                "document": "application/vnd.google-apps.document",
                "sheet": "application/vnd.google-apps.spreadsheet",
                "pdf": "application/pdf",
            }
            allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
            short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
            full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
            for file_type in file_types:
                if file_type not in allowed_types:
                    raise ValueError(
                        f"Given file type {file_type} is not supported. "
                        f"Supported values are: {short_names}; and "
                        f"their full-form names: {full_names}"
                    )

            # replace short-form file types by full-form file types
            def full_form(x: str) -> str:
                return type_mapping[x] if x in type_mapping else x

            values["file_types"] = [full_form(file_type) for file_type in file_types]
        return values

    @validator("credentials_path")
    def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
        """Validate that credentials_path exists."""
        if not v.exists():
            raise ValueError(f"credentials_path {v} does not exist")
        return v

    def _load_credentials(self) -> Any:
        """Load credentials."""
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib` "
                "to use the Google Drive loader."
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds

    def _load_sheet_from_id(self, id: str) -> List[Document]:
        """Load a sheet and all tabs from an ID."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        sheets_service = build("sheets", "v4", credentials=creds)
        spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
        sheets = spreadsheet.get("sheets", [])

        documents = []
        for sheet in sheets:
            sheet_name = sheet["properties"]["title"]
            result = (
                sheets_service.spreadsheets()
                .values()
                .get(spreadsheetId=id, range=sheet_name)
                .execute()
            )
            values = result.get("values", [])

            header = values[0]
            for i, row in enumerate(values[1:], start=1):
                metadata = {
                    "source": (
                        f"https://docs.google.com/spreadsheets/d/{id}/"
                        f"edit?gid={sheet['properties']['sheetId']}"
                    ),
                    "title": f"{spreadsheet['properties']['title']} - {sheet_name}",
                    "row": i,
                }
                content = []
                for j, v in enumerate(row):
                    title = header[j].strip() if len(header) > j else ""
                    content.append(f"{title}: {v.strip()}")

                page_content = "\n".join(content)
                documents.append(Document(page_content=page_content, metadata=metadata))

        return documents

    def _load_document_from_id(self, id: str) -> Document:
        """Load a document from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.errors import HttpError
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().export_media(fileId=id, mimeType="text/plain")
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        try:
            while done is False:
                status, done = downloader.next_chunk()
        except HttpError as e:
            if e.resp.status == 404:
                print("File not found: {}".format(id))
            else:
                print("An error occurred: {}".format(e))

        text = fh.getvalue().decode("utf-8")
        metadata = {
            "source": f"https://docs.google.com/document/d/{id}/edit",
            "title": f"{file.get('name')}",
        }
        return Document(page_content=text, metadata=metadata)

    def _load_documents_from_folder(
        self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
    ) -> List[Document]:
        """Load documents from a folder."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)
        files = self._fetch_files_recursive(service, folder_id)
        # If file types filter is provided, we'll filter by the file type.
        if file_types:
            _files = [f for f in files if f["mimeType"] in file_types]  # type: ignore
        else:
            _files = files

        returns = []
        # iterate the filtered list so the file_types filter actually takes effect
        for file in _files:
            if file["trashed"] and not self.load_trashed_files:
                continue
            elif file["mimeType"] == "application/vnd.google-apps.document":
                returns.append(self._load_document_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
                returns.extend(self._load_sheet_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/pdf":
                returns.extend(self._load_file_from_id(file["id"]))  # type: ignore
            else:
                pass

        return returns

    def _fetch_files_recursive(
        self, service: Any, folder_id: str
    ) -> List[Dict[str, Union[str, List[str]]]]:
        """Fetch all files and subfolders recursively."""
        results = (
            service.files()
            .list(
                q=f"'{folder_id}' in parents",
                pageSize=1000,
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
            )
            .execute()
        )
        files = results.get("files", [])
        returns = []
        for file in files:
            if file["mimeType"] == "application/vnd.google-apps.folder":
                if self.recursive:
                    returns.extend(self._fetch_files_recursive(service, file["id"]))
            else:
                returns.append(file)

        return returns

    def _load_documents_from_ids(self) -> List[Document]:
        """Load documents from a list of IDs."""
        if not self.document_ids:
            raise ValueError("document_ids must be set")
        return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]

    def _load_file_from_id(self, id: str) -> List[Document]:
        """Load a file from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().get_media(fileId=id)
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()

        content = fh.getvalue()

        from PyPDF2 import PdfReader

        pdf_reader = PdfReader(BytesIO(content))

        return [
            Document(
                page_content=page.extract_text(),
                metadata={
                    "source": f"https://drive.google.com/file/d/{id}/view",
                    "title": f"{file.get('name')}",
                    "page": i,
                },
            )
            for i, page in enumerate(pdf_reader.pages)
        ]

    def _load_file_from_ids(self) -> List[Document]:
        """Load files from a list of IDs."""
        if not self.file_ids:
            raise ValueError("file_ids must be set")
        docs = []
        for file_id in self.file_ids:
            docs.extend(self._load_file_from_id(file_id))
        return docs

    def load(self) -> List[Document]:
        """Load documents."""
        if self.folder_id:
            return self._load_documents_from_folder(
                self.folder_id, file_types=self.file_types
            )
        elif self.document_ids:
            return self._load_documents_from_ids()
        else:
            return self._load_file_from_ids()

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/googledrive.html
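A hypothetical usage sketch, assuming credentials at the default ~/.credentials paths shown above; the folder ID is a placeholder:

# Hypothetical usage of GoogleDriveLoader; the folder ID is a placeholder.
from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="PLACEHOLDER_FOLDER_ID",  # placeholder Drive folder ID
    file_types=["document", "sheet"],   # short forms are expanded by the validator
    recursive=True,
)
docs = loader.load()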
Source code for langchain.document_loaders.iugu

"""Loader that fetches data from IUGU"""
import json
import urllib.request
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict

IUGU_ENDPOINTS = {
    "invoices": "https://api.iugu.com/v1/invoices",
    "customers": "https://api.iugu.com/v1/customers",
    "charges": "https://api.iugu.com/v1/charges",
    "subscriptions": "https://api.iugu.com/v1/subscriptions",
    "plans": "https://api.iugu.com/v1/plans",
}


class IuguLoader(BaseLoader):
    def __init__(self, resource: str, api_token: Optional[str] = None) -> None:
        self.resource = resource
        api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN")
        self.headers = {"Authorization": f"Bearer {api_token}"}

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)

        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_dict(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = IUGU_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/iugu.html
Source code for langchain.document_loaders.obsidian

"""Loader that loads Obsidian directory dump."""
import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ObsidianLoader(BaseLoader):
    """Loader that loads Obsidian files from disk."""

    FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)

    def __init__(
        self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
    ):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding
        self.collect_metadata = collect_metadata

    def _parse_front_matter(self, content: str) -> dict:
        """Parse front matter metadata from the content and return it as a dict."""
        if not self.collect_metadata:
            return {}
        match = self.FRONT_MATTER_REGEX.search(content)
        front_matter = {}
        if match:
            lines = match.group(1).split("\n")
            for line in lines:
                if ":" in line:
                    key, value = line.split(":", 1)
                    front_matter[key.strip()] = value.strip()
                else:
                    # Skip lines without a colon
                    continue
        return front_matter

    def _remove_front_matter(self, content: str) -> str:
        """Remove front matter metadata from the given content."""
        if not self.collect_metadata:
            return content
        return self.FRONT_MATTER_REGEX.sub("", content)

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p, encoding=self.encoding) as f:
                text = f.read()

            front_matter = self._parse_front_matter(text)
            text = self._remove_front_matter(text)
            metadata = {
                "source": str(p.name),
                "path": str(p),
                "created": p.stat().st_ctime,
                "last_modified": p.stat().st_mtime,
                "last_accessed": p.stat().st_atime,
                **front_matter,
            }
            docs.append(Document(page_content=text, metadata=metadata))
        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/obsidian.html
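A minimal sketch of the front-matter handling above; the vault path is a placeholder. A note that begins with a block like

    ---
    tags: project
    status: draft
    ---

gains "tags" and "status" keys in its Document metadata, and the block itself is stripped from the page content:

# Hypothetical usage of ObsidianLoader; "my_vault/" is a placeholder directory.
from langchain.document_loaders.obsidian import ObsidianLoader

loader = ObsidianLoader("my_vault/")
docs = loader.load()  # metadata includes source, path, timestamps, plus front matter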
Source code for langchain.document_loaders.psychic

"""Loader that loads documents from Psychic.dev."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class PsychicLoader(BaseLoader):
    """Loader that loads documents from Psychic.dev."""

    def __init__(self, api_key: str, connector_id: str, connection_id: str):
        """Initialize with API key, connector id, and connection id."""
        try:
            from psychicapi import ConnectorId, Psychic  # noqa: F401
        except ImportError:
            raise ImportError(
                "`psychicapi` package not found, please run `pip install psychicapi`"
            )
        self.psychic = Psychic(secret_key=api_key)
        self.connector_id = ConnectorId(connector_id)
        self.connection_id = connection_id

    def load(self) -> List[Document]:
        """Load documents."""
        psychic_docs = self.psychic.get_documents(self.connector_id, self.connection_id)
        return [
            Document(
                page_content=doc["content"],
                metadata={"title": doc["title"], "source": doc["uri"]},
            )
            for doc in psychic_docs
        ]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html
Source code for langchain.document_loaders.trello

"""Loader that loads cards from Trello"""
from __future__ import annotations

from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env

if TYPE_CHECKING:
    from trello import Board, Card, TrelloClient


class TrelloLoader(BaseLoader):
    """Trello loader. Reads all cards from a Trello board."""

    def __init__(
        self,
        client: TrelloClient,
        board_name: str,
        *,
        include_card_name: bool = True,
        include_comments: bool = True,
        include_checklist: bool = True,
        card_filter: Literal["closed", "open", "all"] = "all",
        extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
    ):
        """Initialize Trello loader.

        Args:
            client: Trello API client.
            board_name: The name of the Trello board.
            include_card_name: Whether to include the name of the card in the document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in the
                document.
            card_filter: Filter on card status. Valid values are "closed", "open",
                "all".
            extra_metadata: List of additional metadata fields to include as document
                metadata. Valid values are "due_date", "labels", "list", "closed".
        """
        self.client = client
        self.board_name = board_name
        self.include_card_name = include_card_name
        self.include_comments = include_comments
        self.include_checklist = include_checklist
        self.extra_metadata = extra_metadata
        self.card_filter = card_filter

    @classmethod
    def from_credentials(
        cls,
        board_name: str,
        *,
        api_key: Optional[str] = None,
        token: Optional[str] = None,
        **kwargs: Any,
    ) -> TrelloLoader:
        """Convenience constructor that builds TrelloClient init param for you.

        Args:
            board_name: The name of the Trello board.
            api_key: Trello API key. Can also be specified as environment variable
                TRELLO_API_KEY.
            token: Trello token. Can also be specified as environment variable
                TRELLO_TOKEN.
            include_card_name: Whether to include the name of the card in the document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in the
                document.
            card_filter: Filter on card status. Valid values are "closed", "open",
                "all".
            extra_metadata: List of additional metadata fields to include as document
                metadata. Valid values are "due_date", "labels", "list", "closed".
        """
        try:
            from trello import TrelloClient  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import trello python package. "
                "Please install it with `pip install py-trello`."
            ) from ex

        api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
        token = token or get_from_env("token", "TRELLO_TOKEN")
        client = TrelloClient(api_key=api_key, token=token)
        return cls(client, board_name, **kwargs)

    def load(self) -> List[Document]:
        """Loads all cards from the specified Trello board.

        You can filter the cards, metadata and text included by using the optional
        parameters.

        Returns:
            A list of documents, one for each card in the board.
        """
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError as ex:
            raise ImportError(
                "`beautifulsoup4` package not found, please run"
                " `pip install beautifulsoup4`"
            ) from ex

        board = self._get_board()
        # Create a dictionary with the list IDs as keys and the list names as values
        list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
        # Get Cards on the board
        cards = board.get_cards(card_filter=self.card_filter)
        return [self._card_to_doc(card, list_dict) for card in cards]

    def _get_board(self) -> Board:
        # Find the first board with a matching name
        board = next(
            (b for b in self.client.list_boards() if b.name == self.board_name), None
        )
        if not board:
            raise ValueError(f"Board `{self.board_name}` not found.")
        return board

    def _card_to_doc(self, card: Card, list_dict: dict) -> Document:
        from bs4 import BeautifulSoup  # type: ignore

        text_content = ""
        if self.include_card_name:
            text_content = card.name + "\n"
        if card.description.strip():
            text_content += BeautifulSoup(card.description, "lxml").get_text()
        if self.include_checklist:
            # Get all the checklist items on the card
            for checklist in card.checklists:
                if checklist.items:
                    items = [
                        f"{item['name']}:{item['state']}" for item in checklist.items
                    ]
                    text_content += f"\n{checklist.name}\n" + "\n".join(items)

        if self.include_comments:
            # Get all the comments on the card
            comments = [
                BeautifulSoup(comment["data"]["text"], "lxml").get_text()
                for comment in card.comments
            ]
            text_content += "Comments:" + "\n".join(comments)

        # Default metadata fields
        metadata = {
            "title": card.name,
            "id": card.id,
            "url": card.url,
        }

        # Extra metadata fields. Card object is not subscriptable.
        if "labels" in self.extra_metadata:
            metadata["labels"] = [label.name for label in card.labels]
        if "list" in self.extra_metadata:
            if card.list_id in list_dict:
                metadata["list"] = list_dict[card.list_id]
        if "closed" in self.extra_metadata:
            metadata["closed"] = card.closed
        if "due_date" in self.extra_metadata:
            metadata["due_date"] = card.due_date

        return Document(page_content=text_content, metadata=metadata)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/trello.html
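A hypothetical usage sketch of the from_credentials constructor above, assuming TRELLO_API_KEY and TRELLO_TOKEN are set in the environment; the board name is a placeholder:

# Hypothetical usage of TrelloLoader.from_credentials; board name is a placeholder.
from langchain.document_loaders.trello import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Product Roadmap",                     # placeholder board name
    card_filter="open",                    # skip archived cards
    extra_metadata=("list", "due_date"),   # subset of the supported extra fields
)
docs = loader.load()  # one Document per card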
Source code for langchain.document_loaders.notebook

"""Loader that loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_cells(
    cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
    """Combine cells information in a readable format ready to be used."""
    cell_type = cell["cell_type"]
    source = cell["source"]
    output = cell["outputs"]

    if include_outputs and cell_type == "code" and output:
        if "ename" in output[0].keys():
            error_name = output[0]["ename"]
            error_value = output[0]["evalue"]
            if traceback:
                traceback = output[0]["traceback"]
                return (
                    f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
                    f" with description '{error_value}'\n"
                    f"and traceback '{traceback}'\n\n"
                )
            else:
                return (
                    f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
                    f"with description '{error_value}'\n\n"
                )
        elif output[0]["output_type"] == "stream":
            output = output[0]["text"]
            min_output = min(max_output_length, len(output))
            return (
                f"'{cell_type}' cell: '{source}'\n with "
                f"output: '{output[:min_output]}'\n\n"
            )
    else:
        return f"'{cell_type}' cell: '{source}'\n\n"

    return ""


def remove_newlines(x: Any) -> Any:
    """Remove recursively newlines, no matter the data structure they are stored in."""
    import pandas as pd

    if isinstance(x, str):
        return x.replace("\n", "")
    elif isinstance(x, list):
        return [remove_newlines(elem) for elem in x]
    elif isinstance(x, pd.DataFrame):
        return x.applymap(remove_newlines)
    else:
        return x


class NotebookLoader(BaseLoader):
    """Loader that loads .ipynb notebook files."""

    def __init__(
        self,
        path: str,
        include_outputs: bool = False,
        max_output_length: int = 10,
        remove_newline: bool = False,
        traceback: bool = False,
    ):
        """Initialize with path."""
        self.file_path = path
        self.include_outputs = include_outputs
        self.max_output_length = max_output_length
        self.remove_newline = remove_newline
        self.traceback = traceback

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "pandas is needed for Notebook Loader, "
                "please install with `pip install pandas`"
            )
        p = Path(self.file_path)

        with open(p, encoding="utf8") as f:
            d = json.load(f)

        data = pd.json_normalize(d["cells"])
        filtered_data = data[["cell_type", "source", "outputs"]]
        if self.remove_newline:
            filtered_data = filtered_data.applymap(remove_newlines)

        text = filtered_data.apply(
            lambda x: concatenate_cells(
                x, self.include_outputs, self.max_output_length, self.traceback
            ),
            axis=1,
        ).str.cat(sep=" ")

        metadata = {"source": str(p)}

        return [Document(page_content=text, metadata=metadata)]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/notebook.html
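A minimal usage sketch; the notebook path is a placeholder:

# Hypothetical usage of NotebookLoader; "analysis.ipynb" is a placeholder path.
from langchain.document_loaders.notebook import NotebookLoader

loader = NotebookLoader(
    "analysis.ipynb",
    include_outputs=True,    # append (truncated) stream outputs and error summaries
    max_output_length=100,
    remove_newline=True,
)
docs = loader.load()  # a single Document concatenating all cells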
Source code for langchain.document_loaders.web_base

"""Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, List, Optional, Union

import aiohttp
import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

default_header_template = {
    "User-Agent": "",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
    ";q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": "https://www.google.com/",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
}


def _build_metadata(soup: Any, url: str) -> dict:
    """Build metadata from BeautifulSoup output."""
    metadata = {"source": url}
    if title := soup.find("title"):
        metadata["title"] = title.get_text()
    if description := soup.find("meta", attrs={"name": "description"}):
        metadata["description"] = description.get("content", None)
    if html := soup.find("html"):
        metadata["language"] = html.get("lang", None)
    return metadata


class WebBaseLoader(BaseLoader):
    """Loader that uses urllib and beautiful soup to load webpages."""

    web_paths: List[str]

    requests_per_second: int = 2
    """Max number of concurrent requests to make."""

    default_parser: str = "html.parser"
    """Default parser to use for BeautifulSoup."""

    requests_kwargs: Dict[str, Any] = {}
    """kwargs for requests"""

    def __init__(
        self, web_path: Union[str, List[str]], header_template: Optional[dict] = None
    ):
        """Initialize with webpage path."""

        # TODO: Deprecate web_path in favor of web_paths, and remove this
        # left like this because there are a number of loaders that expect single
        # urls
        if isinstance(web_path, str):
            self.web_paths = [web_path]
        elif isinstance(web_path, List):
            self.web_paths = web_path

        self.session = requests.Session()
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "bs4 package not found, please install it with `pip install bs4`"
            )

        headers = header_template or default_header_template
        if not headers.get("User-Agent"):
            try:
                from fake_useragent import UserAgent

                headers["User-Agent"] = UserAgent().random
            except ImportError:
                logger.info(
                    "fake_useragent not found, using default user agent."
                    "To get a realistic header for requests, "
                    "`pip install fake_useragent`."
                )

        self.session.headers = dict(headers)

    @property
    def web_path(self) -> str:
        if len(self.web_paths) > 1:
            raise ValueError("Multiple webpaths found.")
        return self.web_paths[0]

    async def _fetch(
        self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
    ) -> str:
        async with aiohttp.ClientSession() as session:
            for i in range(retries):
                try:
                    async with session.get(
                        url, headers=self.session.headers
                    ) as response:
                        return await response.text()
                except aiohttp.ClientConnectionError as e:
                    if i == retries - 1:
                        raise
                    else:
                        logger.warning(
                            f"Error fetching {url} with attempt "
                            f"{i + 1}/{retries}: {e}. Retrying..."
                        )
                        await asyncio.sleep(cooldown * backoff**i)
        raise ValueError("retry count exceeded")

    async def _fetch_with_rate_limit(
        self, url: str, semaphore: asyncio.Semaphore
    ) -> str:
        async with semaphore:
            return await self._fetch(url)

    async def fetch_all(self, urls: List[str]) -> Any:
        """Fetch all urls concurrently with rate limiting."""
        semaphore = asyncio.Semaphore(self.requests_per_second)
        tasks = []
        for url in urls:
            task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
            tasks.append(task)
        try:
            from tqdm.asyncio import tqdm_asyncio

            return await tqdm_asyncio.gather(
                *tasks, desc="Fetching pages", ascii=True, mininterval=1
            )
        except ImportError:
            warnings.warn("For better logging of progress, `pip install tqdm`")
            return await asyncio.gather(*tasks)

    @staticmethod
    def _check_parser(parser: str) -> None:
        """Check that parser is valid for bs4."""
        valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
        if parser not in valid_parsers:
            raise ValueError(
                "`parser` must be one of " + ", ".join(valid_parsers) + "."
            )

    def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
        """Fetch all urls, then return soups for all results."""
        from bs4 import BeautifulSoup

        results = asyncio.run(self.fetch_all(urls))
        final_results = []
        for i, result in enumerate(results):
            url = urls[i]
            if parser is None:
                if url.endswith(".xml"):
                    parser = "xml"
                else:
                    parser = self.default_parser
                self._check_parser(parser)

            final_results.append(BeautifulSoup(result, parser))

        return final_results

    def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
        from bs4 import BeautifulSoup

        if parser is None:
            if url.endswith(".xml"):
                parser = "xml"
            else:
                parser = self.default_parser

        self._check_parser(parser)

        html_doc = self.session.get(url, **self.requests_kwargs)
        html_doc.encoding = html_doc.apparent_encoding
        return BeautifulSoup(html_doc.text, parser)

    def scrape(self, parser: Union[str, None] = None) -> Any:
        """Scrape data from webpage and return it in BeautifulSoup format."""
        if parser is None:
            parser = self.default_parser
        return self._scrape(self.web_path, parser)

    def load(self) -> List[Document]:
        """Load text from the url(s) in web_path."""
        docs = []
        for path in self.web_paths:
            soup = self._scrape(path)
            text = soup.get_text()
            metadata = _build_metadata(soup, path)
            docs.append(Document(page_content=text, metadata=metadata))

        return docs

    def aload(self) -> List[Document]:
        """Load text from the urls in web_path async into Documents."""
        results = self.scrape_all(self.web_paths)
        docs = []
        for i in range(len(results)):
            soup = results[i]
            text = soup.get_text()
            metadata = _build_metadata(soup, self.web_paths[i])
            docs.append(Document(page_content=text, metadata=metadata))

        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html
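A minimal sketch of the synchronous and asynchronous paths above; the URLs serve only as examples:

from langchain.document_loaders.web_base import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org"])
docs = loader.load()   # sequential requests via requests.Session
docs = loader.aload()  # concurrent aiohttp fetches, rate-limited by requests_per_second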
Source code for langchain.document_loaders.evernote

"""Load documents from Evernote.

https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c
"""
import hashlib
import logging
from base64 import b64decode
from time import strptime
from typing import Any, Dict, Iterator, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class EverNoteLoader(BaseLoader):
    """EverNote Loader.

    Loads an EverNote notebook export file e.g. my_notebook.enex into Documents.
    Instructions on producing this file can be found at
    https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML

    Currently only the plain text in the note is extracted and stored as the contents
    of the Document, any non content metadata (e.g. 'author', 'created', 'updated' etc.
    but not 'content-raw' or 'resource') tags on the note will be extracted and stored
    as metadata on the Document.

    Args:
        file_path (str): The path to the notebook export with a .enex extension
        load_single_document (bool): Whether or not to concatenate the content of all
            notes into a single long Document. If this is set to True (default) then
            the only metadata on the document will be the 'source' which contains the
            file name of the export.
    """  # noqa: E501

    def __init__(self, file_path: str, load_single_document: bool = True):
        """Initialize with file path."""
        self.file_path = file_path
        self.load_single_document = load_single_document

    def load(self) -> List[Document]:
        """Load documents from EverNote export file."""
        documents = [
            Document(
                page_content=note["content"],
                metadata={
                    **{
                        key: value
                        for key, value in note.items()
                        if key not in ["content", "content-raw", "resource"]
                    },
                    **{"source": self.file_path},
                },
            )
            for note in self._parse_note_xml(self.file_path)
            if note.get("content") is not None
        ]

        if not self.load_single_document:
            return documents

        return [
            Document(
                page_content="".join(
                    [document.page_content for document in documents]
                ),
                metadata={"source": self.file_path},
            )
        ]

    @staticmethod
    def _parse_content(content: str) -> str:
        try:
            import html2text

            return html2text.html2text(content).strip()
        except ImportError as e:
            logging.error(
                "Could not import `html2text`. Although it is not a required package "
                "to use Langchain, using the EverNote loader requires `html2text`. "
                "Please install `html2text` via `pip install html2text` and try again."
            )
            raise e

    @staticmethod
    def _parse_resource(resource: list) -> dict:
        rsc_dict: Dict[str, Any] = {}
        for elem in resource:
            if elem.tag == "data":
                # Sometimes elem.text is None
                rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
                rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest()
            else:
                rsc_dict[elem.tag] = elem.text

        return rsc_dict

    @staticmethod
    def _parse_note(note: List, prefix: Optional[str] = None) -> dict:
        note_dict: Dict[str, Any] = {}
        resources = []

        def add_prefix(element_tag: str) -> str:
            if prefix is None:
                return element_tag
            return f"{prefix}.{element_tag}"

        for elem in note:
            if elem.tag == "content":
                note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text)
                # A copy of original content
                note_dict["content-raw"] = elem.text
            elif elem.tag == "resource":
                resources.append(EverNoteLoader._parse_resource(elem))
            elif elem.tag == "created" or elem.tag == "updated":
                note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ")
            elif elem.tag == "note-attributes":
                additional_attributes = EverNoteLoader._parse_note(
                    elem, elem.tag
                )  # Recursively enter the note-attributes tag
                note_dict.update(additional_attributes)
            else:
                note_dict[elem.tag] = elem.text

        if len(resources) > 0:
            note_dict["resource"] = resources

        return {add_prefix(key): value for key, value in note_dict.items()}

    @staticmethod
    def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]:
        """Parse Evernote xml."""
        # Without huge_tree set to True, parser may complain about huge text node
        # Try to recover, because there may be &nbsp;, which will cause
        # "XMLSyntaxError: Entity 'nbsp' not defined"
        try:
            from lxml import etree
        except ImportError as e:
            logging.error(
                "Could not import `lxml`. Although it is not a required package to use "
                "Langchain, using the EverNote loader requires `lxml`. Please install "
                "`lxml` via `pip install lxml` and try again."
            )
            raise e

        context = etree.iterparse(
            xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True
        )

        for action, elem in context:
            if elem.tag == "note":
                yield EverNoteLoader._parse_note(elem)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
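A minimal usage sketch; the export path is a placeholder:

# Hypothetical usage of EverNoteLoader; "my_notebook.enex" is a placeholder path.
from langchain.document_loaders.evernote import EverNoteLoader

loader = EverNoteLoader("my_notebook.enex", load_single_document=False)
docs = loader.load()  # one Document per note, with note attributes as metadata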
Source code for langchain.document_loaders.whatsapp_chat

import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Loader that loads WhatsApp messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,2}
                [\/.]
                \d{1,2}
                [\/.]
                \d{2,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[ _](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        for line in lines:
            result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE)
            if result:
                date, sender, text = result.groups()
                text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/whatsapp_chat.html
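
A minimal usage sketch, assuming a chat exported from WhatsApp as a text file (the file name is a hypothetical placeholder):

    from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader

    loader = WhatsAppChatLoader(path="whatsapp_export.txt")
    docs = loader.load()  # a single Document containing the formatted chat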
Source code for langchain.document_loaders.arxiv

from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.arxiv import ArxivAPIWrapper


class ArxivLoader(BaseLoader):
    """Loads a query result from arxiv.org into a list of Documents.

    Each document represents one search result; the loader converts the
    original PDF format into text.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
    ):
        self.query = query
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta

    def load(self) -> List[Document]:
        arxiv_client = ArxivAPIWrapper(
            load_max_docs=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
        )
        docs = arxiv_client.load(self.query)
        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/arxiv.html
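
A minimal usage sketch; the query string is an arbitrary example:

    from langchain.document_loaders.arxiv import ArxivLoader

    loader = ArxivLoader(query="large language models", load_max_docs=2)
    docs = loader.load()  # one Document per fetched paper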
Source code for langchain.document_loaders.facebook_chat

"""Loader that loads Facebook chat json dump."""
import datetime
import json
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(row: dict) -> str:
    """Combine message information in a readable format ready to be used."""
    sender = row["sender_name"]
    text = row["content"]
    date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{sender} on {date}: {text}\n\n"


class FacebookChatLoader(BaseLoader):
    """Loader that loads Facebook messages json directory dump."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)

        with open(p, encoding="utf8") as f:
            d = json.load(f)

        text = "".join(
            concatenate_rows(message)
            for message in d["messages"]
            if message.get("content") and isinstance(message["content"], str)
        )
        metadata = {"source": str(p)}

        return [Document(page_content=text, metadata=metadata)]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/facebook_chat.html
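
A minimal usage sketch; the path mirrors the layout of a typical Facebook data export but is a hypothetical placeholder:

    from langchain.document_loaders.facebook_chat import FacebookChatLoader

    loader = FacebookChatLoader(path="messages/inbox/some_chat/message_1.json")
    docs = loader.load()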
Source code for langchain.document_loaders.epub

"""Loader that loads EPub files."""
from typing import List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredEPubLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load epub files."""

    def _get_elements(self) -> List:
        min_unstructured_version = "0.5.4"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning epub files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        from unstructured.partition.epub import partition_epub

        return partition_epub(filename=self.file_path, **self.unstructured_kwargs)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/epub.html
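
A minimal usage sketch (the file name is a hypothetical placeholder); the ODT, Excel, and HTML loaders further down follow the same pattern, since all of them delegate to UnstructuredFileLoader:

    from langchain.document_loaders.epub import UnstructuredEPubLoader

    loader = UnstructuredEPubLoader("book.epub", mode="elements")
    docs = loader.load()  # mode="elements" yields one Document per element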
Source code for langchain.document_loaders.diffbot

"""Loader that uses Diffbot to load webpages in text format."""
import logging
from typing import Any, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class DiffbotLoader(BaseLoader):
    """Loader that loads Diffbot file json."""

    def __init__(
        self, api_token: str, urls: List[str], continue_on_failure: bool = True
    ):
        """Initialize with API token and the URLs to load."""
        self.api_token = api_token
        self.urls = urls
        self.continue_on_failure = continue_on_failure

    def _diffbot_api_url(self, diffbot_api: str) -> str:
        return f"https://api.diffbot.com/v3/{diffbot_api}"

    def _get_diffbot_data(self, url: str) -> Any:
        """Get Diffbot file from Diffbot REST API."""
        # TODO: Add support for other Diffbot APIs
        diffbot_url = self._diffbot_api_url("article")
        params = {
            "token": self.api_token,
            "url": url,
        }
        response = requests.get(diffbot_url, params=params, timeout=10)
        # TODO: handle non-ok errors
        return response.json() if response.ok else {}

    def load(self) -> List[Document]:
        """Extract text from Diffbot on all the URLs and return Document instances."""
        docs: List[Document] = []
        for url in self.urls:
            try:
                data = self._get_diffbot_data(url)
                text = data["objects"][0]["text"] if "objects" in data else ""
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                else:
                    raise e
        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/diffbot.html
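
A minimal usage sketch; the token and URL are hypothetical placeholders:

    from langchain.document_loaders.diffbot import DiffbotLoader

    loader = DiffbotLoader(
        api_token="DIFFBOT_API_TOKEN",  # hypothetical token
        urls=["https://example.com/some-article"],
    )
    docs = loader.load()  # one Document per URL; empty text if the API returns nothing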
Source code for langchain.document_loaders.youtube

"""Loader that loads YouTube transcript."""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse

from pydantic import root_validator
from pydantic.dataclasses import dataclass

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]


@dataclass
class GoogleApiClient:
    """A Generic Google Api Client.

    To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
    python package installed.
    As the google api expects credentials you need to set up a google account and
    register your Service.
    "https://developers.google.com/docs/api/quickstart/python"

    Example:
        .. code-block:: python

            from langchain.document_loaders import GoogleApiClient
            google_api_client = GoogleApiClient(
                service_account_path=Path("path_to_your_sec_file.json")
            )
    """

    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"

    def __post_init__(self) -> None:
        self.creds = self._load_credentials()

    @root_validator
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate that either credentials_path or service_account_path is set."""
        if not values.get("credentials_path") and not values.get(
            "service_account_path"
        ):
            raise ValueError(
                "Must specify either credentials_path or service_account_path"
            )
        return values

    def _load_credentials(self) -> Any:
        """Load credentials."""
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
            from youtube_transcript_api import YouTubeTranscriptApi  # noqa: F401
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib "
                "youtube-transcript-api` "
                "to use the YouTube loader"
            )

        creds = None
        if self.service_account_path.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_path)
            )
        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds


ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
    "youtu.be",
    "m.youtube.com",
    "youtube.com",
    "www.youtube.com",
    "www.youtube-nocookie.com",
    "vid.plus",
}


def _parse_video_id(url: str) -> Optional[str]:
    """Parse a youtube url and return the video id if valid, otherwise None."""
    parsed_url = urlparse(url)

    if parsed_url.scheme not in ALLOWED_SCHEMAS:
        return None

    if parsed_url.netloc not in ALLOWED_NETLOCK:
        return None

    path = parsed_url.path

    if path.endswith("/watch"):
        query = parsed_url.query
        parsed_query = parse_qs(query)
        if "v" in parsed_query:
            ids = parsed_query["v"]
            video_id = ids if isinstance(ids, str) else ids[0]
        else:
            return None
    else:
        path = parsed_url.path.lstrip("/")
        video_id = path.split("/")[-1]

    if len(video_id) != 11:  # Video IDs are 11 characters long
        return None

    return video_id


class YoutubeLoader(BaseLoader):
    """Loader that loads Youtube transcripts."""

    def __init__(
        self,
        video_id: str,
        add_video_info: bool = False,
        language: Union[str, Sequence[str]] = "en",
        translation: str = "en",
        continue_on_failure: bool = False,
    ):
        """Initialize with YouTube video ID."""
        self.video_id = video_id
        self.add_video_info = add_video_info
        if isinstance(language, str):
            self.language = [language]
        else:
            self.language = language
        self.translation = translation
        self.continue_on_failure = continue_on_failure

    @staticmethod
    def extract_video_id(youtube_url: str) -> str:
        """Extract video id from common YT urls."""
        video_id = _parse_video_id(youtube_url)
        if not video_id:
            raise ValueError(
                f"Could not determine the video ID for the URL {youtube_url}"
            )
        return video_id

    @classmethod
    def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
        """Given a youtube URL, load the video."""
        video_id = cls.extract_video_id(youtube_url)
        return cls(video_id, **kwargs)

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from youtube_transcript_api import (
                NoTranscriptFound,
                TranscriptsDisabled,
                YouTubeTranscriptApi,
            )
        except ImportError:
            raise ImportError(
                "Could not import youtube_transcript_api python package. "
                "Please install it with `pip install youtube-transcript-api`."
            )

        metadata = {"source": self.video_id}

        if self.add_video_info:
            # Get more video meta info,
            # such as title, description, thumbnail url and publish_date
            video_info = self._get_video_info()
            metadata.update(video_info)

        try:
            transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
        except TranscriptsDisabled:
            return []

        try:
            transcript = transcript_list.find_transcript(self.language)
        except NoTranscriptFound:
            en_transcript = transcript_list.find_transcript(["en"])
            transcript = en_transcript.translate(self.translation)

        transcript_pieces = transcript.fetch()

        transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])

        return [Document(page_content=transcript, metadata=metadata)]

    def _get_video_info(self) -> dict:
        """Get important video information.

        Components are:
            - title
            - description
            - thumbnail url
            - publish_date
            - channel_author
            - and more.
        """
        try:
            from pytube import YouTube
        except ImportError:
            raise ImportError(
                "Could not import pytube python package. "
                "Please install it with `pip install pytube`."
            )
        yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
        video_info = {
            "title": yt.title or "Unknown",
            "description": yt.description or "Unknown",
            "view_count": yt.views or 0,
            "thumbnail_url": yt.thumbnail_url or "Unknown",
            "publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
            if yt.publish_date
            else "Unknown",
            "length": yt.length or 0,
            "author": yt.author or "Unknown",
        }
        return video_info


@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
    """Loader that loads all Videos from a Channel.

    To use, you should have the ``googleapiclient,youtube_transcript_api``
    python package installed.
    As the service needs a google_api_client, you first have to initialize
    the GoogleApiClient.

    Additionally you have to either provide a channel name or a list of videoids
    "https://developers.google.com/docs/api/quickstart/python"

    Example:
        .. code-block:: python

            from langchain.document_loaders import GoogleApiClient
            from langchain.document_loaders import GoogleApiYoutubeLoader
            google_api_client = GoogleApiClient(
                service_account_path=Path("path_to_your_sec_file.json")
            )
            loader = GoogleApiYoutubeLoader(
                google_api_client=google_api_client,
                channel_name="CodeAesthetic",
            )
            loader.load()
    """

    google_api_client: GoogleApiClient
    channel_name: Optional[str] = None
    video_ids: Optional[List[str]] = None
    add_video_info: bool = True
    captions_language: str = "en"
    continue_on_failure: bool = False

    def __post_init__(self) -> None:
        self.youtube_client = self._build_youtube_client(self.google_api_client.creds)

    def _build_youtube_client(self, creds: Any) -> Any:
        try:
            from googleapiclient.discovery import build
            from youtube_transcript_api import YouTubeTranscriptApi  # noqa: F401
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib "
                "youtube-transcript-api` "
                "to use the YouTube loader"
            )
        return build("youtube", "v3", credentials=creds)

    @root_validator
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate that either channel_name or video_ids is set."""
        if not values.get("channel_name") and not values.get("video_ids"):
            raise ValueError("Must specify either channel_name or video_ids")
        return values

    def _get_transcripe_for_video_id(self, video_id: str) -> str:
        from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi

        transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
        try:
            transcript = transcript_list.find_transcript([self.captions_language])
        except NoTranscriptFound:
            # Fall back to translating whichever transcript is available
            for available_transcript in transcript_list:
                transcript = available_transcript.translate(self.captions_language)
                continue

        transcript_pieces = transcript.fetch()
        return " ".join([t["text"].strip(" ") for t in transcript_pieces])

    def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcripe_for_video_id(video_id)
        video_response = (
            self.youtube_client.videos()
            .list(
                part="id,snippet",
                id=video_id,
            )
            .execute()
        )
        return Document(
            page_content=captions,
            metadata=video_response.get("items")[0],
        )

    def _get_channel_id(self, channel_name: str) -> str:
        request = self.youtube_client.search().list(
            part="id",
            q=channel_name,
            type="channel",
            maxResults=1,  # we only need one result since channel names are unique
        )
        response = request.execute()
        channel_id = response["items"][0]["id"]["channelId"]
        return channel_id

    def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
        try:
            from youtube_transcript_api import (
                NoTranscriptFound,
                TranscriptsDisabled,
            )
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "youtube-transcript-api` "
                "to use the youtube loader"
            )

        channel_id = self._get_channel_id(channel)
        request = self.youtube_client.search().list(
            part="id,snippet",
            channelId=channel_id,
            maxResults=50,  # adjust this value to retrieve more or fewer videos
        )
        video_ids = []
        while request is not None:
            response = request.execute()

            # Add each video ID to the list
            for item in response["items"]:
                if not item["id"].get("videoId"):
                    continue
                meta_data = {"videoId": item["id"]["videoId"]}
                if self.add_video_info:
                    item["snippet"].pop("thumbnails")
                    meta_data.update(item["snippet"])
                try:
                    page_content = self._get_transcripe_for_video_id(
                        item["id"]["videoId"]
                    )
                    video_ids.append(
                        Document(
                            page_content=page_content,
                            metadata=meta_data,
                        )
                    )
                except (TranscriptsDisabled, NoTranscriptFound) as e:
                    if self.continue_on_failure:
                        logger.error(
                            "Error fetching transcript "
                            + f" {item['id']['videoId']}, exception: {e}"
                        )
                    else:
                        raise e

            request = self.youtube_client.search().list_next(request, response)

        return video_ids

    def load(self) -> List[Document]:
        """Load documents."""
        document_list = []
        if self.channel_name:
            document_list.extend(self._get_document_for_channel(self.channel_name))
        elif self.video_ids:
            document_list.extend(
                [
                    self._get_document_for_video_id(video_id)
                    for video_id in self.video_ids
                ]
            )
        else:
            raise ValueError("Must specify either channel_name or video_ids")
        return document_list

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
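
A minimal usage sketch for the simpler of the two loaders above; the watch URL is only an example:

    from langchain.document_loaders.youtube import YoutubeLoader

    loader = YoutubeLoader.from_youtube_url(
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",  # example watch URL
        language=["en", "de"],
    )
    docs = loader.load()  # empty list if transcripts are disabled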
Source code for langchain.document_loaders.python

import tokenize

from langchain.document_loaders.text import TextLoader


class PythonLoader(TextLoader):
    """
    Load Python files, respecting any non-default encoding if specified.
    """

    def __init__(self, file_path: str):
        with open(file_path, "rb") as f:
            encoding, _ = tokenize.detect_encoding(f.readline)
        super().__init__(file_path=file_path, encoding=encoding)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/python.html
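
A minimal usage sketch (the file name is a hypothetical placeholder):

    from langchain.document_loaders.python import PythonLoader

    loader = PythonLoader("example_module.py")  # encoding detected via tokenize
    docs = loader.load()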
Source code for langchain.document_loaders.odt

"""Loader that loads Open Office ODT files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredODTLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load open office ODT files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.3")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.odt import partition_odt

        return partition_odt(filename=self.file_path, **self.unstructured_kwargs)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/odt.html
Source code for langchain.document_loaders.bigquery

from __future__ import annotations

from typing import TYPE_CHECKING, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    from google.auth.credentials import Credentials


class BigQueryLoader(BaseLoader):
    """Loads a query result from BigQuery into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        project: Optional[str] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
        credentials: Optional[Credentials] = None,
    ):
        """Initialize BigQuery document loader.

        Args:
            query: The query to run in BigQuery.
            project: Optional. The project to run the query in.
            page_content_columns: Optional. The columns to write into the
                `page_content` of the document.
            metadata_columns: Optional. The columns to write into the
                `metadata` of the document.
            credentials: google.auth.credentials.Credentials, optional.
                Credentials for accessing Google APIs. Use this parameter to
                override default credentials, such as to use Compute Engine
                (`google.auth.compute_engine.Credentials`) or Service Account
                (`google.oauth2.service_account.Credentials`) credentials
                directly.
        """
        self.query = query
        self.project = project
        self.page_content_columns = page_content_columns
        self.metadata_columns = metadata_columns
        self.credentials = credentials

    def load(self) -> List[Document]:
        try:
            from google.cloud import bigquery
        except ImportError as ex:
            raise ValueError(
                "Could not import google-cloud-bigquery python package. "
                "Please install it with `pip install google-cloud-bigquery`."
            ) from ex

        bq_client = bigquery.Client(credentials=self.credentials, project=self.project)
        query_result = bq_client.query(self.query).result()
        docs: List[Document] = []

        page_content_columns = self.page_content_columns
        metadata_columns = self.metadata_columns

        if page_content_columns is None:
            page_content_columns = [column.name for column in query_result.schema]
        if metadata_columns is None:
            metadata_columns = []

        for row in query_result:
            page_content = "\n".join(
                f"{k}: {v}" for k, v in row.items() if k in page_content_columns
            )
            metadata = {k: v for k, v in row.items() if k in metadata_columns}
            doc = Document(page_content=page_content, metadata=metadata)
            docs.append(doc)

        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/bigquery.html
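
A minimal usage sketch; the project, dataset, and column names are hypothetical placeholders:

    from langchain.document_loaders.bigquery import BigQueryLoader

    query = "SELECT title, abstract FROM `my_project.my_dataset.papers` LIMIT 10"
    loader = BigQueryLoader(
        query,
        page_content_columns=["abstract"],
        metadata_columns=["title"],
    )
    docs = loader.load()  # one Document per result row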
Source code for langchain.document_loaders.joplin

import json
import urllib.request
from datetime import datetime
from typing import Iterator, List, Optional

from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utils import get_from_env

LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}"


class JoplinLoader(BaseLoader):
    """
    Loader that fetches notes from Joplin.

    In order to use this loader, you need to have Joplin running with the
    Web Clipper enabled (look for "Web Clipper" in the app settings).

    To get the access token, you need to go to the Web Clipper options and
    under "Advanced Options" you will find the access token.

    You can find more information about the Web Clipper service here:
    https://joplinapp.org/clipper/
    """

    def __init__(
        self,
        access_token: Optional[str] = None,
        port: int = 41184,
        host: str = "localhost",
    ) -> None:
        access_token = access_token or get_from_env(
            "access_token", "JOPLIN_ACCESS_TOKEN"
        )
        base_url = f"http://{host}:{port}"

        self._get_note_url = (
            f"{base_url}/notes?token={access_token}"
            f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}"
        )
        self._get_folder_url = (
            f"{base_url}/folders/{{id}}?token={access_token}&fields=title"
        )
        self._get_tag_url = (
            f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title"
        )

    def _get_notes(self) -> Iterator[Document]:
        has_more = True
        page = 1
        while has_more:
            req_note = urllib.request.Request(self._get_note_url.format(page=page))
            with urllib.request.urlopen(req_note) as response:
                json_data = json.loads(response.read().decode())
                for note in json_data["items"]:
                    metadata = {
                        "source": LINK_NOTE_TEMPLATE.format(id=note["id"]),
                        "folder": self._get_folder(note["parent_id"]),
                        "tags": self._get_tags(note["id"]),
                        "title": note["title"],
                        "created_time": self._convert_date(note["created_time"]),
                        "updated_time": self._convert_date(note["updated_time"]),
                    }
                    yield Document(page_content=note["body"], metadata=metadata)

                has_more = json_data["has_more"]
                page += 1

    def _get_folder(self, folder_id: str) -> str:
        req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id))
        with urllib.request.urlopen(req_folder) as response:
            json_data = json.loads(response.read().decode())
            return json_data["title"]

    def _get_tags(self, note_id: str) -> List[str]:
        req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
        with urllib.request.urlopen(req_tag) as response:
            json_data = json.loads(response.read().decode())
            return [tag["title"] for tag in json_data["items"]]

    def _convert_date(self, date: int) -> str:
        return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S")

    def lazy_load(self) -> Iterator[Document]:
        yield from self._get_notes()

    def load(self) -> List[Document]:
        return list(self.lazy_load())

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/joplin.html
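
A minimal usage sketch, assuming Joplin is running locally with the Web Clipper service enabled (the token is a hypothetical placeholder):

    from langchain.document_loaders.joplin import JoplinLoader

    loader = JoplinLoader(access_token="JOPLIN_WEB_CLIPPER_TOKEN")
    docs = loader.load()  # one Document per note, with folder/tags metadata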
Source code for langchain.document_loaders.excel

"""Loader that loads Microsoft Excel files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredExcelLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load Microsoft Excel files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xlsx import partition_xlsx

        return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html
Source code for langchain.document_loaders.figma

"""Loader that loads Figma files json dump."""
import json
import urllib.request
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


class FigmaFileLoader(BaseLoader):
    """Loader that loads Figma file json."""

    def __init__(self, access_token: str, ids: str, key: str):
        """Initialize with access token, ids, and key."""
        self.access_token = access_token
        self.ids = ids
        self.key = key

    def _construct_figma_api_url(self) -> str:
        api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
            self.key,
            self.ids,
        )
        return api_url

    def _get_figma_file(self) -> Any:
        """Get Figma file from Figma REST API."""
        headers = {"X-Figma-Token": self.access_token}
        request = urllib.request.Request(
            self._construct_figma_api_url(), headers=headers
        )
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            return json_data

    def load(self) -> List[Document]:
        """Load file."""
        data = self._get_figma_file()
        text = stringify_dict(data)
        metadata = {"source": self._construct_figma_api_url()}
        return [Document(page_content=text, metadata=metadata)]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/figma.html
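
A minimal usage sketch; all three values are hypothetical placeholders taken from a Figma file URL and account settings:

    from langchain.document_loaders.figma import FigmaFileLoader

    loader = FigmaFileLoader(
        access_token="FIGMA_ACCESS_TOKEN",
        ids="1:2",       # comma-separated node ids
        key="FILE_KEY",  # the key segment of the file URL
    )
    docs = loader.load()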
Source code for langchain.document_loaders.onedrive

"""Loader that loads data from OneDrive"""
from __future__ import annotations

import logging
import os
import tempfile
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union

from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.onedrive_file import OneDriveFileLoader

if TYPE_CHECKING:
    from O365 import Account
    from O365.drive import Drive, Folder

SCOPES = ["offline_access", "Files.Read.All"]
logger = logging.getLogger(__name__)


class _OneDriveSettings(BaseSettings):
    client_id: str = Field(..., env="O365_CLIENT_ID")
    client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET")

    class Config:
        env_prefix = ""
        case_sensitive = False
        env_file = ".env"


class _OneDriveTokenStorage(BaseSettings):
    token_path: FilePath = Field(Path.home() / ".credentials" / "o365_token.txt")


class _FileType(str, Enum):
    DOC = "doc"
    DOCX = "docx"
    PDF = "pdf"


class _SupportedFileTypes(BaseModel):
    file_types: List[_FileType]

    def fetch_mime_types(self) -> Dict[str, str]:
        mime_types_mapping = {}
        for file_type in self.file_types:
            if file_type.value == "doc":
                mime_types_mapping[file_type.value] = "application/msword"
            elif file_type.value == "docx":
                mime_types_mapping[
                    file_type.value
                ] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"  # noqa: E501
            elif file_type.value == "pdf":
                mime_types_mapping[file_type.value] = "application/pdf"
        return mime_types_mapping


class OneDriveLoader(BaseLoader, BaseModel):
    settings: _OneDriveSettings = Field(default_factory=_OneDriveSettings)
    drive_id: str = Field(...)
    folder_path: Optional[str] = None
    object_ids: Optional[List[str]] = None
    auth_with_token: bool = False

    def _auth(self) -> Type[Account]:
        """
        Authenticates the OneDrive API client using the specified
        authentication method and returns the Account object.

        Returns:
            Type[Account]: The authenticated Account object.
        """
        try:
            from O365 import Account, FileSystemTokenBackend
        except ImportError:
            raise ImportError(
                "O365 package not found, please install it with `pip install o365`"
            )
        if self.auth_with_token:
            token_storage = _OneDriveTokenStorage()
            token_path = token_storage.token_path
            token_backend = FileSystemTokenBackend(
                token_path=token_path.parent, token_filename=token_path.name
            )
            account = Account(
                credentials=(
                    self.settings.client_id,
                    self.settings.client_secret.get_secret_value(),
                ),
                scopes=SCOPES,
                token_backend=token_backend,
                **{"raise_http_errors": False},
            )
        else:
            token_backend = FileSystemTokenBackend(
                token_path=Path.home() / ".credentials"
            )
            account = Account(
                credentials=(
                    self.settings.client_id,
                    self.settings.client_secret.get_secret_value(),
                ),
                scopes=SCOPES,
                token_backend=token_backend,
                **{"raise_http_errors": False},
            )
            # make the auth
            account.authenticate()
        return account

    def _get_folder_from_path(self, drive: Type[Drive]) -> Union[Folder, Drive]:
        """
        Returns the folder or drive object located at the specified path
        relative to the given drive.

        Args:
            drive (Type[Drive]): The root drive from which the folder path
                is relative.

        Returns:
            Union[Folder, Drive]: The folder or drive object located at the
                specified path.

        Raises:
            FileNotFoundError: If the path does not exist.
        """
        subfolder_drive = drive
        if self.folder_path is None:
            return subfolder_drive

        subfolders = [f for f in self.folder_path.split("/") if f != ""]
        if len(subfolders) == 0:
            return subfolder_drive

        items = subfolder_drive.get_items()
        for subfolder in subfolders:
            try:
                subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0]
                items = subfolder_drive.get_items()
            except (IndexError, AttributeError):
                raise FileNotFoundError(
                    "Path {} does not exist.".format(self.folder_path)
                )
        return subfolder_drive

    def _load_from_folder(self, folder: Type[Folder]) -> List[Document]:
        """
        Loads all supported document files from the specified folder
        and returns a list of Document objects.

        Args:
            folder (Type[Folder]): The folder object to load the documents from.

        Returns:
            List[Document]: A list of Document objects representing
                the loaded documents.
        """
        docs = []
        file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
        file_mime_types = file_types.fetch_mime_types()
        items = folder.get_items()
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            for file in items:
                if file.is_file:
                    if file.mime_type in list(file_mime_types.values()):
                        loader = OneDriveFileLoader(file=file)
                        docs.extend(loader.load())
        return docs

    def _load_from_object_ids(self, drive: Type[Drive]) -> List[Document]:
        """
        Loads all supported document files from the specified OneDrive drive
        based on their object IDs and returns a list of Document objects.

        Args:
            drive (Type[Drive]): The OneDrive drive object to load the
                documents from.

        Returns:
            List[Document]: A list of Document objects representing
                the loaded documents.
        """
        docs = []
        file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
        file_mime_types = file_types.fetch_mime_types()
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            for object_id in self.object_ids if self.object_ids else [""]:
                file = drive.get_item(object_id)
                if not file:
                    logging.warning(
                        "There isn't a file with "
                        f"object_id {object_id} in drive {drive}."
                    )
                    continue
                if file.is_file:
                    if file.mime_type in list(file_mime_types.values()):
                        loader = OneDriveFileLoader(file=file)
                        docs.extend(loader.load())
        return docs

    def load(self) -> List[Document]:
        """
        Loads all supported document files from the specified OneDrive drive
        and returns a list of Document objects.

        Returns:
            List[Document]: A list of Document objects representing
                the loaded documents.

        Raises:
            ValueError: If the specified drive ID does not correspond to a
                drive in the OneDrive storage.
        """
        account = self._auth()
        storage = account.storage()
        drive = storage.get_drive(self.drive_id)
        docs: List[Document] = []
        if not drive:
            raise ValueError(f"There isn't a drive with id {self.drive_id}.")
        if self.folder_path:
            folder = self._get_folder_from_path(drive=drive)
            docs.extend(self._load_from_folder(folder=folder))
        elif self.object_ids:
            docs.extend(self._load_from_object_ids(drive=drive))
        return docs

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
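
A minimal usage sketch, assuming O365_CLIENT_ID and O365_CLIENT_SECRET are set in the environment; the drive id and folder path are hypothetical placeholders:

    from langchain.document_loaders.onedrive import OneDriveLoader

    loader = OneDriveLoader(drive_id="MY_DRIVE_ID", folder_path="Documents/reports")
    docs = loader.load()  # doc/docx/pdf files found under the folder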
Source code for langchain.document_loaders.azure_blob_storage_file

"""Loading logic for loading documents from an Azure Blob Storage file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class AzureBlobStorageFileLoader(BaseLoader):
    """Loading logic for loading documents from Azure Blob Storage."""

    def __init__(self, conn_str: str, container: str, blob_name: str):
        """Initialize with connection string, container and blob name."""
        self.conn_str = conn_str
        self.container = container
        self.blob = blob_name

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from azure.storage.blob import BlobClient
        except ImportError as exc:
            raise ValueError(
                "Could not import azure storage blob python package. "
                "Please install it with `pip install azure-storage-blob`."
            ) from exc

        client = BlobClient.from_connection_string(
            conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
        )

        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.container}/{self.blob}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(f"{file_path}", "wb") as file:
                blob_data = client.download_blob()
                blob_data.readinto(file)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_file.html
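
A minimal usage sketch; the connection string, container, and blob name are hypothetical placeholders:

    from langchain.document_loaders.azure_blob_storage_file import (
        AzureBlobStorageFileLoader,
    )

    loader = AzureBlobStorageFileLoader(
        conn_str="<connection-string>",
        container="my-container",
        blob_name="report.pdf",
    )
    docs = loader.load()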
Source code for langchain.document_loaders.tomarkdown

"""Loader that loads HTML to markdown using 2markdown."""
from __future__ import annotations

from typing import Iterator, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ToMarkdownLoader(BaseLoader):
    """Loader that loads HTML to markdown using 2markdown."""

    def __init__(self, url: str, api_key: str):
        """Initialize with url and api key."""
        self.url = url
        self.api_key = api_key

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the file."""
        response = requests.post(
            "https://2markdown.com/api/2md",
            headers={"X-Api-Key": self.api_key},
            json={"url": self.url},
        )
        text = response.json()["article"]
        metadata = {"source": self.url}
        yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        """Load file."""
        return list(self.lazy_load())

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/tomarkdown.html
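
A minimal usage sketch; the API key is a hypothetical placeholder:

    from langchain.document_loaders.tomarkdown import ToMarkdownLoader

    loader = ToMarkdownLoader(url="https://example.com/page", api_key="2MD_API_KEY")
    docs = loader.load()  # one Document with the page converted to markdown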
Source code for langchain.document_loaders.html

"""Loader that uses unstructured to load HTML files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredHTMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load HTML files."""

    def _get_elements(self) -> List:
        from unstructured.partition.html import partition_html

        return partition_html(filename=self.file_path, **self.unstructured_kwargs)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/html.html
Source code for langchain.document_loaders.telegram

"""Loader that loads Telegram chat json dump."""
from __future__ import annotations

import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

if TYPE_CHECKING:
    import pandas as pd
    from telethon.hints import EntityLike


def concatenate_rows(row: dict) -> str:
    """Combine message information in a readable format ready to be used."""
    date = row["date"]
    sender = row["from"]
    text = row["text"]
    return f"{sender} on {date}: {text}\n\n"


class TelegramChatFileLoader(BaseLoader):
    """Loader that loads a Telegram chat json dump from a file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)

        with open(p, encoding="utf8") as f:
            d = json.load(f)

        text = "".join(
            concatenate_rows(message)
            for message in d["messages"]
            if message["type"] == "message" and isinstance(message["text"], str)
        )
        metadata = {"source": str(p)}

        return [Document(page_content=text, metadata=metadata)]


def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
    """Convert a string or list of strings to a list of Documents with metadata."""
    if isinstance(text, str):
        # Take a single string as one page
        text = [text]
    page_docs = [Document(page_content=page) for page in text]

    # Add page numbers as metadata
    for i, doc in enumerate(page_docs):
        doc.metadata["page"] = i + 1

    # Split pages into chunks
    doc_chunks = []

    for doc in page_docs:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
            chunk_overlap=20,
        )
        chunks = text_splitter.split_text(doc.page_content)
        for i, chunk in enumerate(chunks):
            doc = Document(
                page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
            )
            # Add sources as metadata
            doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
            doc_chunks.append(doc)
    return doc_chunks


class TelegramChatApiLoader(BaseLoader):
    """Loader that fetches Telegram chat history via the Telegram API."""

    def __init__(
        self,
        chat_entity: Optional[EntityLike] = None,
        api_id: Optional[int] = None,
        api_hash: Optional[str] = None,
        username: Optional[str] = None,
        file_path: str = "telegram_data.json",
    ):
        """Initialize with API parameters."""
        self.chat_entity = chat_entity
        self.api_id = api_id
        self.api_hash = api_hash
        self.username = username
        self.file_path = file_path

    async def fetch_data_from_telegram(self) -> None:
        """Fetch data from Telegram API and save it as a JSON file."""
        from telethon.sync import TelegramClient

        data = []
        async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
            async for message in client.iter_messages(self.chat_entity):
                is_reply = message.reply_to is not None
                reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
                data.append(
                    {
                        "sender_id": message.sender_id,
                        "text": message.text,
                        "date": message.date.isoformat(),
                        "message.id": message.id,
                        "is_reply": is_reply,
                        "reply_to_id": reply_to_id,
                    }
                )

        with open(self.file_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)

    def _get_message_threads(self, data: pd.DataFrame) -> dict:
        """Create a dictionary of message threads from the given data.

        Args:
            data (pd.DataFrame): A DataFrame containing the conversation
                data with columns:
                - message.sender_id
                - text
                - date
                - message.id
                - is_reply
                - reply_to_id

        Returns:
            dict: A dictionary where the key is the parent message ID and
                the value is a list of message IDs in ascending order.
        """

        def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
            """
            Recursively find all replies to a given parent message ID.

            Args:
                parent_id (int): The parent message ID.
                reply_data (pd.DataFrame): A DataFrame containing reply messages.

            Returns:
                list: A list of message IDs that are replies to the parent message ID.
            """
            # Find direct replies to the parent message ID
            direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
                "message.id"
            ].tolist()

            # Recursively find replies to the direct replies
            all_replies = []
            for reply_id in direct_replies:
                all_replies += [reply_id] + find_replies(reply_id, reply_data)

            return all_replies

        # Filter out parent messages
        parent_messages = data[~data["is_reply"]]

        # Filter out reply messages and drop rows with NaN in 'reply_to_id'
        reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])

        # Convert 'reply_to_id' to integer
        reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)

        # Create a dictionary of message threads with parent message IDs as keys
        # and lists of reply message IDs as values
        message_threads = {
            parent_id: [parent_id] + find_replies(parent_id, reply_messages)
            for parent_id in parent_messages["message.id"]
        }

        return message_threads

    def _combine_message_texts(
        self, message_threads: Dict[int, List[int]], data: pd.DataFrame
    ) -> str:
        """
        Combine the message texts for each parent message ID based
        on the list of message threads.

        Args:
            message_threads (dict): A dictionary where the key is the parent message
                ID and the value is a list of message IDs in ascending order.
            data (pd.DataFrame): A DataFrame containing the conversation data:
                - message.sender_id
                - text
                - date
                - message.id
                - is_reply
                - reply_to_id

        Returns:
            str: A combined string of message texts sorted by date.
        """
        combined_text = ""

        # Iterate through sorted parent message IDs
        for parent_id, message_ids in message_threads.items():
            # Get the message texts for the message IDs and sort them by date
            message_texts = (
                data[data["message.id"].isin(message_ids)]
                .sort_values(by="date")["text"]
                .tolist()
            )
            message_texts = [str(elem) for elem in message_texts]

            # Combine the message texts
            combined_text += " ".join(message_texts) + ".\n"

        return combined_text.strip()

    def load(self) -> List[Document]:
        """Load documents."""
        if self.chat_entity is not None:
            try:
                import nest_asyncio

                nest_asyncio.apply()
                asyncio.run(self.fetch_data_from_telegram())
            except ImportError:
                raise ImportError(
                    "`nest_asyncio` package not found. "
                    "Please install it with `pip install nest_asyncio`."
                )

        p = Path(self.file_path)

        with open(p, encoding="utf8") as f:
            d = json.load(f)
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "`pandas` package not found. "
                "Please install it with `pip install pandas`."
            )
        normalized_messages = pd.json_normalize(d)
        df = pd.DataFrame(normalized_messages)

        message_threads = self._get_message_threads(df)
        combined_texts = self._combine_message_texts(message_threads, df)

        return text_to_docs(combined_texts)

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
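
A minimal usage sketch for the file-based loader; the path is a hypothetical placeholder for a chat exported via Telegram's "Export chat history" (JSON format):

    from langchain.document_loaders.telegram import TelegramChatFileLoader

    loader = TelegramChatFileLoader(path="telegram_export.json")
    docs = loader.load()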
Source code for langchain.document_loaders.sitemap

"""Loader that fetches a sitemap and loads those URLs."""
import itertools
import re
from typing import Any, Callable, Generator, Iterable, List, Optional

from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document


def _default_parsing_function(content: Any) -> str:
    return str(content.get_text())


def _default_meta_function(meta: dict, _content: Any) -> dict:
    return {"source": meta["loc"], **meta}


def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]:
    it = iter(iterable)
    while item := list(itertools.islice(it, size)):
        yield item


class SitemapLoader(WebBaseLoader):
    """Loader that fetches a sitemap and loads those URLs."""

    def __init__(
        self,
        web_path: str,
        filter_urls: Optional[List[str]] = None,
        parsing_function: Optional[Callable] = None,
        blocksize: Optional[int] = None,
        blocknum: int = 0,
        meta_function: Optional[Callable] = None,
        is_local: bool = False,
    ):
        """Initialize with webpage path and optional filter URLs.

        Args:
            web_path: url of the sitemap. can also be a local path
            filter_urls: list of strings or regexes that will be applied to
                filter the urls that are parsed and loaded
            parsing_function: Function to parse bs4.Soup output
            blocksize: number of sitemap locations per block
            blocknum: the number of the block that should be loaded - zero indexed
            meta_function: Function to parse bs4.Soup output for metadata;
                when overriding this, remember to also copy metadata["loc"]
                to metadata["source"] if you are using that field
            is_local: whether the sitemap is a local file
        """
        if blocksize is not None and blocksize < 1:
            raise ValueError("Sitemap blocksize should be at least 1")

        if blocknum < 0:
            raise ValueError("Sitemap blocknum cannot be lower than 0")

        try:
            import lxml  # noqa:F401
        except ImportError:
            raise ImportError(
                "lxml package not found, please install it with `pip install lxml`"
            )

        super().__init__(web_path)

        self.filter_urls = filter_urls
        self.parsing_function = parsing_function or _default_parsing_function
        self.meta_function = meta_function or _default_meta_function
        self.blocksize = blocksize
        self.blocknum = blocknum
        self.is_local = is_local

    def parse_sitemap(self, soup: Any) -> List[dict]:
        """Parse sitemap xml and load into a list of dicts."""
        els = []
        for url in soup.find_all("url"):
            loc = url.find("loc")
            if not loc:
                continue

            # Strip leading and trailing whitespace and newlines
            loc_text = loc.text.strip()

            if self.filter_urls and not any(
                re.match(r, loc_text) for r in self.filter_urls
            ):
                continue

            els.append(
                {
                    tag: prop.text
                    for tag in ["loc", "lastmod", "changefreq", "priority"]
                    if (prop := url.find(tag))
                }
            )

        for sitemap in soup.find_all("sitemap"):
            loc = sitemap.find("loc")
            if not loc:
                continue
            soup_child = self.scrape_all([loc.text], "xml")[0]

            els.extend(self.parse_sitemap(soup_child))

        return els

    def load(self) -> List[Document]:
        """Load sitemap."""
        if self.is_local:
            try:
                import bs4
            except ImportError:
                raise ImportError(
                    "beautifulsoup4 package not found, please install it"
                    " with `pip install beautifulsoup4`"
                )
            fp = open(self.web_path)
            soup = bs4.BeautifulSoup(fp, "xml")
        else:
            soup = self.scrape("xml")

        els = self.parse_sitemap(soup)

        if self.blocksize is not None:
            elblocks = list(_batch_block(els, self.blocksize))
            blockcount = len(elblocks)
            if blockcount - 1 < self.blocknum:
                raise ValueError(
                    "Selected sitemap does not contain enough blocks for given blocknum"
                )
            else:
                els = elblocks[self.blocknum]

        results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])

        return [
            Document(
                page_content=self.parsing_function(results[i]),
                metadata=self.meta_function(els[i], results[i]),
            )
            for i in range(len(results))
        ]

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/sitemap.html
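
A minimal usage sketch; the domain and filter pattern are hypothetical placeholders:

    from langchain.document_loaders.sitemap import SitemapLoader

    loader = SitemapLoader(
        "https://example.com/sitemap.xml",
        filter_urls=["https://example.com/blog/.*"],  # regexes matched per <loc>
    )
    docs = loader.load()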
Source code for langchain.document_loaders.mastodon

"""Mastodon document loader."""
from __future__ import annotations

import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    import mastodon


def _dependable_mastodon_import() -> mastodon:
    try:
        import mastodon
    except ImportError:
        raise ValueError(
            "Mastodon.py package not found, "
            "please install it with `pip install Mastodon.py`"
        )
    return mastodon


class MastodonTootsLoader(BaseLoader):
    """Mastodon toots loader."""

    def __init__(
        self,
        mastodon_accounts: Sequence[str],
        number_toots: Optional[int] = 100,
        exclude_replies: bool = False,
        access_token: Optional[str] = None,
        api_base_url: str = "https://mastodon.social",
    ):
        """Instantiate Mastodon toots loader.

        Args:
            mastodon_accounts: The list of Mastodon accounts to query.
            number_toots: How many toots to pull for each account.
            exclude_replies: Whether to exclude reply toots from the load.
            access_token: An access token if toots are loaded as a Mastodon app.
                Can also be specified via the environment variable
                "MASTODON_ACCESS_TOKEN".
            api_base_url: A Mastodon API base URL to talk to, if not using the
                default.
        """
        mastodon = _dependable_mastodon_import()
        access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
        self.api = mastodon.Mastodon(
            access_token=access_token, api_base_url=api_base_url
        )
        self.mastodon_accounts = mastodon_accounts
        self.number_toots = number_toots
        self.exclude_replies = exclude_replies

    def load(self) -> List[Document]:
        """Load toots into documents."""
        results: List[Document] = []
        for account in self.mastodon_accounts:
            user = self.api.account_lookup(account)
            toots = self.api.account_statuses(
                user.id,
                only_media=False,
                pinned=False,
                exclude_replies=self.exclude_replies,
                exclude_reblogs=True,
                limit=self.number_toots,
            )
            docs = self._format_toots(toots, user)
            results.extend(docs)
        return results

    def _format_toots(
        self, toots: List[Dict[str, Any]], user_info: dict
    ) -> Iterable[Document]:
        """Format toots into documents.

        Adding user info, and selected toot fields into the metadata.
        """
        for toot in toots:
            metadata = {
                "created_at": toot["created_at"],
                "user_info": user_info,
                "is_reply": toot["in_reply_to_id"] is not None,
            }
            yield Document(
                page_content=toot["content"],
                metadata=metadata,
            )

https://python.langchain.com/en/latest/_modules/langchain/document_loaders/mastodon.html
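
A minimal usage sketch; the account handle is only an example, and per the docstring above the access token is optional:

    from langchain.document_loaders.mastodon import MastodonTootsLoader

    loader = MastodonTootsLoader(
        mastodon_accounts=["@Gargron@mastodon.social"],  # example account
        number_toots=50,
    )
    docs = loader.load()  # one Document per toot, HTML content as-is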
357cef503597-0
Source code for langchain.document_loaders.notiondb

"""Notion DB loader for langchain"""

from typing import Any, Dict, List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"


class NotionDBLoader(BaseLoader):
    """Notion DB Loader.

    Reads content from pages within a Notion Database.

    Args:
        integration_token (str): Notion integration token.
        database_id (str): Notion database id.
        request_timeout_sec (int): Timeout for Notion requests in seconds.
    """

    def __init__(
        self,
        integration_token: str,
        database_id: str,
        request_timeout_sec: Optional[int] = 10,
    ) -> None:
        """Initialize with parameters."""
        if not integration_token:
            raise ValueError("integration_token must be provided")
        if not database_id:
            raise ValueError("database_id must be provided")

        self.token = integration_token
        self.database_id = database_id
        self.headers = {
            "Authorization": "Bearer " + self.token,
            "Content-Type": "application/json",
            "Notion-Version": "2022-06-28",
        }
        self.request_timeout_sec = request_timeout_sec

    def load(self) -> List[Document]:
        """Load documents from the Notion database.

        Returns:
            List[Document]: List of documents.
        """
        page_ids = self._retrieve_page_ids()
        return list(self.load_page(page_id) for page_id in page_ids)

    def _retrieve_page_ids(
        self, query_dict: Dict[str, Any] = {"page_size": 100}
    ) -> List[str]:
        """Get all the pages from a Notion database."""
        pages: List[Dict[str, Any]] = []

        while True:
            data = self._request(
                DATABASE_URL.format(database_id=self.database_id),
                method="POST",
                query_dict=query_dict,
            )

            pages.extend(data.get("results"))

            if not data.get("has_more"):
                break

            query_dict["start_cursor"] = data.get("next_cursor")

        page_ids = [page["id"] for page in pages]
        return page_ids

    def load_page(self, page_id: str) -> Document:
        """Read a page."""
        data = self._request(PAGE_URL.format(page_id=page_id))

        # load properties as metadata
        metadata: Dict[str, Any] = {}

        for prop_name, prop_data in data["properties"].items():
            prop_type = prop_data["type"]

            if prop_type == "rich_text":
                value = (
                    prop_data["rich_text"][0]["plain_text"]
                    if prop_data["rich_text"]
                    else None
                )
            elif prop_type == "title":
                value = (
                    prop_data["title"][0]["plain_text"] if prop_data["title"] else None
                )
            elif prop_type == "multi_select":
                value = (
                    [item["name"] for item in prop_data["multi_select"]]
                    if prop_data["multi_select"]
                    else []
                )
            elif prop_type == "url":
                value = prop_data["url"]
            else:
                value = None

            metadata[prop_name.lower()] = value

        metadata["id"] = page_id

        return Document(page_content=self._load_blocks(page_id), metadata=metadata)

    def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
        """Read a block and its children."""
        result_lines_arr: List[str] = []
        cur_block_id: str = block_id

        while cur_block_id:
            data = self._request(BLOCK_URL.format(block_id=cur_block_id))

            for result in data["results"]:
                result_obj = result[result["type"]]

                if "rich_text" not in result_obj:
                    continue

                cur_result_text_arr: List[str] = []

                for rich_text in result_obj["rich_text"]:
                    if "text" in rich_text:
                        cur_result_text_arr.append(
                            "\t" * num_tabs + rich_text["text"]["content"]
                        )

                if result["has_children"]:
                    children_text = self._load_blocks(
                        result["id"], num_tabs=num_tabs + 1
                    )
                    cur_result_text_arr.append(children_text)

                result_lines_arr.append("\n".join(cur_result_text_arr))

            cur_block_id = data.get("next_cursor")

        return "\n".join(result_lines_arr)

    def _request(
        self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
    ) -> Any:
        res = requests.request(
            method,
            url,
            headers=self.headers,
            json=query_dict,
            timeout=self.request_timeout_sec,
        )
        res.raise_for_status()
        return res.json()
https://python.langchain.com/en/latest/_modules/langchain/document_loaders/notiondb.html
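A minimal usage sketch for the loader above. The integration token and database id are placeholders for values you would create in your own Notion workspace; request_timeout_sec is optional and defaults to 10 seconds.

from langchain.document_loaders import NotionDBLoader

loader = NotionDBLoader(
    integration_token="secret_...",  # placeholder: your Notion integration token
    database_id="...",  # placeholder: id of a database shared with the integration
    request_timeout_sec=30,
)
docs = loader.load()  # one Document per page; page properties become metadata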
Source code for langchain.document_loaders.toml

import json
from pathlib import Path
from typing import Iterator, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class TomlLoader(BaseLoader):
    """
    A TOML document loader that inherits from the BaseLoader class.

    This class can be initialized with either a single source file or a source
    directory containing TOML files.
    """

    def __init__(self, source: Union[str, Path]):
        """Initialize the TomlLoader with a source file or directory."""
        self.source = Path(source)

    def load(self) -> List[Document]:
        """Load and return all documents."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the TOML documents from the source file or directory."""
        import tomli

        if self.source.is_file() and self.source.suffix == ".toml":
            files = [self.source]
        elif self.source.is_dir():
            files = list(self.source.glob("**/*.toml"))
        else:
            raise ValueError("Invalid source path or file type")

        for file_path in files:
            with file_path.open("r", encoding="utf-8") as file:
                content = file.read()
                try:
                    data = tomli.loads(content)
                    doc = Document(
                        page_content=json.dumps(data),
                        metadata={"source": str(file_path)},
                    )
                    yield doc
                except tomli.TOMLDecodeError as e:
                    print(f"Error parsing TOML file {file_path}: {e}")
https://python.langchain.com/en/latest/_modules/langchain/document_loaders/toml.html
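A short usage sketch, assuming a hypothetical ./config directory of TOML files; each file becomes one Document whose page_content is the parsed TOML serialized as JSON:

from langchain.document_loaders import TomlLoader

loader = TomlLoader("./config")  # accepts a single .toml file or a directory
for doc in loader.lazy_load():  # yields Documents one at a time
    print(doc.metadata["source"])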
Source code for langchain.document_loaders.url_selenium

"""Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union

if TYPE_CHECKING:
    from selenium.webdriver import Chrome, Firefox

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class SeleniumURLLoader(BaseLoader):
    """Loader that uses Selenium to load a page and unstructured to parse the html.

    This is useful for loading pages that require javascript to render.

    Attributes:
        urls (List[str]): List of URLs to load.
        continue_on_failure (bool): If True, continue loading other URLs on failure.
        browser (str): The browser to use, either 'chrome' or 'firefox'.
        binary_location (Optional[str]): The location of the browser binary.
        executable_path (Optional[str]): The path to the browser executable.
        headless (bool): If True, the browser will run in headless mode.
        arguments (List[str]): List of arguments to pass to the browser.
    """

    def __init__(
        self,
        urls: List[str],
        continue_on_failure: bool = True,
        browser: Literal["chrome", "firefox"] = "chrome",
        binary_location: Optional[str] = None,
        executable_path: Optional[str] = None,
        headless: bool = True,
        arguments: List[str] = [],
    ):
        """Load a list of URLs using Selenium and unstructured."""
        try:
            import selenium  # noqa:F401
        except ImportError:
            raise ImportError(
                "selenium package not found, please install it with "
                "`pip install selenium`"
            )

        try:
            import unstructured  # noqa:F401
        except ImportError:
            raise ImportError(
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )

        self.urls = urls
        self.continue_on_failure = continue_on_failure
        self.browser = browser
        self.binary_location = binary_location
        self.executable_path = executable_path
        self.headless = headless
        self.arguments = arguments

    def _get_driver(self) -> Union["Chrome", "Firefox"]:
        """Create and return a WebDriver instance based on the specified browser.

        Raises:
            ValueError: If an invalid browser is specified.

        Returns:
            Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
        """
        if self.browser.lower() == "chrome":
            from selenium.webdriver import Chrome
            from selenium.webdriver.chrome.options import Options as ChromeOptions

            chrome_options = ChromeOptions()

            for arg in self.arguments:
                chrome_options.add_argument(arg)

            if self.headless:
                chrome_options.add_argument("--headless")
                chrome_options.add_argument("--no-sandbox")

            if self.binary_location is not None:
                chrome_options.binary_location = self.binary_location
            if self.executable_path is None:
                return Chrome(options=chrome_options)
            return Chrome(executable_path=self.executable_path, options=chrome_options)
        elif self.browser.lower() == "firefox":
            from selenium.webdriver import Firefox
            from selenium.webdriver.firefox.options import Options as FirefoxOptions

            firefox_options = FirefoxOptions()

            for arg in self.arguments:
                firefox_options.add_argument(arg)

            if self.headless:
                firefox_options.add_argument("--headless")

            if self.binary_location is not None:
                firefox_options.binary_location = self.binary_location
            if self.executable_path is None:
                return Firefox(options=firefox_options)
            return Firefox(
                executable_path=self.executable_path, options=firefox_options
            )
        else:
            raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")

    def load(self) -> List[Document]:
        """Load the specified URLs using Selenium and create Document instances.

        Returns:
            List[Document]: A list of Document instances with loaded content.
        """
        from unstructured.partition.html import partition_html

        docs: List[Document] = list()
        driver = self._get_driver()

        for url in self.urls:
            try:
                driver.get(url)
                page_content = driver.page_source
                elements = partition_html(text=page_content)
                text = "\n\n".join([str(el) for el in elements])
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                else:
                    raise e

        driver.quit()
        return docs
https://python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
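For reference, a usage sketch of SeleniumURLLoader; the URL is illustrative, and both the selenium and unstructured packages must be installed:

from langchain.document_loaders import SeleniumURLLoader

loader = SeleniumURLLoader(
    urls=["https://example.com"],
    browser="firefox",  # or "chrome" (the default)
    headless=True,  # run without a visible browser window
    continue_on_failure=True,  # log and skip URLs that fail instead of raising
)
docs = loader.load()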
Source code for langchain.document_loaders.apify_dataset

"""Logic for loading documents from Apify datasets."""
from typing import Any, Callable, Dict, List

from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ApifyDatasetLoader(BaseLoader, BaseModel):
    """Logic for loading documents from Apify datasets."""

    apify_client: Any
    dataset_id: str
    """The ID of the dataset on the Apify platform."""
    dataset_mapping_function: Callable[[Dict], Document]
    """A custom function that takes a single dictionary (an Apify dataset item)
    and converts it to an instance of the Document class."""

    def __init__(
        self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
    ):
        """Initialize the loader with an Apify dataset ID and a mapping function.

        Args:
            dataset_id (str): The ID of the dataset on the Apify platform.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
        """
        super().__init__(
            dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
        )

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate environment."""
        try:
            from apify_client import ApifyClient

            values["apify_client"] = ApifyClient()
        except ImportError:
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )

        return values
https://python.langchain.com/en/latest/_modules/langchain/document_loaders/apify_dataset.html
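A usage sketch for ApifyDatasetLoader. The dataset id is a placeholder, and the "text" and "url" keys in the mapping function are hypothetical; the actual item fields depend on the Apify actor that produced the dataset. The apify-client package and valid Apify credentials are required.

from langchain.docstore.document import Document
from langchain.document_loaders import ApifyDatasetLoader

loader = ApifyDatasetLoader(
    dataset_id="your-dataset-id",  # placeholder
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"],  # hypothetical field name
        metadata={"source": item["url"]},  # hypothetical field name
    ),
)
docs = loader.load()  # standard BaseLoader entry point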