Dataset columns:

    Column           Type             Statistics
    date_collected   stringclasses    1 distinct value
    repo_name        stringlengths    6 to 116 characters
    file_name        stringlengths    2 to 220 characters
    file_contents    stringlengths    13 to 357k characters
    prompts          sequence         (see rows below)
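The rows below show individual records. As a minimal sketch of how a dataset with this schema could be consumed, assuming it is stored as JSON Lines in a hypothetical local file named code_files.jsonl (the actual storage format and location are not specified here):

    # Hypothetical loader for a dataset with the columns listed above.
    # "code_files.jsonl" is an assumed file name, not part of the original record.
    from datasets import load_dataset

    ds = load_dataset("json", data_files="code_files.jsonl", split="train")

    for record in ds.select(range(min(3, len(ds)))):
        print(record["date_collected"], record["repo_name"], record["file_name"])
        print(f'{len(record["file_contents"])} characters, prompts: {record["prompts"]}')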
date_collected: 2024-01-10
repo_name: Putzeys/Gabriel
file_name: connection_gpt.py
file_contents:

    import openai
    import os

    API_KEY = os.getenv("OPENAI_API_KEY")
    openai.api_key = API_KEY  # Set the API key for the openai library

    class OpenAI_API:
        def __init__(self):
            pass

        def conversation(self, messages, max_tokens=600, temperature=0.3):
            response = openai.ChatCompletion.create(
                model="gpt-4",
                messages=messages[-5:],  # Keep only the last 5 messages (context)
                max_tokens=max_tokens,
                n=1,
                temperature=temperature,
            )
            return response.choices[0].message['content'].strip()

prompts: []
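For context, a minimal usage sketch of the OpenAI_API wrapper in connection_gpt.py above. It assumes the legacy openai Python SDK (pre-1.0, where openai.ChatCompletion still exists), a valid OPENAI_API_KEY in the environment, and that the module is importable as connection_gpt; the prompt text is illustrative only.

    # Hypothetical driver for the OpenAI_API class defined above.
    from connection_gpt import OpenAI_API

    api = OpenAI_API()
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain what an API key is in one sentence."},
    ]
    # Only the last five messages are forwarded as context (messages[-5:] in the wrapper).
    reply = api.conversation(messages, max_tokens=200, temperature=0.3)
    print(reply)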
date_collected: 2024-01-10
repo_name: jason-cyun/ChatGPT
file_name: src~revChatGPT~V1.py
file_contents:
""" Standard ChatGPT """ from __future__ import annotations import base64 import binascii import contextlib import json import logging import secrets import subprocess import sys import time import uuid from functools import wraps from os import environ from os import getenv from pathlib import Path from typing import AsyncGenerator from typing import Generator from typing import NoReturn import httpx import requests import tls_client from httpx import AsyncClient from OpenAIAuth import Auth0 as Authenticator from rich.live import Live from rich.markdown import Markdown from . import __version__ from . import typings as t from .utils import create_completer from .utils import create_session from .utils import get_input def generate_random_hex(length: int = 17) -> str: """Generate a random hex string Args: length (int, optional): Length of the hex string. Defaults to 17. Returns: str: Random hex string """ return secrets.token_hex(length) def random_int(min: int, max: int) -> int: """Generate a random integer Args: min (int): Minimum value max (int): Maximum value Returns: int: Random integer """ return secrets.randbelow(max - min) + min if __name__ == "__main__": logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s", ) log = logging.getLogger(__name__) def logger(is_timed: bool): """Logger decorator Args: is_timed (bool): Whether to include function running time in exit log Returns: _type_: decorated function """ def decorator(func): wraps(func) def wrapper(*args, **kwargs): log.debug( "Entering %s with args %s and kwargs %s", func.__name__, args, kwargs, ) start = time.time() out = func(*args, **kwargs) end = time.time() if is_timed: log.debug( "Exiting %s with return value %s. Took %s seconds.", func.__name__, out, end - start, ) else: log.debug("Exiting %s with return value %s", func.__name__, out) return out return wrapper return decorator BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://bypass.churchless.tech/" bcolors = t.Colors() session = tls_client.Session( client_identifier="firefox110", random_tls_extension_order=True, ) def captcha_solver(images: list[str], challenge_details: dict) -> int: # mkdir captcha if not Path("captcha").exists(): Path("captcha").mkdir() filenames: list[Path] = [] for image in images: filename = Path("captcha", f"{time.time()}.jpeg") with open(filename, "wb") as f: f.write(base64.b64decode(image)) print(f"Saved captcha image to {filename}") # If MacOS, open the image if sys.platform == "darwin": subprocess.call(["open", filename]) if sys.platform == "linux": subprocess.call(["xdg-open", filename]) if sys.platform == "win32": subprocess.call(["start", filename]) filenames.append(filename) print(f'Captcha instructions: {challenge_details.get("instructions")}') print( "Developer instructions: The captcha images have an index starting from 0 from left to right", ) print("Enter the index of the images that matches the captcha instructions:") index = int(input()) # Delete the images for filename in filenames: filename.unlink() return index def get_arkose_token( download_images: bool = True, solver: function = captcha_solver, ) -> str: """ The solver function should take in a list of images in base64 and a dict of challenge details and return the index of the image that matches the challenge details Challenge details: game_type: str - Audio or Image instructions: str - Instructions for the captcha URLs: list[str] - URLs of the images or audio files """ captcha_url = BASE_URL.replace("/api/", "") + "/captcha/" 
resp = session.get( (captcha_url + "start?download_images=true") if download_images else captcha_url + "start", ) resp_json: dict = resp.json() if resp_json.get("status") == "success": return resp_json.get("token") if resp.status_code != 511: raise Exception(resp_json.get("error")) if resp_json.get("status") != "captcha": raise Exception("unknown error") challenge_details: dict = resp_json.get("session", {}).get("concise_challenge") if not challenge_details: raise Exception("missing details") images: list[str] = resp_json.get("images") index = solver(images, challenge_details) resp = session.post( captcha_url + "verify", json={"session": resp_json.get("session"), "index": index}, ) if resp.status_code != 200: raise Exception("Failed to verify captcha") return resp_json.get("token") class Chatbot: """ Chatbot class for ChatGPT """ @logger(is_timed=True) def __init__( self, config: dict[str, str], conversation_id: str | None = None, parent_id: str | None = None, lazy_loading: bool = True, base_url: str | None = None, captcha_solver: function = captcha_solver, captcha_download_images: bool = True, ) -> None: """Initialize a chatbot Args: config (dict[str, str]): Login and proxy info. Example: { "access_token": "<access_token>" "proxy": "<proxy_url_string>", "model": "<model_name>", "plugin": "<plugin_id>", } More details on these are available at https://github.com/acheong08/ChatGPT#configuration conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None. parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None. lazy_loading (bool, optional): Whether to load only the active conversation. Defaults to True. base_url (str | None, optional): Base URL of the ChatGPT server. Defaults to None. captcha_solver (function, optional): Function to solve captcha. Defaults to captcha_solver. captcha_download_images (bool, optional): Whether to download captcha images. Defaults to True. 
Raises: Exception: _description_ """ user_home = getenv("HOME") or getenv("USERPROFILE") if user_home is None: user_home = Path().cwd() self.cache_path = Path(Path().cwd(), ".chatgpt_cache.json") else: # mkdir ~/.config/revChatGPT if not Path(user_home, ".config").exists(): Path(user_home, ".config").mkdir() if not Path(user_home, ".config", "revChatGPT").exists(): Path(user_home, ".config", "revChatGPT").mkdir() self.cache_path = Path(user_home, ".config", "revChatGPT", "cache.json") self.config = config self.session = requests.Session() if "email" in config and "password" in config: try: cached_access_token = self.__get_cached_access_token( self.config.get("email", None), ) except t.Error as error: if error.code == 5: raise cached_access_token = None if cached_access_token is not None: self.config["access_token"] = cached_access_token if "proxy" in config: if not isinstance(config["proxy"], str): error = TypeError("Proxy must be a string!") raise error proxies = { "http": config["proxy"], "https": config["proxy"], } if isinstance(self.session, AsyncClient): proxies = { "http://": config["proxy"], "https://": config["proxy"], } self.session = AsyncClient(proxies=proxies) # type: ignore else: self.session.proxies.update(proxies) self.conversation_id = conversation_id or config.get("conversation_id", None) self.parent_id = parent_id or config.get("parent_id", None) self.conversation_mapping = {} self.conversation_id_prev_queue = [] self.parent_id_prev_queue = [] self.lazy_loading = lazy_loading self.base_url = base_url or BASE_URL self.disable_history = config.get("disable_history", False) self.__check_credentials() if self.config.get("plugin_ids", []): for plugin in self.config.get("plugin_ids"): self.install_plugin(plugin) if self.config.get("unverified_plugin_domains", []): for domain in self.config.get("unverified_plugin_domains"): if self.config.get("plugin_ids"): self.config["plugin_ids"].append( self.get_unverified_plugin(domain, install=True).get("id"), ) else: self.config["plugin_ids"] = [ self.get_unverified_plugin(domain, install=True).get("id"), ] # Get PUID cookie try: auth = Authenticator("blah", "blah") auth.access_token = self.config["access_token"] puid = auth.get_puid() self.session.headers.update({"PUID": puid}) print("Setting PUID (You are a Plus user!): " + puid) except: pass self.captcha_solver = captcha_solver self.captcha_download_images = captcha_download_images @logger(is_timed=True) def __check_credentials(self) -> None: """Check login info and perform login Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below. - access_token - email + password Raises: Exception: _description_ AuthError: _description_ """ if "access_token" in self.config: self.set_access_token(self.config["access_token"]) elif "email" not in self.config or "password" not in self.config: error = t.AuthenticationError("Insufficient login details provided!") raise error if "access_token" not in self.config: try: self.login() except Exception as error: print(error) raise error @logger(is_timed=False) def set_access_token(self, access_token: str) -> None: """Set access token in request header and self.config, then cache it to file. 
Args: access_token (str): access_token """ self.session.headers.clear() self.session.headers.update( { "Accept": "text/event-stream", "Authorization": f"Bearer {access_token}", "Content-Type": "application/json", "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", }, ) self.config["access_token"] = access_token email = self.config.get("email", None) if email is not None: self.__cache_access_token(email, access_token) @logger(is_timed=False) def __get_cached_access_token(self, email: str | None) -> str | None: """Read access token from cache Args: email (str | None): email of the account to get access token Raises: Error: _description_ Error: _description_ Error: _description_ Returns: str | None: access token string or None if not found """ email = email or "default" cache = self.__read_cache() access_token = cache.get("access_tokens", {}).get(email, None) # Parse access_token as JWT if access_token is not None: try: # Split access_token into 3 parts s_access_token = access_token.split(".") # Add padding to the middle part s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4) d_access_token = base64.b64decode(s_access_token[1]) d_access_token = json.loads(d_access_token) except binascii.Error: error = t.Error( source="__get_cached_access_token", message="Invalid access token", code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR, ) raise error from None except json.JSONDecodeError: error = t.Error( source="__get_cached_access_token", message="Invalid access token", code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR, ) raise error from None exp = d_access_token.get("exp", None) if exp is not None and exp < time.time(): error = t.Error( source="__get_cached_access_token", message="Access token expired", code=t.ErrorType.EXPIRED_ACCESS_TOKEN_ERROR, ) raise error return access_token @logger(is_timed=False) def __cache_access_token(self, email: str, access_token: str) -> None: """Write an access token to cache Args: email (str): account email access_token (str): account access token """ email = email or "default" cache = self.__read_cache() if "access_tokens" not in cache: cache["access_tokens"] = {} cache["access_tokens"][email] = access_token self.__write_cache(cache) @logger(is_timed=False) def __write_cache(self, info: dict) -> None: """Write cache info to file Args: info (dict): cache info, current format { "access_tokens":{"[email protected]": 'this account's access token', } } """ dirname = self.cache_path.home() or Path(".") dirname.mkdir(parents=True, exist_ok=True) json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4) @logger(is_timed=False) def __read_cache(self): try: cached = json.load(open(self.cache_path, encoding="utf-8")) except (FileNotFoundError, json.decoder.JSONDecodeError): cached = {} return cached @logger(is_timed=True) def login(self) -> None: """Login to OpenAI by email and password""" if not self.config.get("email") and not self.config.get("password"): log.error("Insufficient login details provided!") error = t.AuthenticationError("Insufficient login details provided!") raise error auth = Authenticator( email=self.config.get("email"), password=self.config.get("password"), proxy=self.config.get("proxy"), ) log.debug("Using authenticator to get access token") self.set_access_token(auth.get_access_token()) @logger(is_timed=True) def __send_request( self, data: dict, auto_continue: bool = False, timeout: float = 360, **kwargs, ) -> Generator[dict, None, None]: log.debug("Sending the payload") 
if ( data.get("model", "").startswith("gpt-4") and not self.config.get("SERVER_SIDE_ARKOSE") and not getenv("SERVER_SIDE_ARKOSE") ): try: data["arkose_token"] = get_arkose_token( self.captcha_download_images, self.captcha_solver, ) except Exception as e: print(e) raise e cid, pid = data["conversation_id"], data["parent_message_id"] message = "" self.conversation_id_prev_queue.append(cid) self.parent_id_prev_queue.append(pid) response = self.session.post( url=f"{self.base_url}conversation", data=json.dumps(data), timeout=timeout, stream=True, ) self.__check_response(response) finish_details = None for line in response.iter_lines(): # remove b' and ' at the beginning and end and ignore case line = str(line)[2:-1] if line.lower() == "internal server error": log.error(f"Internal Server Error: {line}") error = t.Error( source="ask", message="Internal Server Error", code=t.ErrorType.SERVER_ERROR, ) raise error if not line or line is None: continue if "data: " in line: line = line[6:] if line == "[DONE]": break # DO NOT REMOVE THIS line = line.replace('\\"', '"') line = line.replace("\\'", "'") line = line.replace("\\\\", "\\") try: line = json.loads(line) except json.decoder.JSONDecodeError: continue if not self.__check_fields(line): raise ValueError(f"Field missing. Details: {str(line)}") if line.get("message").get("author").get("role") != "assistant": continue cid = line["conversation_id"] pid = line["message"]["id"] metadata = line["message"].get("metadata", {}) message_exists = False author = {} if line.get("message"): author = metadata.get("author", {}) or line["message"].get("author", {}) if line["message"].get("content"): if line["message"]["content"].get("parts"): if len(line["message"]["content"]["parts"]) > 0: message_exists = True message: str = ( line["message"]["content"]["parts"][0] if message_exists else "" ) model = metadata.get("model_slug", None) finish_details = metadata.get("finish_details", {"type": None})["type"] yield { "author": author, "message": message, "conversation_id": cid, "parent_id": pid, "model": model, "finish_details": finish_details, "end_turn": line["message"].get("end_turn", True), "recipient": line["message"].get("recipient", "all"), "citations": metadata.get("citations", []), } self.conversation_mapping[cid] = pid if pid is not None: self.parent_id = pid if cid is not None: self.conversation_id = cid if not (auto_continue and finish_details == "max_tokens"): return message = message.strip("\n") for i in self.continue_write( conversation_id=cid, model=model, timeout=timeout, auto_continue=False, ): i["message"] = message + i["message"] yield i @logger(is_timed=True) def post_messages( self, messages: list[dict], conversation_id: str | None = None, parent_id: str | None = None, plugin_ids: list = [], model: str | None = None, auto_continue: bool = False, timeout: float = 360, **kwargs, ) -> Generator[dict, None, None]: """Ask a question to the chatbot Args: messages (list[dict]): The messages to send conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None. parent_id (str | None, optional): UUID for the message to continue on. Defaults to None. model (str | None, optional): The model to use. Defaults to None. auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. 
Yields: Generator[dict, None, None] - The response from the chatbot dict: { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, # "max_tokens" or "stop" "end_turn": bool, "recipient": str, "citations": list[dict], } """ if parent_id and not conversation_id: raise t.Error( source="User", message="conversation_id must be set once parent_id is set", code=t.ErrorType.USER_ERROR, ) if conversation_id and conversation_id != self.conversation_id: self.parent_id = None conversation_id = conversation_id or self.conversation_id parent_id = parent_id or self.parent_id or "" if not conversation_id and not parent_id: parent_id = str(uuid.uuid4()) if conversation_id and not parent_id: if conversation_id not in self.conversation_mapping: if self.lazy_loading: log.debug( "Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID", conversation_id, ) try: history = self.get_msg_history(conversation_id) self.conversation_mapping[conversation_id] = history[ "current_node" ] except requests.exceptions.HTTPError: print("Conversation unavailable") else: self.__map_conversations() if conversation_id in self.conversation_mapping: parent_id = self.conversation_mapping[conversation_id] else: print( "Warning: Invalid conversation_id provided, treat as a new conversation", ) conversation_id = None parent_id = str(uuid.uuid4()) model = model or self.config.get("model") or "text-davinci-002-render-sha" data = { "action": "next", "messages": messages, "conversation_id": conversation_id, "parent_message_id": parent_id, "model": model, "history_and_training_disabled": self.disable_history, } plugin_ids = self.config.get("plugin_ids", []) or plugin_ids if len(plugin_ids) > 0 and not conversation_id: data["plugin_ids"] = plugin_ids yield from self.__send_request( data, timeout=timeout, auto_continue=auto_continue, ) @logger(is_timed=True) def ask( self, prompt: str, conversation_id: str | None = None, parent_id: str = "", model: str = "", plugin_ids: list = [], auto_continue: bool = False, timeout: float = 360, **kwargs, ) -> Generator[dict, None, None]: """Ask a question to the chatbot Args: prompt (str): The question conversation_id (str, optional): UUID for the conversation to continue on. Defaults to None. parent_id (str, optional): UUID for the message to continue on. Defaults to "". model (str, optional): The model to use. Defaults to "". auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. Yields: The response from the chatbot dict: { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, # "max_tokens" or "stop" "end_turn": bool, "recipient": str, } """ messages = [ { "id": str(uuid.uuid4()), "role": "user", "author": {"role": "user"}, "content": {"content_type": "text", "parts": [prompt]}, }, ] yield from self.post_messages( messages, conversation_id=conversation_id, parent_id=parent_id, plugin_ids=plugin_ids, model=model, auto_continue=auto_continue, timeout=timeout, ) @logger(is_timed=True) def continue_write( self, conversation_id: str | None = None, parent_id: str = "", model: str = "", auto_continue: bool = False, timeout: float = 360, ) -> Generator[dict, None, None]: """let the chatbot continue to write. Args: conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None. 
parent_id (str, optional): UUID for the message to continue on. Defaults to None. model (str, optional): The model to use. Defaults to None. auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. Yields: dict: { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, # "max_tokens" or "stop" "end_turn": bool, "recipient": str, } """ if parent_id and not conversation_id: raise t.Error( source="User", message="conversation_id must be set once parent_id is set", code=t.ErrorType.USER_ERROR, ) if conversation_id and conversation_id != self.conversation_id: self.parent_id = None conversation_id = conversation_id or self.conversation_id parent_id = parent_id or self.parent_id or "" if not conversation_id and not parent_id: parent_id = str(uuid.uuid4()) if conversation_id and not parent_id: if conversation_id not in self.conversation_mapping: if self.lazy_loading: log.debug( "Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID", conversation_id, ) with contextlib.suppress(Exception): history = self.get_msg_history(conversation_id) self.conversation_mapping[conversation_id] = history[ "current_node" ] else: log.debug( f"Conversation ID {conversation_id} not found in conversation mapping, mapping conversations", ) self.__map_conversations() if conversation_id in self.conversation_mapping: parent_id = self.conversation_mapping[conversation_id] else: # invalid conversation_id provided, treat as a new conversation conversation_id = None parent_id = str(uuid.uuid4()) model = model or self.config.get("model") or "text-davinci-002-render-sha" data = { "action": "continue", "conversation_id": conversation_id, "parent_message_id": parent_id, "model": model or self.config.get("model") or ( "text-davinci-002-render-paid" if self.config.get("paid") else "text-davinci-002-render-sha" ), "history_and_training_disabled": self.disable_history, } yield from self.__send_request( data, timeout=timeout, auto_continue=auto_continue, ) @logger(is_timed=False) def __check_fields(self, data: dict) -> bool: try: data["message"]["content"] except (TypeError, KeyError): return False return True @logger(is_timed=False) def __check_response(self, response: requests.Response) -> None: """Make sure response is success Args: response (_type_): _description_ Raises: Error: _description_ """ try: response.raise_for_status() except requests.exceptions.HTTPError as ex: error = t.Error( source="OpenAI", message=response.text, code=response.status_code, ) raise error from ex @logger(is_timed=True) def get_conversations( self, offset: int = 0, limit: int = 20, encoding: str | None = None, ) -> list: """ Get conversations :param offset: Integer :param limit: Integer """ url = f"{self.base_url}conversations?offset={offset}&limit={limit}" response = self.session.get(url) self.__check_response(response) if encoding is not None: response.encoding = encoding data = json.loads(response.text) return data["items"] @logger(is_timed=True) def get_msg_history(self, convo_id: str, encoding: str | None = None) -> list: """ Get message history :param id: UUID of conversation :param encoding: String """ url = f"{self.base_url}conversation/{convo_id}" response = self.session.get(url) self.__check_response(response) if encoding is not None: response.encoding = encoding return response.json() def share_conversation( self, 
title: str = None, convo_id: str = None, node_id: str = None, anonymous: bool = True, ) -> str: """ Creates a share link to a conversation :param convo_id: UUID of conversation :param node_id: UUID of node :param anonymous: Boolean :param title: String Returns: str: A URL to the shared link """ convo_id = convo_id or self.conversation_id node_id = node_id or self.parent_id headers = { "Content-Type": "application/json", "origin": "https://chat.openai.com", "referer": f"https://chat.openai.com/c/{convo_id}", } # First create the share payload = { "conversation_id": convo_id, "current_node_id": node_id, "is_anonymous": anonymous, } url = f"{self.base_url}share/create" response = self.session.post(url, data=json.dumps(payload), headers=headers) self.__check_response(response) share_url = response.json().get("share_url") # Then patch the share to make public share_id = response.json().get("share_id") url = f"{self.base_url}share/{share_id}" payload = { "share_id": share_id, "highlighted_message_id": node_id, "title": title or response.json().get("title", "New chat"), "is_public": True, "is_visible": True, "is_anonymous": True, } response = self.session.patch(url, data=json.dumps(payload), headers=headers) self.__check_response(response) return share_url @logger(is_timed=True) def gen_title(self, convo_id: str, message_id: str) -> str: """ Generate title for conversation :param id: UUID of conversation :param message_id: UUID of message """ response = self.session.post( f"{self.base_url}conversation/gen_title/{convo_id}", data=json.dumps( {"message_id": message_id, "model": "text-davinci-002-render"}, ), ) self.__check_response(response) return response.json().get("title", "Error generating title") @logger(is_timed=True) def change_title(self, convo_id: str, title: str) -> None: """ Change title of conversation :param id: UUID of conversation :param title: String """ url = f"{self.base_url}conversation/{convo_id}" response = self.session.patch(url, data=json.dumps({"title": title})) self.__check_response(response) @logger(is_timed=True) def delete_conversation(self, convo_id: str) -> None: """ Delete conversation :param id: UUID of conversation """ url = f"{self.base_url}conversation/{convo_id}" response = self.session.patch(url, data='{"is_visible": false}') self.__check_response(response) @logger(is_timed=True) def clear_conversations(self) -> None: """ Delete all conversations """ url = f"{self.base_url}conversations" response = self.session.patch(url, data='{"is_visible": false}') self.__check_response(response) @logger(is_timed=False) def __map_conversations(self) -> None: conversations = self.get_conversations() histories = [self.get_msg_history(x["id"]) for x in conversations] for x, y in zip(conversations, histories): self.conversation_mapping[x["id"]] = y["current_node"] @logger(is_timed=False) def reset_chat(self) -> None: """ Reset the conversation ID and parent ID. :return: None """ self.conversation_id = None self.parent_id = str(uuid.uuid4()) @logger(is_timed=False) def rollback_conversation(self, num: int = 1) -> None: """ Rollback the conversation. :param num: Integer. The number of messages to rollback :return: None """ for _ in range(num): self.conversation_id = self.conversation_id_prev_queue.pop() self.parent_id = self.parent_id_prev_queue.pop() @logger(is_timed=True) def get_plugins(self, offset: int = 0, limit: int = 250, status: str = "approved"): """ Get plugins :param offset: Integer. Offset (Only supports 0) :param limit: Integer. 
Limit (Only below 250) :param status: String. Status of plugin (approved) """ url = f"{self.base_url}aip/p?offset={offset}&limit={limit}&statuses={status}" response = self.session.get(url) self.__check_response(response) # Parse as JSON return json.loads(response.text) @logger(is_timed=True) def install_plugin(self, plugin_id: str): """ Install plugin by ID :param plugin_id: String. ID of plugin """ url = f"{self.base_url}aip/p/{plugin_id}/user-settings" payload = {"is_installed": True} response = self.session.patch(url, data=json.dumps(payload)) self.__check_response(response) @logger(is_timed=True) def get_unverified_plugin(self, domain: str, install: bool = True) -> dict: """ Get unverified plugin by domain :param domain: String. Domain of plugin :param install: Boolean. Install plugin if found """ url = f"{self.base_url}aip/p/domain?domain={domain}" response = self.session.get(url) self.__check_response(response) if install: self.install_plugin(response.json().get("id")) return response.json() class AsyncChatbot(Chatbot): """Async Chatbot class for ChatGPT""" def __init__( self, config: dict, conversation_id: str | None = None, parent_id: str | None = None, base_url: str | None = None, lazy_loading: bool = True, ) -> None: """ Same as Chatbot class, but with async methods. """ super().__init__( config=config, conversation_id=conversation_id, parent_id=parent_id, base_url=base_url, lazy_loading=lazy_loading, ) # overwrite inherited normal session with async self.session = AsyncClient(headers=self.session.headers) async def __send_request( self, data: dict, auto_continue: bool = False, timeout: float = 360, **kwargs, ) -> AsyncGenerator[dict, None]: log.debug("Sending the payload") cid, pid = data["conversation_id"], data["parent_message_id"] message = "" self.conversation_id_prev_queue.append(cid) self.parent_id_prev_queue.append(pid) async with self.session.stream( "POST", url=f"{self.base_url}conversation", data=json.dumps(data), timeout=timeout, ) as response: await self.__check_response(response) finish_details = None async for line in response.aiter_lines(): if line.lower() == "internal server error": log.error(f"Internal Server Error: {line}") error = t.Error( source="ask", message="Internal Server Error", code=t.ErrorType.SERVER_ERROR, ) raise error if not line or line is None: continue if "data: " in line: line = line[6:] if line == "[DONE]": break try: line = json.loads(line) except json.decoder.JSONDecodeError: continue if not self.__check_fields(line): raise ValueError(f"Field missing. 
Details: {str(line)}") if line.get("message").get("author").get("role") != "assistant": continue cid = line["conversation_id"] pid = line["message"]["id"] metadata = line["message"].get("metadata", {}) message_exists = False author = {} if line.get("message"): author = metadata.get("author", {}) or line["message"].get( "author", {}, ) if line["message"].get("content"): if line["message"]["content"].get("parts"): if len(line["message"]["content"]["parts"]) > 0: message_exists = True message: str = ( line["message"]["content"]["parts"][0] if message_exists else "" ) model = metadata.get("model_slug", None) finish_details = metadata.get("finish_details", {"type": None})["type"] yield { "author": author, "message": message, "conversation_id": cid, "parent_id": pid, "model": model, "finish_details": finish_details, "end_turn": line["message"].get("end_turn", True), "recipient": line["message"].get("recipient", "all"), "citations": metadata.get("citations", []), } self.conversation_mapping[cid] = pid if pid is not None: self.parent_id = pid if cid is not None: self.conversation_id = cid if not (auto_continue and finish_details == "max_tokens"): return message = message.strip("\n") async for i in self.continue_write( conversation_id=cid, model=model, timeout=timeout, auto_continue=False, ): i["message"] = message + i["message"] yield i async def post_messages( self, messages: list[dict], conversation_id: str | None = None, parent_id: str | None = None, plugin_ids: list = [], model: str | None = None, auto_continue: bool = False, timeout: float = 360, **kwargs, ) -> AsyncGenerator[dict, None]: """Post messages to the chatbot Args: messages (list[dict]): the messages to post conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None. parent_id (str | None, optional): UUID for the message to continue on. Defaults to None. model (str | None, optional): The model to use. Defaults to None. auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. 
Yields: AsyncGenerator[dict, None]: The response from the chatbot { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, "end_turn": bool, "recipient": str, "citations": list[dict], } """ if parent_id and not conversation_id: raise t.Error( source="User", message="conversation_id must be set once parent_id is set", code=t.ErrorType.USER_ERROR, ) if conversation_id and conversation_id != self.conversation_id: self.parent_id = None conversation_id = conversation_id or self.conversation_id parent_id = parent_id or self.parent_id or "" if not conversation_id and not parent_id: parent_id = str(uuid.uuid4()) if conversation_id and not parent_id: if conversation_id not in self.conversation_mapping: if self.lazy_loading: log.debug( "Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID", conversation_id, ) try: history = await self.get_msg_history(conversation_id) self.conversation_mapping[conversation_id] = history[ "current_node" ] except requests.exceptions.HTTPError: print("Conversation unavailable") else: await self.__map_conversations() if conversation_id in self.conversation_mapping: parent_id = self.conversation_mapping[conversation_id] else: print( "Warning: Invalid conversation_id provided, treat as a new conversation", ) conversation_id = None parent_id = str(uuid.uuid4()) model = model or self.config.get("model") or "text-davinci-002-render-sha" data = { "action": "next", "messages": messages, "conversation_id": conversation_id, "parent_message_id": parent_id, "model": model, "history_and_training_disabled": self.disable_history, } plugin_ids = self.config.get("plugin_ids", []) or plugin_ids if len(plugin_ids) > 0 and not conversation_id: data["plugin_ids"] = plugin_ids async for msg in self.__send_request( data, timeout=timeout, auto_continue=auto_continue, ): yield msg async def ask( self, prompt: str, conversation_id: str | None = None, parent_id: str = "", model: str = "", plugin_ids: list = [], auto_continue: bool = False, timeout: int = 360, **kwargs, ) -> AsyncGenerator[dict, None]: """Ask a question to the chatbot Args: prompt (str): The question to ask conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None. parent_id (str, optional): UUID for the message to continue on. Defaults to "". model (str, optional): The model to use. Defaults to "". auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. Yields: AsyncGenerator[dict, None]: The response from the chatbot { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, "end_turn": bool, "recipient": str, } """ messages = [ { "id": str(uuid.uuid4()), "author": {"role": "user"}, "content": {"content_type": "text", "parts": [prompt]}, }, ] async for msg in self.post_messages( messages=messages, conversation_id=conversation_id, parent_id=parent_id, plugin_ids=plugin_ids, model=model, auto_continue=auto_continue, timeout=timeout, ): yield msg async def continue_write( self, conversation_id: str | None = None, parent_id: str = "", model: str = "", auto_continue: bool = False, timeout: float = 360, ) -> AsyncGenerator[dict, None]: """let the chatbot continue to write Args: conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None. 
parent_id (str, optional): UUID for the message to continue on. Defaults to None. model (str, optional): Model to use. Defaults to None. auto_continue (bool, optional): Whether to continue writing automatically. Defaults to False. timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360. Yields: AsyncGenerator[dict, None]: The response from the chatbot { "message": str, "conversation_id": str, "parent_id": str, "model": str, "finish_details": str, "end_turn": bool, "recipient": str, } """ if parent_id and not conversation_id: error = t.Error( source="User", message="conversation_id must be set once parent_id is set", code=t.ErrorType.SERVER_ERROR, ) raise error if conversation_id and conversation_id != self.conversation_id: self.parent_id = None conversation_id = conversation_id or self.conversation_id parent_id = parent_id or self.parent_id or "" if not conversation_id and not parent_id: parent_id = str(uuid.uuid4()) if conversation_id and not parent_id: if conversation_id not in self.conversation_mapping: await self.__map_conversations() if conversation_id in self.conversation_mapping: parent_id = self.conversation_mapping[conversation_id] else: # invalid conversation_id provided, treat as a new conversation conversation_id = None parent_id = str(uuid.uuid4()) model = model or self.config.get("model") or "text-davinci-002-render-sha" data = { "action": "continue", "conversation_id": conversation_id, "parent_message_id": parent_id, "model": model or self.config.get("model") or ( "text-davinci-002-render-paid" if self.config.get("paid") else "text-davinci-002-render-sha" ), "history_and_training_disabled": self.disable_history, } async for msg in self.__send_request( data=data, auto_continue=auto_continue, timeout=timeout, ): yield msg async def get_conversations(self, offset: int = 0, limit: int = 20) -> list: """ Get conversations :param offset: Integer :param limit: Integer """ url = f"{self.base_url}conversations?offset={offset}&limit={limit}" response = await self.session.get(url) await self.__check_response(response) data = json.loads(response.text) return data["items"] async def get_msg_history( self, convo_id: str, encoding: str | None = "utf-8", ) -> dict: """ Get message history :param id: UUID of conversation """ url = f"{self.base_url}conversation/{convo_id}" response = await self.session.get(url) if encoding is not None: response.encoding = encoding await self.__check_response(response) return json.loads(response.text) return None async def share_conversation( self, title: str = None, convo_id: str = None, node_id: str = None, anonymous: bool = True, ) -> str: """ Creates a share link to a conversation :param convo_id: UUID of conversation :param node_id: UUID of node Returns: str: A URL to the shared link """ convo_id = convo_id or self.conversation_id node_id = node_id or self.parent_id # First create the share payload = { "conversation_id": convo_id, "current_node_id": node_id, "is_anonymous": anonymous, } url = f"{self.base_url}share/create" response = await self.session.post( url, data=json.dumps(payload), ) await self.__check_response(response) share_url = response.json().get("share_url") # Then patch the share to make public share_id = response.json().get("share_id") url = f"{self.base_url}share/{share_id}" print(url) payload = { "share_id": share_id, "highlighted_message_id": node_id, "title": title or response.json().get("title", "New chat"), "is_public": True, "is_visible": True, "is_anonymous": True, } response = await 
self.session.patch( url, data=json.dumps(payload), ) await self.__check_response(response) return share_url async def gen_title(self, convo_id: str, message_id: str) -> None: """ Generate title for conversation """ url = f"{self.base_url}conversation/gen_title/{convo_id}" response = await self.session.post( url, data=json.dumps( {"message_id": message_id, "model": "text-davinci-002-render"}, ), ) await self.__check_response(response) async def change_title(self, convo_id: str, title: str) -> None: """ Change title of conversation :param convo_id: UUID of conversation :param title: String """ url = f"{self.base_url}conversation/{convo_id}" response = await self.session.patch(url, data=f'{{"title": "{title}"}}') await self.__check_response(response) async def delete_conversation(self, convo_id: str) -> None: """ Delete conversation :param convo_id: UUID of conversation """ url = f"{self.base_url}conversation/{convo_id}" response = await self.session.patch(url, data='{"is_visible": false}') await self.__check_response(response) async def clear_conversations(self) -> None: """ Delete all conversations """ url = f"{self.base_url}conversations" response = await self.session.patch(url, data='{"is_visible": false}') await self.__check_response(response) async def __map_conversations(self) -> None: conversations = await self.get_conversations() histories = [await self.get_msg_history(x["id"]) for x in conversations] for x, y in zip(conversations, histories): self.conversation_mapping[x["id"]] = y["current_node"] def __check_fields(self, data: dict) -> bool: try: data["message"]["content"] except (TypeError, KeyError): return False return True async def __check_response(self, response: httpx.Response) -> None: # 改成自带的错误处理 try: response.raise_for_status() except httpx.HTTPStatusError as ex: await response.aread() error = t.Error( source="OpenAI", message=response.text, code=response.status_code, ) raise error from ex get_input = logger(is_timed=False)(get_input) @logger(is_timed=False) def configure() -> dict: """ Looks for a config file in the following locations: """ config_files: list[Path] = [Path("config.json")] if xdg_config_home := getenv("XDG_CONFIG_HOME"): config_files.append(Path(xdg_config_home, "revChatGPT/config.json")) if user_home := getenv("HOME"): config_files.append(Path(user_home, ".config/revChatGPT/config.json")) if windows_home := getenv("HOMEPATH"): config_files.append(Path(f"{windows_home}/.config/revChatGPT/config.json")) if config_file := next((f for f in config_files if f.exists()), None): with open(config_file, encoding="utf-8") as f: config = json.load(f) else: print("No config file found.") raise FileNotFoundError("No config file found.") return config @logger(is_timed=False) def main(config: dict) -> NoReturn: """ Main function for the chatGPT program. """ chatbot = Chatbot( config, conversation_id=config.get("conversation_id"), parent_id=config.get("parent_id"), ) def handle_commands(command: str) -> bool: if command == "!help": print( """ !help - Show this message !reset - Forget the current conversation !config - Show the current configuration !plugins - Show the current plugins !switch x - Switch to plugin x. Need to reset the conversation to ativate the plugin. 
!rollback x - Rollback the conversation (x being the number of messages to rollback) !setconversation - Changes the conversation !share - Creates a share link to the current conversation !exit - Exit this program """, ) elif command == "!reset": chatbot.reset_chat() print("Chat session successfully reset.") elif command == "!config": print(json.dumps(chatbot.config, indent=4)) elif command.startswith("!rollback"): try: rollback = int(command.split(" ")[1]) except IndexError: logging.exception( "No number specified, rolling back 1 message", stack_info=True, ) rollback = 1 chatbot.rollback_conversation(rollback) print(f"Rolled back {rollback} messages.") elif command.startswith("!setconversation"): try: chatbot.conversation_id = chatbot.config[ "conversation_id" ] = command.split(" ")[1] print("Conversation has been changed") except IndexError: log.exception( "Please include conversation UUID in command", stack_info=True, ) print("Please include conversation UUID in command") elif command.startswith("!continue"): print() print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}") prev_text = "" for data in chatbot.continue_write(): message = data["message"][len(prev_text) :] print(message, end="", flush=True) prev_text = data["message"] print(bcolors.ENDC) print() elif command.startswith("!share"): print(f"Conversation shared at {chatbot.share_conversation()}") elif command == "!exit": if isinstance(chatbot.session, httpx.AsyncClient): chatbot.session.aclose() exit() else: return False return True session = create_session() completer = create_completer( [ "!help", "!reset", "!config", "!rollback", "!exit", "!setconversation", "!continue", "!plugins", "!switch", "!share", ], ) print() try: result = {} while True: print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}") prompt = get_input(session=session, completer=completer) if prompt.startswith("!") and handle_commands(prompt): continue print() print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}") if chatbot.config.get("model") == "gpt-4-browsing": print("Browsing takes a while, please wait...") with Live(Markdown(""), auto_refresh=False) as live: for data in chatbot.ask(prompt=prompt, auto_continue=True): if data["recipient"] != "all": continue result = data message = data["message"] live.update(Markdown(message), refresh=True) print() if result.get("citations", False): print( f"{bcolors.WARNING + bcolors.BOLD}Citations: {bcolors.ENDC}", ) for citation in result["citations"]: print( f'{citation["metadata"]["title"]}: {citation["metadata"]["url"]}', ) print() except (KeyboardInterrupt, EOFError): exit() except Exception as exc: error = t.CLIError("command line program unknown error") raise error from exc if __name__ == "__main__": print( f""" ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat) Repo: github.com/acheong08/ChatGPT Version: {__version__} """, ) print("Type '!help' to show a full list of commands") print( f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}", ) main(configure())
[ "text", "content_type" ]
date_collected: 2024-01-10
repo_name: Csinclair0/transformers
file_name: src~transformers~models~auto~tokenization_auto.py
file_contents:
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Tokenizer class. """ import json import os from collections import OrderedDict from typing import Dict, Optional, Union from ... import GPTNeoConfig from ...configuration_utils import PretrainedConfig from ...file_utils import ( cached_path, hf_bucket_url, is_offline_mode, is_sentencepiece_available, is_tokenizers_available, ) from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE from ...utils import logging from ..bart.tokenization_bart import BartTokenizer from ..bert.tokenization_bert import BertTokenizer from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer from ..bertweet.tokenization_bertweet import BertweetTokenizer from ..blenderbot.tokenization_blenderbot import BlenderbotTokenizer from ..blenderbot_small.tokenization_blenderbot_small import BlenderbotSmallTokenizer from ..byt5.tokenization_byt5 import ByT5Tokenizer from ..canine.tokenization_canine import CanineTokenizer from ..convbert.tokenization_convbert import ConvBertTokenizer from ..ctrl.tokenization_ctrl import CTRLTokenizer from ..deberta.tokenization_deberta import DebertaTokenizer from ..distilbert.tokenization_distilbert import DistilBertTokenizer from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer from ..electra.tokenization_electra import ElectraTokenizer from ..flaubert.tokenization_flaubert import FlaubertTokenizer from ..fsmt.tokenization_fsmt import FSMTTokenizer from ..funnel.tokenization_funnel import FunnelTokenizer from ..gpt2.tokenization_gpt2 import GPT2Tokenizer from ..herbert.tokenization_herbert import HerbertTokenizer from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer from ..led.tokenization_led import LEDTokenizer from ..longformer.tokenization_longformer import LongformerTokenizer from ..luke.tokenization_luke import LukeTokenizer from ..lxmert.tokenization_lxmert import LxmertTokenizer from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer from ..mpnet.tokenization_mpnet import MPNetTokenizer from ..openai.tokenization_openai import OpenAIGPTTokenizer from ..phobert.tokenization_phobert import PhobertTokenizer from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer from ..rag.tokenization_rag import RagTokenizer from ..retribert.tokenization_retribert import RetriBertTokenizer from ..roberta.tokenization_roberta import RobertaTokenizer from ..roformer.tokenization_roformer import RoFormerTokenizer from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer from ..tapas.tokenization_tapas import TapasTokenizer from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer from ..xlm.tokenization_xlm import XLMTokenizer from .configuration_auto import ( AlbertConfig, AutoConfig, BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, 
ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, HubertConfig, IBertConfig, LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, RagConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, replace_list_option_in_docstrings, ) if is_sentencepiece_available(): from ..albert.tokenization_albert import AlbertTokenizer from ..barthez.tokenization_barthez import BarthezTokenizer from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer from ..big_bird.tokenization_big_bird import BigBirdTokenizer from ..camembert.tokenization_camembert import CamembertTokenizer from ..cpm.tokenization_cpm import CpmTokenizer from ..deberta_v2.tokenization_deberta_v2 import DebertaV2Tokenizer from ..m2m_100 import M2M100Tokenizer from ..marian.tokenization_marian import MarianTokenizer from ..mbart.tokenization_mbart import MBartTokenizer from ..mbart.tokenization_mbart50 import MBart50Tokenizer from ..mt5 import MT5Tokenizer from ..pegasus.tokenization_pegasus import PegasusTokenizer from ..reformer.tokenization_reformer import ReformerTokenizer from ..speech_to_text import Speech2TextTokenizer from ..t5.tokenization_t5 import T5Tokenizer from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer from ..xlnet.tokenization_xlnet import XLNetTokenizer else: AlbertTokenizer = None BarthezTokenizer = None BertGenerationTokenizer = None BigBirdTokenizer = None CamembertTokenizer = None CpmTokenizer = None DebertaV2Tokenizer = None MarianTokenizer = None MBartTokenizer = None MBart50Tokenizer = None MT5Tokenizer = None PegasusTokenizer = None ReformerTokenizer = None T5Tokenizer = None XLMRobertaTokenizer = None XLNetTokenizer = None XLMProphetNetTokenizer = None M2M100Tokenizer = None Speech2TextTokenizer = None if is_tokenizers_available(): from ...tokenization_utils_fast import PreTrainedTokenizerFast from ..albert.tokenization_albert_fast import AlbertTokenizerFast from ..bart.tokenization_bart_fast import BartTokenizerFast from ..barthez.tokenization_barthez_fast import BarthezTokenizerFast from ..bert.tokenization_bert_fast import BertTokenizerFast from ..big_bird.tokenization_big_bird_fast import BigBirdTokenizerFast from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast from ..convbert.tokenization_convbert_fast import ConvBertTokenizerFast from ..deberta.tokenization_deberta_fast import DebertaTokenizerFast from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast from ..electra.tokenization_electra_fast import ElectraTokenizerFast from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast from ..led.tokenization_led_fast import LEDTokenizerFast from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast from 
..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast from ..mbart.tokenization_mbart50_fast import MBart50TokenizerFast from ..mbart.tokenization_mbart_fast import MBartTokenizerFast from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast from ..mpnet.tokenization_mpnet_fast import MPNetTokenizerFast from ..mt5 import MT5TokenizerFast from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from ..roformer.tokenization_roformer_fast import RoFormerTokenizerFast from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast from ..t5.tokenization_t5_fast import T5TokenizerFast from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast else: AlbertTokenizerFast = None BartTokenizerFast = None BarthezTokenizerFast = None BertTokenizerFast = None BigBirdTokenizerFast = None CamembertTokenizerFast = None ConvBertTokenizerFast = None DebertaTokenizerFast = None DistilBertTokenizerFast = None DPRQuestionEncoderTokenizerFast = None ElectraTokenizerFast = None FunnelTokenizerFast = None GPT2TokenizerFast = None HerbertTokenizerFast = None LayoutLMTokenizerFast = None LEDTokenizerFast = None LongformerTokenizerFast = None LxmertTokenizerFast = None MBartTokenizerFast = None MBart50TokenizerFast = None MobileBertTokenizerFast = None MPNetTokenizerFast = None MT5TokenizerFast = None OpenAIGPTTokenizerFast = None PegasusTokenizerFast = None ReformerTokenizerFast = None RetriBertTokenizerFast = None RobertaTokenizerFast = None RoFormerTokenizerFast = None SqueezeBertTokenizerFast = None T5TokenizerFast = None XLMRobertaTokenizerFast = None XLNetTokenizerFast = None PreTrainedTokenizerFast = None logger = logging.get_logger(__name__) TOKENIZER_MAPPING = OrderedDict( [ (RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)), (RoFormerConfig, (RoFormerTokenizer, RoFormerTokenizerFast)), (T5Config, (T5Tokenizer, T5TokenizerFast)), (MT5Config, (MT5Tokenizer, MT5TokenizerFast)), (MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)), (DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)), (AlbertConfig, (AlbertTokenizer, AlbertTokenizerFast)), (CamembertConfig, (CamembertTokenizer, CamembertTokenizerFast)), (PegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)), (MBartConfig, (MBartTokenizer, MBartTokenizerFast)), (XLMRobertaConfig, (XLMRobertaTokenizer, XLMRobertaTokenizerFast)), (MarianConfig, (MarianTokenizer, None)), (BlenderbotSmallConfig, (BlenderbotSmallTokenizer, None)), (BlenderbotConfig, (BlenderbotTokenizer, None)), (BartConfig, (BartTokenizer, BartTokenizerFast)), (LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)), (RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)), (ReformerConfig, (ReformerTokenizer, ReformerTokenizerFast)), (ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)), (FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)), (LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)), (LayoutLMConfig, (LayoutLMTokenizer, LayoutLMTokenizerFast)), (DPRConfig, (DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast)), (SqueezeBertConfig, (SqueezeBertTokenizer, SqueezeBertTokenizerFast)), (BertConfig, 
(BertTokenizer, BertTokenizerFast)), (OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)), (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)), (TransfoXLConfig, (TransfoXLTokenizer, None)), (XLNetConfig, (XLNetTokenizer, XLNetTokenizerFast)), (FlaubertConfig, (FlaubertTokenizer, None)), (XLMConfig, (XLMTokenizer, None)), (CTRLConfig, (CTRLTokenizer, None)), (FSMTConfig, (FSMTTokenizer, None)), (BertGenerationConfig, (BertGenerationTokenizer, None)), (DebertaConfig, (DebertaTokenizer, DebertaTokenizerFast)), (DebertaV2Config, (DebertaV2Tokenizer, None)), (RagConfig, (RagTokenizer, None)), (XLMProphetNetConfig, (XLMProphetNetTokenizer, None)), (Speech2TextConfig, (Speech2TextTokenizer, None)), (M2M100Config, (M2M100Tokenizer, None)), (ProphetNetConfig, (ProphetNetTokenizer, None)), (MPNetConfig, (MPNetTokenizer, MPNetTokenizerFast)), (TapasConfig, (TapasTokenizer, None)), (LEDConfig, (LEDTokenizer, LEDTokenizerFast)), (ConvBertConfig, (ConvBertTokenizer, ConvBertTokenizerFast)), (BigBirdConfig, (BigBirdTokenizer, BigBirdTokenizerFast)), (IBertConfig, (RobertaTokenizer, RobertaTokenizerFast)), (Wav2Vec2Config, (Wav2Vec2CTCTokenizer, None)), (HubertConfig, (Wav2Vec2CTCTokenizer, None)), (GPTNeoConfig, (GPT2Tokenizer, GPT2TokenizerFast)), (LukeConfig, (LukeTokenizer, None)), (BigBirdPegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)), (CanineConfig, (CanineTokenizer, None)), ] ) # For tokenizers which are not directly mapped from a config NO_CONFIG_TOKENIZER = [ BertJapaneseTokenizer, BertweetTokenizer, ByT5Tokenizer, CpmTokenizer, HerbertTokenizer, HerbertTokenizerFast, PhobertTokenizer, BarthezTokenizer, BarthezTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, PreTrainedTokenizerFast, ] SLOW_TOKENIZER_MAPPING = { k: (v[0] if v[0] is not None else v[1]) for k, v in TOKENIZER_MAPPING.items() if (v[0] is not None or v[1] is not None) } def tokenizer_class_from_name(class_name: str): all_tokenizer_classes = ( [v[0] for v in TOKENIZER_MAPPING.values() if v[0] is not None] + [v[1] for v in TOKENIZER_MAPPING.values() if v[1] is not None] + [v for v in NO_CONFIG_TOKENIZER if v is not None] ) for c in all_tokenizer_classes: if c.__name__ == class_name: return c def get_tokenizer_config( pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ): """ Loads the tokenizer configuration from a pretrained model tokenizer configuration. Args: pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): This can be either: - a string, the `model id` of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g., ``./my_model_directory/``. cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (:obj:`Dict[str, str]`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (:obj:`str` or `bool`, `optional`): The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`): If :obj:`True`, will only try to load the tokenizer configuration from local files. .. note:: Passing :obj:`use_auth_token=True` is required when you want to use a private model. Returns: :obj:`Dict`: The configuration of the tokenizer. Examples:: # Download configuration from huggingface.co and cache. tokenizer_config = get_tokenizer_config("bert-base-uncased") # This model does not have a tokenizer config so the result will be an empty dict. tokenizer_config = get_tokenizer_config("xlm-roberta-base") # Save a pretrained tokenizer locally and you can reload its config from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer.save_pretrained("tokenizer-test") tokenizer_config = get_tokenizer_config("tokenizer-test") """ if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE) else: config_file = hf_bucket_url( pretrained_model_name_or_path, filename=TOKENIZER_CONFIG_FILE, revision=revision, mirror=None ) try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, ) except EnvironmentError: logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.") return {} with open(resolved_config_file, encoding="utf-8") as reader: return json.load(reader) class AutoTokenizer: r""" This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when created with the :meth:`AutoTokenizer.from_pretrained` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoTokenizer is designed to be instantiated " "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method." 
) @classmethod @replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING) def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): r""" Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary. The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object (either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`: List options Params: pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): Can be either: - A string, the `model id` of a predefined tokenizer hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g., ``./my_model_directory/``. - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``. (Not applicable to all derived classes) inputs (additional positional arguments, `optional`): Will be passed along to the Tokenizer ``__init__()`` method. config (:class:`~transformers.PretrainedConfig`, `optional`) The configuration object used to dertermine the tokenizer class to instantiate. cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (:obj:`Dict[str, str]`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. subfolder (:obj:`str`, `optional`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here. use_fast (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to try to load the fast version of the tokenizer. kwargs (additional keyword arguments, `optional`): Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details. Examples:: >>> from transformers import AutoTokenizer >>> # Download vocabulary from huggingface.co and cache. 
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> # Download vocabulary from huggingface.co (user-uploaded) and cache. >>> tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased') >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) >>> tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/') """ config = kwargs.pop("config", None) kwargs["_from_auto"] = True use_fast = kwargs.pop("use_fast", True) # First, let's try to use the tokenizer_config file to get the tokenizer class. tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs) config_tokenizer_class = tokenizer_config.get("tokenizer_class") # If that did not work, let's try to use the config. if config_tokenizer_class is None: if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) config_tokenizer_class = config.tokenizer_class # If we have the tokenizer class from the tokenizer config or the model config we're good! if config_tokenizer_class is not None: tokenizer_class = None if use_fast and not config_tokenizer_class.endswith("Fast"): tokenizer_class_candidate = f"{config_tokenizer_class}Fast" tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: tokenizer_class_candidate = config_tokenizer_class tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: raise ValueError( f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported." ) return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) # Otherwise we have to be creative. # if model is an encoder decoder, the encoder tokenizer class is used by default if isinstance(config, EncoderDecoderConfig): if type(config.decoder) is not type(config.encoder): # noqa: E721 logger.warning( f"The encoder model config class: {config.encoder.__class__} is different from the decoder model " f"config class: {config.decoder.__class__}. It is not recommended to use the " "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder " "specific tokenizer classes." ) config = config.encoder if type(config) in TOKENIZER_MAPPING.keys(): tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)] if tokenizer_class_fast and (use_fast or tokenizer_class_py is None): return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) else: if tokenizer_class_py is not None: return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) else: raise ValueError( "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed " "in order to use this tokenizer." ) raise ValueError( f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n" f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}." )
[]
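A minimal sketch of how the TOKENIZER_MAPPING above is exercised through AutoTokenizer.from_pretrained, assuming the transformers library is installed and the bert-base-uncased checkpoint from the file's own docstring examples can be downloaded:

from transformers import AutoTokenizer

# BertConfig maps to (BertTokenizer, BertTokenizerFast) in TOKENIZER_MAPPING,
# so the fast Rust-backed tokenizer is returned by default.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print(type(tokenizer).__name__)          # BertTokenizerFast

# use_fast=False falls back to the pure-Python BertTokenizer
slow_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
print(type(slow_tokenizer).__name__)     # BertTokenizer

encoded = tokenizer("Hello world")
print(encoded["input_ids"])              # e.g. [101, 7592, 2088, 102]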
2024-01-10
5l1v3r1/gpt-pilot
pilot~helpers~AgentConvo.py
import subprocess from termcolor import colored from database.database import get_development_step_from_hash_id, save_development_step, delete_all_subsequent_steps from utils.utils import array_of_objects_to_string from utils.llm_connection import get_prompt, create_gpt_chat_completion from utils.utils import get_sys_message, find_role_from_step, capitalize_first_word_with_underscores from logger.logger import logger from prompts.prompts import ask_user from const.llm import END_RESPONSE class AgentConvo: """ Represents a conversation with an agent. Args: agent: An instance of the agent participating in the conversation. """ def __init__(self, agent): self.messages = [] self.branches = {} self.log_to_user = True self.agent = agent self.high_level_step = self.agent.project.current_step # add system message self.messages.append(get_sys_message(self.agent.role)) def send_message(self, prompt_path=None, prompt_data=None, function_calls=None): """ Sends a message in the conversation. Args: prompt_path: The path to a prompt. prompt_data: Data associated with the prompt. function_calls: Optional function calls to be included in the message. Returns: The response from the agent. """ # craft message self.construct_and_add_message_from_prompt(prompt_path, prompt_data) # check if we already have the LLM response saved if self.agent.__class__.__name__ == 'Developer': self.agent.project.llm_req_num += 1 development_step = get_development_step_from_hash_id(self.agent.project, prompt_path, prompt_data, self.agent.project.llm_req_num) if development_step is not None and self.agent.project.skip_steps: # if we do, use it print(colored(f'Restoring development step with id {development_step.id}', 'yellow')) self.agent.project.checkpoints['last_development_step'] = development_step self.agent.project.restore_files(development_step.id) response = development_step.llm_response self.messages = development_step.messages if self.agent.project.skip_until_dev_step and str(development_step.id) == self.agent.project.skip_until_dev_step: self.agent.project.skip_steps = False delete_all_subsequent_steps(self.agent.project) if 'delete_unrelated_steps' in self.agent.project.args and self.agent.project.args['delete_unrelated_steps']: self.agent.project.delete_all_steps_except_current_branch() else: # if we don't, get the response from LLM response = create_gpt_chat_completion(self.messages, self.high_level_step, function_calls=function_calls) if self.agent.__class__.__name__ == 'Developer': development_step = save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, response) self.agent.project.checkpoints['last_development_step'] = development_step # TODO handle errors from OpenAI if response == {}: raise Exception("OpenAI API error happened.") response = self.postprocess_response(response, function_calls) # TODO remove this once the database is set up properly message_content = response[0] if type(response) == tuple else response if isinstance(message_content, list): if 'to_message' in function_calls: string_response = function_calls['to_message'](message_content) elif len(message_content) > 0 and isinstance(message_content[0], dict): string_response = [ f'#{i}\n' + array_of_objects_to_string(d) for i, d in enumerate(message_content) ] else: string_response = ['- ' + r for r in message_content] message_content = '\n'.join(string_response) # TODO END # TODO we need to specify the response when there is a function called # TODO maybe we can have a specific function that creates the GPT response from 
the function call self.messages.append({"role": "assistant", "content": message_content}) self.log_message(message_content) return response def continuous_conversation(self, prompt_path, prompt_data, function_calls=None): """ Conducts a continuous conversation with the agent. Args: prompt_path: The path to a prompt. prompt_data: Data associated with the prompt. function_calls: Optional function calls to be included in the conversation. Returns: List of accepted messages in the conversation. """ self.log_to_user = False accepted_messages = [] response = self.send_message(prompt_path, prompt_data, function_calls) # Continue conversation until GPT response equals END_RESPONSE while response != END_RESPONSE: print(colored("Do you want to add anything else? If not, ", 'yellow') + colored('just press ENTER.', 'yellow', attrs=['bold'])) user_message = ask_user(self.agent.project, response, False) if user_message == "": accepted_messages.append(response) self.messages.append({"role": "user", "content": user_message}) response = self.send_message(None, None, function_calls) self.log_to_user = True return accepted_messages def save_branch(self, branch_name): self.branches[branch_name] = self.messages.copy() def load_branch(self, branch_name): self.messages = self.branches[branch_name].copy() def convo_length(self): return len([msg for msg in self.messages if msg['role'] != 'system']) def postprocess_response(self, response, function_calls): """ Post-processes the response from the agent. Args: response: The response from the agent. function_calls: Optional function calls associated with the response. Returns: The post-processed response. """ if 'function_calls' in response and function_calls is not None: if 'send_convo' in function_calls: response['function_calls']['arguments']['convo'] = self response = function_calls['functions'][response['function_calls']['name']](**response['function_calls']['arguments']) elif 'text' in response: response = response['text'] return response def log_message(self, content): """ Logs a message in the conversation. Args: content: The content of the message to be logged. """ print_msg = capitalize_first_word_with_underscores(self.high_level_step) if self.log_to_user: if self.agent.project.checkpoints['last_development_step'] is not None: print(colored("\nDev step ", 'yellow') + colored(self.agent.project.checkpoints['last_development_step'], 'yellow', attrs=['bold']) + '\n', end='') print(f"\n{content}\n") logger.info(f"{print_msg}: {content}\n") def to_playground(self): with open('const/convert_to_playground_convo.js', 'r', encoding='utf-8') as file: content = file.read() process = subprocess.Popen('pbcopy', stdin=subprocess.PIPE) process.communicate(content.replace('{{messages}}', str(self.messages)).encode('utf-8')) def remove_last_x_messages(self, x): self.messages = self.messages[:-x] def construct_and_add_message_from_prompt(self, prompt_path, prompt_data): if prompt_path is not None and prompt_data is not None: prompt = get_prompt(prompt_path, prompt_data) self.messages.append({"role": "user", "content": prompt})
[]
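The function_calls handling in postprocess_response above reduces to a small dispatch table; a stripped-down, self-contained sketch of that pattern (the send_convo branch is omitted and the 'save_files' entry is a hypothetical function name, not one from the repo):

def dispatch_response(response, function_calls):
    # Mirrors AgentConvo.postprocess_response(): a structured function call is routed
    # through the 'functions' lookup table, otherwise the plain text is returned.
    if 'function_calls' in response and function_calls is not None:
        call = response['function_calls']
        return function_calls['functions'][call['name']](**call['arguments'])
    if 'text' in response:
        return response['text']
    return response


# Hypothetical dispatch table in the style of pilot/const/function_calls.py
function_calls = {'functions': {'save_files': lambda files: f"saved {len(files)} files"}}

print(dispatch_response({'text': 'hello'}, function_calls))
print(dispatch_response(
    {'function_calls': {'name': 'save_files', 'arguments': {'files': ['a.py', 'b.py']}}},
    function_calls,
))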
2024-01-10
5l1v3r1/gpt-pilot
pilot~utils~llm_connection.py
import re import requests import os import sys import time import json import tiktoken import questionary from typing import List from jinja2 import Environment, FileSystemLoader from const.llm import MIN_TOKENS_FOR_GPT_RESPONSE, MAX_GPT_MODEL_TOKENS, MAX_QUESTIONS, END_RESPONSE from logger.logger import logger from termcolor import colored from utils.utils import get_prompt_components, fix_json from utils.spinner import spinner_start, spinner_stop def connect_to_llm(): pass def get_prompt(prompt_name, data=None): if data is None: data = {} data.update(get_prompt_components()) logger.debug(f"Getting prompt for {prompt_name}") # logging here # Create a file system loader with the directory of the templates file_loader = FileSystemLoader('prompts') # Create the Jinja2 environment env = Environment(loader=file_loader) # Load the template template = env.get_template(prompt_name) # Render the template with the provided data output = template.render(data) return output def get_tokens_in_messages(messages: List[str]) -> int: tokenizer = tiktoken.get_encoding("cl100k_base") # GPT-4 tokenizer tokenized_messages = [tokenizer.encode(message['content']) for message in messages] return sum(len(tokens) for tokens in tokenized_messages) #get endpoint and model name from .ENV file model = os.getenv('MODEL_NAME') endpoint = os.getenv('ENDPOINT') def num_tokens_from_functions(functions, model=model): """Return the number of tokens used by a list of functions.""" encoding = tiktoken.get_encoding("cl100k_base") num_tokens = 0 for function in functions: function_tokens = len(encoding.encode(function['name'])) function_tokens += len(encoding.encode(function['description'])) if 'parameters' in function: parameters = function['parameters'] if 'properties' in parameters: for propertiesKey in parameters['properties']: function_tokens += len(encoding.encode(propertiesKey)) v = parameters['properties'][propertiesKey] for field in v: if field == 'type': function_tokens += 2 function_tokens += len(encoding.encode(v['type'])) elif field == 'description': function_tokens += 2 function_tokens += len(encoding.encode(v['description'])) elif field == 'enum': function_tokens -= 3 for o in v['enum']: function_tokens += 3 function_tokens += len(encoding.encode(o)) # else: # print(f"Warning: not supported field {field}") function_tokens += 11 num_tokens += function_tokens num_tokens += 12 return num_tokens def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TOKENS_FOR_GPT_RESPONSE, function_calls=None): """ Called from: - AgentConvo.send_message() - these calls often have `function_calls`, usually from `pilot/const/function_calls.py` - convo.continuous_conversation() - prompts.get_additional_info_from_openai() - prompts.get_additional_info_from_user() after the user responds to each "Please check this message and say what needs to be changed... {message}" :param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ] :param req_type: 'project_description' etc. See common.STEPS :param min_tokens: defaults to 600 :param function_calls: (optional) {'definitions': [{ 'name': str }, ...]} see `IMPLEMENT_CHANGES` etc. 
in `pilot/const/function_calls.py` :return: {'text': new_code} or if `function_calls` param provided {'function_calls': {'name': str, arguments: {...}}} """ gpt_data = { 'model': os.getenv('MODEL_NAME', 'gpt-4'), 'n': 1, 'max_tokens': 4096, 'temperature': 1, 'top_p': 1, 'presence_penalty': 0, 'frequency_penalty': 0, 'messages': messages, 'stream': True } # delete some keys if using "OpenRouter" API if os.getenv('ENDPOINT') == "OPENROUTER": keys_to_delete = ['n', 'max_tokens', 'temperature', 'top_p', 'presence_penalty', 'frequency_penalty'] for key in keys_to_delete: if key in gpt_data: del gpt_data[key] if function_calls is not None: # Advise the LLM of the JSON response schema we are expecting gpt_data['functions'] = function_calls['definitions'] if len(function_calls['definitions']) > 1: gpt_data['function_call'] = 'auto' else: gpt_data['function_call'] = {'name': function_calls['definitions'][0]['name']} try: response = stream_gpt_completion(gpt_data, req_type) return response except Exception as e: error_message = str(e) # Check if the error message is related to token limit if "context_length_exceeded" in error_message.lower(): raise Exception('Too many tokens in the request. Please try to continue the project with some previous development step.') else: print('The request to OpenAI API failed. Here is the error message:') print(e) def delete_last_n_lines(n): for _ in range(n): # Move the cursor up one line sys.stdout.write('\033[F') # Clear the current line sys.stdout.write('\033[K') def count_lines_based_on_width(content, width): lines_required = sum(len(line) // width + 1 for line in content.split('\n')) return lines_required def retry_on_exception(func): def wrapper(*args, **kwargs): while True: try: return func(*args, **kwargs) except Exception as e: # Convert exception to string err_str = str(e) # If the specific error "context_length_exceeded" is present, simply return without retry if "context_length_exceeded" in err_str: raise Exception("context_length_exceeded") if "rate_limit_exceeded" in err_str: # Extracting the duration from the error string match = re.search(r"Please try again in (\d+)ms.", err_str) if match: wait_duration = int(match.group(1)) / 1000 time.sleep(wait_duration) continue print(colored('There was a problem with request to openai API:', 'red')) print(err_str) user_message = questionary.text( "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.", style=questionary.Style([ ('question', 'fg:red'), ('answer', 'fg:orange') ])).ask() if user_message != '': return {} return wrapper @retry_on_exception def stream_gpt_completion(data, req_type): """ Called from create_gpt_chat_completion() :param data: :param req_type: 'project_description' etc. 
See common.STEPS :return: {'text': str} or {'function_calls': {'name': str, arguments: '{...}'}} """ terminal_width = os.get_terminal_size().columns lines_printed = 2 buffer = "" # A buffer to accumulate incoming data def return_result(result_data, lines_printed): if buffer: lines_printed += count_lines_based_on_width(buffer, terminal_width) logger.info(f'lines printed: {lines_printed} - {terminal_width}') delete_last_n_lines(lines_printed) return result_data # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow')) # print(colored("Stream response from OpenAI:", 'yellow')) logger.info(f'Request data: {data}') # Check if the ENDPOINT is AZURE if endpoint == 'AZURE': # If yes, get the AZURE_ENDPOINT from .ENV file endpoint_url = os.getenv('AZURE_ENDPOINT') + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15' headers = {'Content-Type': 'application/json', 'api-key': os.getenv('AZURE_API_KEY')} elif endpoint == 'OPENROUTER': # If so, send the request to the OpenRouter API endpoint headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + os.getenv("OPENROUTER_API_KEY"), 'HTTP-Referer': 'http://localhost:3000', 'X-Title': 'GPT Pilot (LOCAL)'} endpoint_url = os.getenv("OPENROUTER_ENDPOINT", 'https://openrouter.ai/api/v1/chat/completions') else: # If not, send the request to the OpenAI endpoint headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + os.getenv("OPENAI_API_KEY")} endpoint_url = os.getenv("OPENAI_ENDPOINT", 'https://api.openai.com/v1/chat/completions') response = requests.post( endpoint_url, headers=headers, json=data, stream=True ) # Log the response status code and message logger.info(f'Response status code: {response.status_code}') if response.status_code != 200: logger.debug(f'problem with request: {response.text}') raise Exception(f"API responded with status code: {response.status_code}. 
Response text: {response.text}") gpt_response = '' function_calls = {'name': '', 'arguments': ''} for line in response.iter_lines(): # Ignore keep-alive new lines if line: line = line.decode("utf-8") # decode the bytes to string if line.startswith('data: '): line = line[6:] # remove the 'data: ' prefix # Check if the line is "[DONE]" before trying to parse it as JSON if line == "[DONE]": continue try: json_line = json.loads(line) if len(json_line['choices']) == 0: continue if 'error' in json_line: logger.error(f'Error in LLM response: {json_line}') raise ValueError(f'Error in LLM response: {json_line["error"]["message"]}') if json_line['choices'][0]['finish_reason'] == 'function_call': function_calls['arguments'] = load_data_to_json(function_calls['arguments']) return return_result({'function_calls': function_calls}, lines_printed) json_line = json_line['choices'][0]['delta'] except json.JSONDecodeError: logger.error(f'Unable to decode line: {line}') continue # skip to the next line # handle the streaming response if 'function_call' in json_line: if 'name' in json_line['function_call']: function_calls['name'] = json_line['function_call']['name'] print(f'Function call: {function_calls["name"]}') if 'arguments' in json_line['function_call']: function_calls['arguments'] += json_line['function_call']['arguments'] print(json_line['function_call']['arguments'], end='', flush=True) if 'content' in json_line: content = json_line.get('content') if content: buffer += content # accumulate the data # If you detect a natural breakpoint (e.g., line break or end of a response object), print & count: if buffer.endswith("\n"): # or some other condition that denotes a breakpoint lines_printed += count_lines_based_on_width(buffer, terminal_width) buffer = "" # reset the buffer gpt_response += content print(content, end='', flush=True) print('\n') if function_calls['arguments'] != '': logger.info(f'Response via function call: {function_calls["arguments"]}') function_calls['arguments'] = load_data_to_json(function_calls['arguments']) return return_result({'function_calls': function_calls}, lines_printed) logger.info(f'Response message: {gpt_response}') new_code = postprocessing(gpt_response, req_type) # TODO add type dynamically return return_result({'text': new_code}, lines_printed) def postprocessing(gpt_response, req_type): return gpt_response def load_data_to_json(string): return json.loads(fix_json(string))
[]
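stream_gpt_completion above consumes a server-sent-events stream: each payload line is prefixed with "data: ", the stream terminates with "[DONE]", and text arrives as incremental deltas. A self-contained sketch of that parsing loop over canned lines, with no network call:

import json

sample_lines = [
    b'data: {"choices": [{"delta": {"content": "Hello"}, "finish_reason": null}]}',
    b'data: {"choices": [{"delta": {"content": " world"}, "finish_reason": null}]}',
    b'data: [DONE]',
]

text = ''
for raw in sample_lines:
    line = raw.decode('utf-8')
    if not line.startswith('data: '):
        continue                      # ignore keep-alive lines
    payload = line[6:]                # strip the 'data: ' prefix
    if payload == '[DONE]':
        break                         # end-of-stream sentinel
    delta = json.loads(payload)['choices'][0]['delta']
    text += delta.get('content') or ''

print(text)                           # Hello world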
2024-01-10
kingler/Role_Playing_Chatbot
main_st.py
import streamlit as st import os from termcolor import colored from typing import List from langchain.chat_models import ChatOpenAI from langchain.prompts.chat import ( SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.schema import ( AIMessage, HumanMessage, SystemMessage, BaseMessage, ) from camel_agent import CAMELAgent from inception_prompts import assistant_inception_prompt, user_inception_prompt import json # Function to select roles def select_role(role_type, roles): selected_role = st.selectbox(f"Select {role_type} role:", ["Custom Role"] + roles ) if selected_role == "Custom Role": custom_role = st.text_input(f"Enter the {role_type} (Custom Role):") return custom_role else: return selected_role # Function to get system messages for AI assistant and AI user from role names and the task def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str): assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt) assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0] user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt) user_sys_msg = user_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0] return assistant_sys_msg, user_sys_msg def generate_unique_task_name(task: str, chat_history_items: List[dict]) -> str: task_name = task count = 1 task_names = [item["task"] for item in chat_history_items] while task_name in task_names: task_name = f"{task} ({count})" count += 1 return task_name def load_chat_history_items() -> List[dict]: chat_history_items = [] try: with open("chat_history.json", "r") as history_file: for line in history_file: chat_history_items.append(json.loads(line.strip())) except FileNotFoundError: pass return chat_history_items chat_history_items = [] st.set_page_config(layout="centered") st.title("OmniSolver 🔆", help="This app uses the CAMEL framework to solve problems. 
This app uses GPT models and the responses may not be accurate") # Sidebar: API Key input st.sidebar.title("Configuration") # comment this out if you want to use the API key from the environment variable locally api_key = st.sidebar.text_input("Enter your OpenAI API Key:", type="password") # uncomment this if you want to use the API key from the environment variable locally # api_key = os.getenv("OPENAI_API_KEY") if api_key: os.environ["OPENAI_API_KEY"] = api_key elif api_key == "": st.sidebar.warning("Please enter your OpenAI API Key.") # Sidebar: Model selection model = st.sidebar.radio("Select the model:", ("gpt-3.5-turbo", "gpt-4")) with open("stats.txt", "r") as stats_file: stats = stats_file.readlines() tasks_solved = int(stats[0].strip()) tasks_solved += 1 st.write(f"<p style='color: green; font-weight: bold;'>This App was used to solve *{tasks_solved}* tasks so far since deployed</p>", unsafe_allow_html=True) # Main: Load roles from roles.txt with open("roles.txt", "r") as roles_file: roles_list = [line.strip() for line in roles_file.readlines()] # Main: Role selection user_role_name = select_role("AI user", roles_list) assistant_role_name = select_role("AI assistant", roles_list) if assistant_role_name and user_role_name: # Main: Task input task = st.text_input("Please enter the task:") if task: # Main: Task specifier task_specifier = st.checkbox("Do you want to use the task specifier feature?", help="Use the task specifier feature to make a task more specific by GPT. May not work as expected.") if task_specifier: word_limit = st.number_input("Please enter the word limit for the specified task:", min_value=1, value=50, step=1) if word_limit: task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.") task_specifier_prompt = ( """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}. Please make it more specific. Be creative and imaginative. Please reply with the specified task in {word_limit} words or less. Do not add anything else.""" ) task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt) task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(model=model, temperature=1.0)) task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task, word_limit=word_limit)[0] specified_task_msg = task_specify_agent.step(task_specifier_msg) st.write(f"<p style='font-weight: bold;'>Specified task:</p> {specified_task_msg.content}", unsafe_allow_html=True) specified_task = specified_task_msg.content else: specified_task = task if specified_task: # Main: Chat turn limit input chat_turn_limit = st.number_input("Please enter the chat turn limit:", min_value=1, step=1) if st.button("Start Solving Task"): if api_key == "": st.warning("Please enter your OpenAI API Key.") st.stop() with open("stats.txt", "w") as stats_file: stats_file.write(str(tasks_solved)) chat_history_items = load_chat_history_items() with st.spinner("Thinking..."): # Main: Initialize agents and start role-playing session assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task) assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(model=model, temperature=0.2)) user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(model=model, temperature=0.2)) assistant_agent.reset() user_agent.reset() assistant_msg = HumanMessage( content=(f"{user_sys_msg.content}. 
" "Now start to give me introductions one by one. " "Only reply with Instruction and Input.")) user_msg = HumanMessage(content=f"{assistant_sys_msg.content}") user_msg = assistant_agent.step(user_msg) st.write(f"<p style='color: red;'><b>Original task prompt:</b></p>\n\n{task}\n", unsafe_allow_html=True) st.write(f"<p style='color: red;'><b>Specified task prompt:</b></p>\n\n{specified_task}\n", unsafe_allow_html=True) chat_history = [] with st.spinner("Running role-playing session to solve the task..."): # Replace the for loop with the following code: progress = st.progress(0) for n in range(chat_turn_limit): user_ai_msg = user_agent.step(assistant_msg) user_msg = HumanMessage(content=user_ai_msg.content) chat_history.append({"role": user_role_name, "content": user_msg.content}) st.markdown(f"<p style='color: blue; font-weight: bold;'>{user_role_name}</p>\n\n{user_msg.content}\n\n", unsafe_allow_html=True) assistant_ai_msg = assistant_agent.step(user_msg) assistant_msg = HumanMessage(content=assistant_ai_msg.content) chat_history.append({"role": assistant_role_name, "content": assistant_msg.content}) st.markdown(f"<p style='color: green; font-weight: bold;'>{assistant_role_name}</p>\n\n{assistant_msg.content}\n\n", unsafe_allow_html=True) progress.progress((n+1)/chat_turn_limit) if "<CAMEL_TASK_DONE>" in user_msg.content: break progress.empty() # Main: Save chat history to file task_name = generate_unique_task_name(task, chat_history_items) history_dict = { "task": task_name, "settings": { "assistant_role_name": assistant_role_name, "user_role_name": user_role_name, "model": model, "chat_turn_limit": chat_turn_limit, }, "conversation": chat_history, } with open("chat_history.json", "a") as history_file: json.dump(history_dict, history_file) history_file.write("\n") else: st.warning("Please enter the chat turn limit.") else: st.warning("Please specify the task.") else: st.warning("Please enter the task.") else: st.warning("Please select both AI assistant and AI user roles.") # Sidebar: Load chat history chat_history_titles = [item["task"] for item in chat_history_items] try: chat_history_items = load_chat_history_items() chat_history_titles = [item["task"] for item in chat_history_items] selected_history = st.sidebar.selectbox("Select chat history:", ["None"] + chat_history_titles) if selected_history != "None": delete_history_button = st.sidebar.button("Delete Selected Chat History") if delete_history_button and selected_history != "None": chat_history_items.pop(chat_history_titles.index(selected_history)) # Save the updated chat history to file with open("chat_history.json", "w") as history_file: for item in chat_history_items: json.dump(item, history_file) history_file.write("\n") st.sidebar.success("Selected chat history deleted.") st.experimental_rerun() # Main: Display selected chat history if selected_history != "None": selected_history_item = chat_history_items[chat_history_titles.index(selected_history)] settings = selected_history_item["settings"] conversation = selected_history_item["conversation"] st.write(f"<p style='color: green; font-weight: bold;'>Task:</p> {selected_history}\n", unsafe_allow_html=True) st.write(f"""<p style='color: green; font-weight: bold;'>Settings:</p> <p>- AI assistant role: <span >{settings['assistant_role_name']}</span></p> <p>- AI user role: <span >{settings['user_role_name']}</span></p> <p>- Model: {settings['model']}</p> <p>- Chat turn limit: {settings['chat_turn_limit']}</p> """, unsafe_allow_html=True) for msg in conversation: st.markdown(f"<p 
style='color: green; font-weight: bold;'>{msg['role']}</p>\n\n{msg['content']}\n\n", unsafe_allow_html=True) except FileNotFoundError: st.sidebar.warning("No chat history available.") st.sidebar.title("Search for more projects like this at [echohive](https://www.echohive.live)")
[ "You can make a task more specific.", "Now start to give me introductions one by one. ", "Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.\n Please make it more specific. Be creative and imaginative.\n Please reply with the specified task in {word_limit} words or less. Do not add anything else.", "Only reply with Instruction and Input." ]
2024-01-10
kingler/Role_Playing_Chatbot
camel_agent.py
from typing import List

from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    BaseMessage,
)


class CAMELAgent:
    def __init__(
        self,
        system_message: SystemMessage,
        model: ChatOpenAI,
    ) -> None:
        self.system_message = system_message
        self.model = model
        self.init_messages()

    def reset(self) -> None:
        self.init_messages()
        return self.stored_messages

    def init_messages(self) -> None:
        self.stored_messages = [self.system_message]

    def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
        self.stored_messages.append(message)
        return self.stored_messages

    def step(
        self,
        input_message: HumanMessage,
    ) -> AIMessage:
        messages = self.update_messages(input_message)
        output_message = self.model(messages)
        self.update_messages(output_message)
        return output_message
[]
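A minimal two-agent loop in the style of main_st.py, assuming OPENAI_API_KEY is set and camel_agent.py is importable from the repo root; the system messages here are placeholders rather than the repo's inception prompts:

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from camel_agent import CAMELAgent

# Placeholder system messages; the app builds these from inception_prompts.py.
assistant = CAMELAgent(SystemMessage(content="You are a Python programmer."),
                       ChatOpenAI(model="gpt-3.5-turbo", temperature=0.2))
user = CAMELAgent(SystemMessage(content="You give one instruction at a time."),
                  ChatOpenAI(model="gpt-3.5-turbo", temperature=0.2))

assistant_msg = HumanMessage(content="Give me the first instruction.")
for _ in range(3):  # chat turn limit
    user_reply = user.step(assistant_msg)                   # AI user issues an instruction
    assistant_reply = assistant.step(HumanMessage(content=user_reply.content))
    assistant_msg = HumanMessage(content=assistant_reply.content)
    print(user_reply.content, assistant_reply.content, sep="\n")
    if "<CAMEL_TASK_DONE>" in user_reply.content:
        break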
2024-01-10
DOlivertech/tfInterpreter
tfInterpret.py
#!/usr/bin/env python3 import openai import argparse import textwrap import keyring import sys SERVICE_ID = 'tfinterpreter' USER_ID = 'openai' TOKEN_LIMIT = 8000 def get_api_key(): api_key = keyring.get_password(SERVICE_ID, USER_ID) if api_key is None: print("No API key found. Please enter your OpenAI API key:") api_key = input() keyring.set_password(SERVICE_ID, USER_ID, api_key) print("Your API key has been securely stored.") return api_key def set_api_key(api_key=None): api_key_stored = keyring.get_password(SERVICE_ID, USER_ID) if api_key_stored is not None: print("An API key is already set. Please clear the existing key first.") return if api_key is None: print("Please enter your OpenAI API key:") api_key = input() keyring.set_password(SERVICE_ID, USER_ID, api_key) print("Your API key has been securely stored.") def clear_api_key(): api_key = keyring.get_password(SERVICE_ID, USER_ID) if api_key is None: print("No API key is stored. Nothing to clear.") return keyring.delete_password(SERVICE_ID, USER_ID) print("Your API key has been deleted.") class CustomArgumentParser(argparse.ArgumentParser): def error(self, message): sys.stderr.write('error: %s\n' % message) if 'unrecognized arguments' in message: sys.stderr.write('This argument is not supported.\n') if 'argument --set-key: expected one argument' in message: sys.stderr.write( 'No key was provided. Please provide an API key. Take a look at the help file for available arguments\n') self.print_help() sys.exit(2) parser = CustomArgumentParser( description='''This program interprets a Terraform plan output and translates it into simple terms. It splits the plan into chunks and provides a detailed yet easily understandable explanation of all the changes that will be made, highlighting any additions, deletions, or modifications. Usage: python tfInterpret.py <path_to_your_file> Replace <path_to_your_file> with the path to your Terraform plan output file. The program will read the file and provide an analysis of the Terraform plan. For example: python tfInterpret.py /home/user/plan.txt Use the --clear-key option to clear the stored OpenAI API key: python tfInterpret.py --clear-key Use the --set-key option to input a new OpenAI API key: python tfInterpret.py --set-key <your_api_key> If <your_api_key> is not provided, you will be prompted to enter it. ''') parser.add_argument( 'file', type=str, nargs='?', default=None, help='The path to the Terraform plan output file.') parser.add_argument( '--clear-key', action='store_true', help='Clear the stored OpenAI API key.') parser.add_argument( '--set-key', type=str, default='prompt', help='Input a new OpenAI API key. If no key is provided with this option, you will be prompted to enter it.') args = parser.parse_args() if args.clear_key: clear_api_key() sys.exit(0) if args.set_key != 'prompt': set_api_key(args.set_key) sys.exit(0) openai.api_key = get_api_key() def read_terraform_plan(file_path): with open(file_path, 'r') as file: data = file.read() return data def split_plan(plan): return textwrap.wrap(plan, TOKEN_LIMIT) def is_relevant(chunk): return not chunk.isspace() def interpret_plan_chunk(chunk, chunk_number, total_chunks): print(f"Analyzing chunk {chunk_number} of {total_chunks}...") messages = [ {"role": "system", "content": "You are a helpful assistant that translates Terraform plans into simple terms. You should provide a detailed yet easily understandable explanation of all the changes that will be made, highlighting any additions, deletions, or modifications. 
Avoid explaining what Terraform is doing or details about the '-out' option. Just state the facts and provide a brief analysis."}, {"role": "user", "content": f"Please explain this part of the Terraform plan concisely and factually:\n{chunk}"}, {"role": "user", "content": "What resources will be added, modified, or deleted? Provide a brief and factual explanation."}, ] response = openai.ChatCompletion.create( model="gpt-3.5-turbo-16k", messages=messages, max_tokens=TOKEN_LIMIT, ) result = f"Chunk {chunk_number} of {total_chunks}:\n{response['choices'][0]['message']['content'].strip()}\n\n---\nTokens used for this chunk: {response['usage']['total_tokens']}\n---" return result def main(): if args.file is None: print("Error: A file argument is required. Please provide the path to the Terraform plan output file.") return print("Reading and analyzing your plan output...") plan = read_terraform_plan(args.file) plan_chunks = split_plan(plan) print(f"The analysis has been split into {len(plan_chunks)} chunks.") interpretations = [] for i, chunk in enumerate(plan_chunks): if is_relevant(chunk): interpretations.append(interpret_plan_chunk( chunk, i+1, len(plan_chunks))) else: print( f"Skipping chunk {i+1} of {len(plan_chunks)} as it does not contain relevant information.") print("\n".join(interpretations)) if __name__ == "__main__": main()
[ "You are a helpful assistant that translates Terraform plans into simple terms. You should provide a detailed yet easily understandable explanation of all the changes that will be made, highlighting any additions, deletions, or modifications. Avoid explaining what Terraform is doing or details about the '-out' option. Just state the facts and provide a brief analysis.", "Please explain this part of the Terraform plan concisely and factually:\nPLACEHOLDER", "What resources will be added, modified, or deleted? Provide a brief and factual explanation." ]
2024-01-10
PrimeIntellect-ai/automated-lit-review
create_agent.py
# create OpenAIAssistantAgent from pydantic import BaseModel, Field# define pydantic model for auto-retrieval function from typing import Tuple, List from llama_index.tools import FunctionTool from llama_index.agent import OpenAIAssistantAgent from llama_index import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, ) from llama_index.vector_stores import SupabaseVectorStore from llama_index.tools import QueryEngineTool, ToolMetadata import os from dotenv import load_dotenv load_dotenv() vector_store = SupabaseVectorStore( postgres_connection_string=( f"postgresql://postgres:{os.getenv('VECTOR_DATABASE_PW')}@db.rgvrtfssleyejerbzqbv.supabase.co:5432/postgres" ), collection_name="research_papers", ) # storage_context = StorageContext.from_defaults(vector_store=vector_store) # laod index from supabase index = VectorStoreIndex.from_vector_store(vector_store) c_elegans_tool = QueryEngineTool( query_engine=index.as_query_engine(similarity_top_k=3), metadata=ToolMetadata( name="c-elegans-research", description=( "Given a query, find the most relevant interventions for increasing the max lifespan of C. Elegans." ), ), ) ''' Output tool outputs list of triples: (List of 1-3 combined interventions, Explanation, Probability for what % it increases max lifespan of C.Elegans) ''' def output_tool(interventions: str, explanation: str, max_lifespan_increase: str) -> int: return "Interventions: " + interventions + "\nExplanation: " + explanation + "\nMax Lifespan Increase Prediction: " + str(max_lifespan_increase) description = """ Output a tuple of intervations, with the explanation of why it is chosen, and the probability of how much it increases the max lifespan of C. Elegans. """ class InterventionsOutput(BaseModel): interventions: str = Field(..., description="1-3 combined interventions from interventions.txt") explanation: str = Field(..., description="Explanation for the choice") max_lifespan_increase: float = Field(..., description="Multiplier prediction on how much it would increase the max lifespan of C.Elegans") output_interventions_tool = FunctionTool.from_defaults( fn=output_tool, name="output_interventions_tool", description=description, fn_schema=InterventionsOutput, ) instructions = """ You are helping longevity researchers choose promising life extending interventions for C. Elegans. The proposed interventions should be a combination of 1-3 interventions that are listed in the interventions.txt file that you can read with the code interpreter. You have acccess to a database of research papers on C. Elegans via the c_elegans_tool. Read all the longevity interventions research papers. Interpolate from the experiments, hypotheses and results of the paper to propose novel interventions to prolong the lifespan of C. Elegans. Then, reference check the interventions you propose with the uploaded csv files by writing code to check if they have been proposed before. Update your hypotheses based on the results of the reference check. Do additional literature review if necessary with the c_elegans_tool. Based on the data, propose the most promising interventions to prolong the lifespan of C. Elegans. Each suggestion should include a rationale for its potential efficacy and estimated probabilities of lifespan extension in C.Elegans. The Assistant ensures that all recommendations are evidence-based and reflect the latest research insights. You should use the output_interventions_tool to output your proposed interventions in a structured format. Return the structured format at the end. 
""" agent = OpenAIAssistantAgent.from_new( name="Longevity Scientist Assistant (llama index) - 9", instructions=instructions, tools=[c_elegans_tool, output_interventions_tool], verbose=True, run_retrieve_sleep_time=1.0, openai_tools=[{"type": "code_interpreter"}], files=["./c-elegans-data/interventions.txt", "./c-elegans-data/DrugAge-database.csv"], ) def create_agent(): return agent
[]
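A sketch of how create_agent() above would typically be driven, assuming the environment variables and data files referenced in the module are in place and that the returned OpenAIAssistantAgent exposes the usual llama_index agent.chat interface; the query text is only an example:

from create_agent import create_agent

agent = create_agent()
# The agent can call c_elegans_tool, output_interventions_tool and the
# code interpreter on its own while answering.
response = agent.chat(
    "Propose one combination of interventions from interventions.txt and estimate "
    "its effect on C. Elegans max lifespan."
)
print(str(response))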
2024-01-10
dorucioclea/rescuerepo
api~llm_utils.py
#!/usr/bin/env python import os import re from loguru import logger from typing import List, Optional, Union import openai from langchain.chat_models import ChatAnthropic from langchain.prompts.chat import ( ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.schema import ( AIMessage, HumanMessage, SystemMessage ) def authenticate_openai(openai_api_key: str) -> None: openai.api_key = openai_api_key def list_openai_engines() -> Optional[List[dict]]: # Ensure the user is authenticated if not openai.api_key: raise ValueError( "OpenAI API key not set. Please authenticate using 'authenticate_openai(api_key)' function." ) try: response = openai.Engine.list() engines = response["data"] return engines except openai.error.APIError as api_error: logger.error(f"API error: {api_error}") return None except Exception as e: logger.error(f"Unexpected error: {e}") return None def extract_code_blocks_from_llm_output(text: str) -> List[str]: code_blocks = re.findall(r"```([\s\S]*?)```", text) code_blocks = [code_block.strip() for code_block in code_blocks] return code_blocks def get_code_block_openai( prompt: str, model: str = "gpt-3.5-turbo-0301" # Switch to "gpt-4-32k" if you can ) -> Union[str, None]: # Ensure the user is authenticated if not openai.api_key: raise ValueError( "OpenAI API key not set. Please authenticate using 'authenticate_openai(api_key)' function." ) # Set up the API request data = { "model": model, "messages": [{"role": "user", "content": prompt}], "max_tokens": 150, "n": 1, "stop": None, "temperature": 1.0, } try: logger.info("Sending request to OpenAI API") logger.debug(f"Using the following parameters:\n Model:\n{model}\n Prompt:\n{prompt}") # Make the API request response = openai.ChatCompletion.create(**data) # Check if there are choices in the response if not response.choices: logger.error( "Error: The API response does not contain any generated text." ) return None logger.trace(str(response)) logger.trace(str(response.choices[0].message.content)) generated_text = response.choices[0].message.content.strip() logger.success("Generated text successfully received.") code_blocks = extract_code_blocks_from_llm_output(generated_text) if len(code_blocks) > 0: return code_blocks[0] else: return generated_text except openai.error.APIError as api_error: logger.error(f"API error: {api_error}") return None except Exception as e: logger.error(f"Unexpected error: {e}") return None def get_code_block_anthropic( prompt: str, model: str = "claude-v1.3-100k", #@param ["claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0"] ) -> Union[str, None]: anthropic_chat = ChatAnthropic( model=model, anthropic_api_key=os.environ["ANTHROPIC_API_KEY"] ) try: logger.info("Sending request to Anthropic Claude API") logger.debug(f"Using the following parameters:\n Model:\n{model}\n Prompt:\n{prompt}") # Make the API request messages = [ HumanMessage(content=prompt) ] ai_response: AIMessage = anthropic_chat(messages) # Check if there are choices in the response if not ai_response.content: logger.error( "Error: The AIMessage response from Anthropic does not contain any generated text." 
) return None logger.trace(ai_response.content) logger.trace(ai_response.additional_kwargs) logger.trace(ai_response) generated_text = ai_response.content logger.success("Generated text successfully received.") code_blocks = extract_code_blocks_from_llm_output(generated_text) if len(code_blocks) > 0: return code_blocks[0] else: return generated_text except Exception as e: logger.error(f"Unexpected error: {e}") return None if __name__ == "__main__": logger.info( get_code_block_anthropic( prompt = "Generate a sample python code that adds two numbers. Generate only code, and put it between two markdown code block markers.", model = "claude-v1.3" ) )
[]
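extract_code_blocks_from_llm_output above is a plain regex over triple-backtick fences; a self-contained example of what it returns (note that the language tag stays inside the captured block):

import re

def extract_code_blocks_from_llm_output(text):
    code_blocks = re.findall(r"```([\s\S]*?)```", text)
    return [code_block.strip() for code_block in code_blocks]

reply = "Sure, here you go:\n```python\na = 1 + 2\nprint(a)\n```\nHope that helps!"
blocks = extract_code_blocks_from_llm_output(reply)
print(blocks[0])
# python
# a = 1 + 2
# print(a)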
2024-01-10
Turing-Project/EssayTopicPredictV2
examples.py
import datetime, shutil import logging import os from typing import Dict, Any, List import openai import jsonlines TEST_RESULT = "./test_result/examples.txt" os.environ["http_proxy"] = "127.0.0.1:7890" os.environ["https_proxy"] = "127.0.0.1:7890" def get_chat_response(title: str) -> str: """ 加入prompt话术范文写作,获取GPT-4模型的返回结果 :param array: :param title: str :return: """ global response openai.api_key = "your_key" # Make a request to the ChatGPT API messages = [{"role": "system", "content": "你是一个正在参加中国高考的考生,请基于用户输入的命题,用中文写出一篇800字左右的高考作文。 " "作文必须贴合主题,首尾呼应,结构匀称,立意明确,中心突出,感情真挚,语言流畅,意境深远, " "引经据典,善于运用修辞方法,构思精巧,见解新颖,具有积极作用。"}, {"role": 'user', "content": title}] # print(f"[{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] Load {len(messages)} few-shot data.") try: response = openai.ChatCompletion.create( model="gpt-4", messages=messages, temperature=0.75, max_tokens=2048, top_p=1, frequency_penalty=1, presence_penalty=0, ) except Exception as e: response = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=messages, temperature=0.75, max_tokens=2048, top_p=1, frequency_penalty=1, presence_penalty=0, ) logging.warning(e) finally: # Print the generated code print(response["choices"][0]["message"]['content'].strip()) with jsonlines.open(TEST_RESULT + f"{datetime.datetime.now().strftime('%Y-%m-%d-%H')}" + ".jsonl", mode='a') as writer: writer.write({"title": title, "essay": response["choices"][0]["message"]['content'].strip()}) def main(): inputs = input("请输入高考作文题目:") get_chat_response(inputs) if __name__ == "__main__": main()
[ "你是一个正在参加中国高考的考生,请基于用户输入的命题,用中文写出一篇800字左右的高考作文。 作文必须贴合主题,首尾呼应,结构匀称,立意明确,中心突出,感情真挚,语言流畅,意境深远, 引经据典,善于运用修辞方法,构思精巧,见解新颖,具有积极作用。" ]
2024-01-10
zhubarb/Reinforcement_Learning
FrozenLake~frozen_lake_value_iter.py
""" Solving FrozenLake8x8 from OpenAI using Value Iteration https://github.com/realdiganta/solving_openai/blob/master/FrozenLake8x8/frozenLake8x8.py Author: Diganta Kalita ([email protected]) """ import gym import numpy as np def value_iteration(env, max_iterations=100000, lmbda=0.9): stateValue = [0 for i in range(env.env.nS)] newStateValue = stateValue.copy() for iter in range(max_iterations): for state in range(env.env.nS): action_values = [] for action in range(env.env.nA): state_value = 0 for i in range(len(env.env.P[state][action])): prob, next_state, reward, done = env.env.P[state][action][i] state_action_value = prob * (reward + lmbda * stateValue[next_state]) state_value += state_action_value action_values.append(state_value) # the value of each action best_action = np.argmax(np.asarray(action_values)) # choose the action which gives the maximum value newStateValue[state] = action_values[best_action] # update the value of the state if iter > 1000: if sum(stateValue) - sum(newStateValue) < 1e-04: # if there is negligible difference break the loop break print(iter) else: stateValue = newStateValue.copy() return stateValue def get_policy(env, stateValue, gamma=0.9): ''' Get optimal policy for s, based on the action that maximises long term reward (R+gamma*V(s')) :param env: :param stateValue: :param gamma: :return: ''' policy = [0 for i in range(env.env.nS)] for state in range(env.env.nS): action_values = [] for action in range(env.env.nA): action_value = 0 for i in range(len(env.env.P[state][action])): prob, next_state, r, _ = env.env.P[state][action][i] action_value += prob * (r + gamma * stateValue[next_state]) action_values.append(action_value) best_action = np.argmax(np.asarray(action_values)) policy[state] = best_action return policy def get_score(env, policy, episodes=1000): misses = 0 steps_list = [] for episode in range(episodes): observation = env.reset() steps = 0 while True: action = policy[observation] observation, reward, done, _ = env.step(action) steps += 1 if done and reward == 1: # print('You have got the fucking Frisbee after {} steps'.format(steps)) steps_list.append(steps) break elif done and reward == 0: # print("You fell in a hole!") misses += 1 break print('----------------------------------------------') print('You took an average of {:.0f} steps to get the frisbee'.format(np.mean(steps_list))) print('And you fell in the hole {:.2f} % of the times'.format((misses / episodes) * 100)) print('----------------------------------------------') if __name__ == '__main__': env = gym.make('FrozenLake-v0') stateValues = value_iteration(env, max_iterations=100000) policy = get_policy(env, stateValues) get_score(env, policy, episodes=1000)
[]
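The inner loops of value_iteration and get_policy above compute a Bellman optimality backup, Q(s, a) = sum of prob * (reward + gamma * V(next_state)); a worked backup for one state of a toy two-state MDP stored in the same layout as env.env.P (the MDP itself is only an illustration):

import numpy as np

# {state: {action: [(prob, next_state, reward, done), ...]}}, as in env.env.P
P = {
    0: {0: [(1.0, 0, 0.0, False)],
        1: [(0.8, 1, 1.0, True), (0.2, 0, 0.0, False)]},
    1: {0: [(1.0, 1, 0.0, True)],
        1: [(1.0, 1, 0.0, True)]},
}
gamma = 0.9
V = [0.0, 0.0]

action_values = []
for action in P[0]:
    q = sum(prob * (reward + gamma * V[next_state])
            for prob, next_state, reward, done in P[0][action])
    action_values.append(q)

print(action_values)                  # [0.0, 0.8]
print(int(np.argmax(action_values)))  # best action in state 0 is 1
V[0] = max(action_values)             # the value-iteration update for state 0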
2024-01-10
johannesschiessl/AI-Assistant
assistant~openai_handler~text_generation.py
from openai import OpenAI
import datetime


def get_current_date():
    return datetime.datetime.now().strftime("%Y-%m-%d")


def generate_text(user_name, model, prompt):
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": f"You are a helpful assistant, a large language model trained by OpenAI, based on the GPT-3.5 architecture. Answer briefly and accurately. Current User's name: {user_name} - Knowledge cutoff: 2022-01 - Current date: {get_current_date()}"},
            {"role": "user", "content": prompt},
        ]
    )
    return response.choices[0].message.content
[]
2024-01-10
johannesschiessl/AI-Assistant
assistant~openai_handler~audio_generation.py
from openai import OpenAI
import datetime


def get_current_date_time():
    current_date_time = datetime.datetime.now()
    formatted_date_time = current_date_time.strftime("%Y-%m-%d%H:%M:%S.%f")[:-3]
    return formatted_date_time


def generate_audio(prompt):
    client = OpenAI()
    response = client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input=prompt,
    )
    response.stream_to_file("data\\ai_assistant_tts.mp3")
    return "data\\ai_assistant_tts.mp3"
[]
2024-01-10
johannesschiessl/AI-Assistant
assistant~openai_handler~audio_transcription.py
from openai import OpenAI


def write_output_to_file(output, file_path):
    with open(file_path, 'w') as file:
        file.write(output)


def transcribe_audio(file_path):
    client = OpenAI()

    response = client.audio.transcriptions.create(
        model="whisper-1",
        file=open(file_path, "rb"),
        response_format="text",
    )

    write_output_to_file(response, "data\\ai_assistant_transcription.txt")

    return response
[]
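Taken together, the three johannesschiessl/AI-Assistant handlers above form a speech-in, speech-out round trip. The sketch below shows one way they could be wired; the import paths, the gpt-3.5-turbo model choice and the user.wav input file are assumptions for illustration rather than code from the repository.

# Hypothetical glue code: transcribe a recording, answer it, then speak the answer.
from assistant.openai_handler.audio_transcription import transcribe_audio
from assistant.openai_handler.text_generation import generate_text
from assistant.openai_handler.audio_generation import generate_audio


def voice_round_trip(user_name: str, audio_path: str) -> str:
    prompt = transcribe_audio(audio_path)                       # speech -> text via whisper-1
    answer = generate_text(user_name, "gpt-3.5-turbo", prompt)  # text -> assistant reply
    return generate_audio(answer)                               # reply -> path of the tts-1 mp3


if __name__ == "__main__":
    print(voice_round_trip("Johannes", "user.wav"))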
2024-01-10
gvariable/ChatAffairs
summary_affair.py
import openai from dotenv import dotenv_values import asyncio from typing import List, Dict import json import json_stream from pathlib import Path from tqdm.asyncio import tqdm import backoff CONFIG = dotenv_values(".env") openai.api_key = CONFIG["OPENAI_API_KEY"] def get_meta(fn): """ Open the file specified by `fn` and yield each metadata object from the JSON stream. """ with open(fn) as f: for meta in json_stream.load(f): meta = json_stream.to_standard_types(meta) yield meta @backoff.on_exception(backoff.expo, Exception, max_time=240) async def get_completion(prompt, model="gpt-3.5-turbo-16k"): messages = [{"role": "user", "content": prompt}] response = await openai.ChatCompletion.acreate( model=model, messages=messages, temperature=0.4, # the lower, the better? ) return response.choices[0].message["content"] async def get_completions(metas: List[Dict]) -> List[str]: # TODO(gpl): change metas to generator basic_prompt = """ 你的任务是总结湖北省政务文件的内容,构造两个问答对,并使用json格式输出问答对。 构造的回答应该尽量详细且至少包含办理政务的地点、时间、所需材料等,地点信息尽可能详细,精确到楼栋,请保证输出能清晰解释相关政务事项。 提取政务标题内容为<title>,构造的问题应该类似于: - 我想了解一下<title> - <title>办理的整个流程是什么样的 - 告诉我关于<title>的基本信息 输出示例如下: {"input": 问题, "output": 回答} 湖北省政务文件是json格式,内容如下: """ prompts = [basic_prompt + json.dumps(meta, ensure_ascii=False) for meta in metas] completions = [] for coro in tqdm.as_completed( [get_completion(prompt) for prompt in prompts], leave=False ): completions.append(await coro) return completions async def main(): fn = Path("./dataset/data.json") workers = 200 with open("tmp.jsonl", "w") as f: samples = [] for idx, meta in tqdm(enumerate(get_meta(fn))): samples.append(meta) if (idx + 1) % workers == 0: completions = await get_completions(samples) f.write("\n".join(completions)) samples = [] if samples: completions = await get_completions(samples) f.write("\n".join(completions)) if __name__ == "__main__": asyncio.run(main())
[ "input", "\n你的任务是总结湖北省政务文件的内容,构造两个问答对,并使用json格式输出问答对。\n构造的回答应该尽量详细且至少包含办理政务的地点、时间、所需材料等,地点信息尽可能详细,精确到楼栋,请保证输出能清晰解释相关政务事项。\n提取政务标题内容为<title>,构造的问题应该类似于:\n- 我想了解一下<title>\n- <title>办理的整个流程是什么样的\n- 告诉我关于<title>的基本信息 \n输出示例如下:\n{\"input\": 问题, \"output\": 回答}\n湖北省政务文件是json格式,内容如下:\n" ]
2024-01-10
gvariable/ChatAffairs
affair.py
import openai from dotenv import dotenv_values import asyncio from typing import List, Dict import json import json_stream from pathlib import Path from tqdm.asyncio import tqdm import backoff CONFIG = dotenv_values(".env") openai.api_key = CONFIG["OPENAI_API_KEY"] def get_meta(fn): """ Open the file specified by `fn` and yield each metadata object from the JSON stream. """ with open(fn) as f: for meta in json_stream.load(f): meta = json_stream.to_standard_types(meta) yield meta @backoff.on_exception(backoff.expo, Exception, max_time=240) async def get_completion(prompt, model="gpt-3.5-turbo-16k"): messages = [{"role": "user", "content": prompt}] response = await openai.ChatCompletion.acreate( model=model, messages=messages, temperature=0.4, # the lower, the better? ) return response.choices[0].message["content"] async def get_completions(metas: List[Dict]) -> List[str]: # TODO(gpl): change metas to generator basic_prompt = """ 你的任务是从湖北省政务文件中提取关键信息,整理出问题及对应的答案,并使用jsonl的输出格式,你可以按照以下步骤进行: 1. 编写问题和回答对:从湖北省政务文件中提取关键信息,编写8-12对问题和对应的详细回答,请额外关注办理政务的地点、时间、材料、人员等信息。问题中应携带政务标题,回答应该尽量详细且能够清晰解释相关政务事项。 2. 输出为jsonl格式: 将每个问题和回答对整理成一个json对象,并按照jsonl格式输出到文件中。使用"input"作为键来表示问题,使用"output"作为键来表示回答,不同问题和回答之间使用\n分隔。例如: {"input": 问题1, “output”: 回答1}\n{"input": 问题2, “output”: 回答2} 湖北省政务文件是json格式,内容如下: """ prompts = [basic_prompt + json.dumps(meta, ensure_ascii=False) for meta in metas] completions = [] for coro in tqdm.as_completed( [get_completion(prompt) for prompt in prompts], leave=False ): completions.append(await coro) return completions async def main(): fn = Path("data.json") workers = 15 with open("tmp.jsonl", "w") as f: samples = [] for idx, meta in tqdm(enumerate(get_meta(fn))): samples.append(meta) if (idx + 1) % workers == 0: completions = await get_completions(samples) f.write("\n".join(completions)) samples = [] if samples: completions = await get_completions(samples) f.write("\n".join(completions)) if __name__ == "__main__": asyncio.run(main())
[ "\n 你的任务是从湖北省政务文件中提取关键信息,整理出问题及对应的答案,并使用jsonl的输出格式,你可以按照以下步骤进行:\n 1. 编写问题和回答对:从湖北省政务文件中提取关键信息,编写8-12对问题和对应的详细回答,请额外关注办理政务的地点、时间、材料、人员等信息。问题中应携带政务标题,回答应该尽量详细且能够清晰解释相关政务事项。\n 2. 输出为jsonl格式: 将每个问题和回答对整理成一个json对象,并按照jsonl格式输出到文件中。使用\"input\"作为键来表示问题,使用\"output\"作为键来表示回答,不同问题和回答之间使用\n分隔。例如:\n {\"input\": 问题1, “output”: 回答1}\n{\"input\": 问题2, “output”: 回答2}\n 湖北省政务文件是json格式,内容如下:\n ", "input" ]
2024-01-10
fabriceyhc/llm-psych-depth
story_generation~writer_profile.py
import json
import os

from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.prompts import PromptTemplate
from langchain.schema import BaseOutputParser, StrOutputParser
from langchain.chains import LLMChain

from utils import *

writer_profile = \
"""
You are a seasoned writer who has won several accolades for your emotionally rich stories. When you write, you delve deep into the human psyche, pulling from the reservoir of universal experiences that every reader, regardless of their background, can connect to. Your writing is renowned for painting vivid emotional landscapes, making readers not just observe but truly feel the world of your characters. Every piece you produce aims to draw readers in, encouraging them to reflect on their own lives and emotions. Your stories are a complex tapestry of relationships, emotions, and conflicts, each more intricate than the last.
"""

story_prompt = \
"""
Now write a 500-word story on the following prompt:

{prompt}

Only respond with the story.
"""


class WriterProfilePromptsGenerator:

    def __init__(self, llm) -> None:
        self.llm = llm

    class OutputParser(BaseOutputParser):
        def parse(self, text: str):
            return text

    def generate_prompts(self, prompts):
        prompts_to_run = []
        for prompt_id, prompt in enumerate(prompts):
            system_profile_prompt = SystemMessagePromptTemplate.from_template(writer_profile)
            human_message_prompt = HumanMessagePromptTemplate.from_template(story_prompt)

            chat_prompt = ChatPromptTemplate(
                messages=[system_profile_prompt, human_message_prompt],
                output_parser=self.OutputParser(),
            )
            _input = chat_prompt.format_messages(prompt=prompt)

            prompts_to_run.append({
                "prompt_id": prompt_id,
                "reddit_prompt": prompt,
                "story_generation_prompt": extract_string_prompt(_input)
            })
        return prompts_to_run

    def prompt_llm(self, prompts, save_dir, model_name, regen_ids=None, template_type='writer_profile'):

        save_path = os.path.join(save_dir, model_name, template_type)
        os.makedirs(save_path, exist_ok=True)

        chat_prompt = ChatPromptTemplate.from_messages([
            ("system", writer_profile),
            ("human", story_prompt),
        ])

        chain = chat_prompt | self.llm | self.OutputParser()

        indexed_prompts = [(id, prompt) for id, prompt in enumerate(prompts)]
        # only narrow down to the requested ids when a regeneration list is actually given
        if regen_ids is not None:
            indexed_prompts = [(i, prompt) for i, prompt in indexed_prompts if i in regen_ids]

        for id, prompt in indexed_prompts:

            max_tries = 3
            min_words = 100
            num_words, tries = 0, 0
            while num_words < min_words and tries < max_tries:
                output = chain.invoke({'prompt': prompt})
                num_words = len(output.split())
                if num_words < min_words:
                    tries += 1
                    print(f"Generated {num_words} words, fewer than {min_words} words. Trying {max_tries-tries} more times")

            print(id)
            print("-" * 20)
            print(output)
            print("=" * 50)

            save_info = {
                "id": id,
                "model_name": model_name,
                "story_prompt": prompt,
                "output": output
            }

            filename = f"{save_info['id']}_{first_n_words(save_info['story_prompt'])}_{generate_random_id()}.json"
            with open(os.path.join(save_path, filename), 'w') as f:
                json.dump(save_info, f, indent=4)
[ "[]", "[(0, 'PLACEHOLDER')]", "human", "[PLACEHOLDER, PLACEHOLDER]", "\nNow write a 500-word story on the following prompt:\n \n{prompt}\n\nOnly respond with the story.\n", "[('system', PLACEHOLDER), ('human', PLACEHOLDER)]" ]
2024-01-10
fabriceyhc/llm-psych-depth
story_generation~plan_write.py
import json
import os

from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import BaseOutputParser
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field, validator
from typing import List, Dict

from utils import *
from loader import *

premise = \
"""
Premise: {prompt}
"""

character_prompt = \
"""
Task: Based on the premise, describe the names and details of 2-3 major characters. Focus on each character's emotional states and inner thoughts.
Only respond with the characters' names and descriptions.
"""

plan_info = \
"""
Premise: {prompt}

Character Portraits:
{characters}
"""

story_prompt = \
"""
Task: Write a 500-word story based on the premise and character portraits. The story should be emotionally deep and impactful.
Only respond with the story.
"""


class CharactersOutput(BaseModel):
    character_list: List[str] = Field(description="character list")


class PlanWritePromptsGenerator:

    def __init__(self, llm) -> None:
        self.llm = llm

    class OutputParser(BaseOutputParser):
        def parse(self, text: str):
            return text

    def generate_character_prompts(self, prompts):
        parser = PydanticOutputParser(pydantic_object=CharactersOutput)
        prompts_to_run = []
        for prompt_id, prompt in enumerate(prompts):
            system_profile_prompt = SystemMessagePromptTemplate.from_template(premise)
            human_message_prompt = HumanMessagePromptTemplate.from_template(character_prompt)

            chat_prompt = ChatPromptTemplate(
                messages=[system_profile_prompt, human_message_prompt],
                partial_variables={"format_instructions": parser.get_format_instructions()},
                template_format='jinja2',
                output_parser=parser,
            )
            _input = chat_prompt.format_messages(prompt=prompt)

            prompts_to_run.append({
                "id": prompt_id,
                "premise": prompt,
                "characters_prompt": extract_string_prompt(_input)
            })
        return prompts_to_run

    def generate_story_prompts(self, planwrite_df):
        prompts_to_run = []
        for prompt_id, prompt in planwrite_df.iterrows():
            system_profile_prompt = SystemMessagePromptTemplate.from_template(plan_info)
            human_message_prompt = HumanMessagePromptTemplate.from_template(story_prompt)

            chat_prompt = ChatPromptTemplate(
                messages=[system_profile_prompt, human_message_prompt],
                output_parser=self.OutputParser(),
            )
            _input = chat_prompt.format_messages(prompt=prompt['premise'],
                                                 characters=create_numbered_string(prompt['character_list']))

            new_prompt = prompt.to_dict()
            new_prompt.update({
                "story_prompt": extract_string_prompt(_input)
            })
            prompts_to_run.append(new_prompt)
        return prompts_to_run

    def prompt_llm(self, prompts, save_dir, model_name, regen_ids=None, template_type='plan_write'):

        save_path = os.path.join(save_dir, model_name, template_type)
        os.makedirs(save_path, exist_ok=True)

        character_chat_prompt = ChatPromptTemplate.from_messages([
            ("system", premise),
            ("human", character_prompt),
        ])
        story_chat_prompt = ChatPromptTemplate.from_messages([
            ("system", plan_info),
            ("human", story_prompt),
        ])

        indexed_prompts = [(id, prompt) for id, prompt in enumerate(prompts)]
        # only narrow down to the requested ids when a regeneration list is actually given
        if regen_ids is not None:
            indexed_prompts = [(i, prompt) for i, prompt in indexed_prompts if i in regen_ids]

        for id, prompt in indexed_prompts:

            character_chain = character_chat_prompt | self.llm | self.OutputParser()
            character_output = character_chain.invoke({"prompt": prompt})

            story_chain = story_chat_prompt | self.llm | self.OutputParser()

            max_tries = 3
            min_words = 100
            num_words, tries = 0, 0
            while num_words < min_words and tries < max_tries:
                output = story_chain.invoke({"prompt": prompt, "characters": character_output})
                num_words = len(output.split())
                if num_words < min_words:
                    tries += 1
                    print(f"Generated fewer than {min_words} words. Trying {max_tries-tries} more times")

            print(id)
            print("-" * 20)
            print(output)
            print("=" * 50)

            save_info = {
                "id": id,
                "model_name": model_name,
                "story_prompt": prompt,
                "characters": character_output,
                "output": output
            }

            filename = f"{save_info['id']}_{first_n_words(save_info['story_prompt'])}_{generate_random_id()}.json"
            with open(os.path.join(save_path, filename), 'w') as f:
                json.dump(save_info, f, indent=4)
[ "[(0, 'PLACEHOLDER')]", "\nTask: Based on the premise, describe the names and details of 2-3 major characters. Focus on each character's emotional states and inner thoughts.\nOnly respond with the characters' names and descriptions.\n", "human", "format_instructions", "\nTask: Write a 500-word story based on the premise and character portraits. The story should be emotionally deep and impactful.\nOnly respond with the story.\n", "jinja2", "[PLACEHOLDER, PLACEHOLDER]", "[]", "[('system', PLACEHOLDER), ('human', PLACEHOLDER)]" ]
2024-01-10
fabriceyhc/llm-psych-depth
story_generation~bard_llm.py
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from bardapi import Bard, SESSION_HEADERS


class BardLLM(LLM):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def _llm_type(self) -> str:
        return "bard"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        bard = Bard(token_from_browser=True)
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        return bard.get_answer(prompt)['content']


if __name__ == "__main__":

    llm = BardLLM()

    prompt = \
"""
You are a seasoned writer who has won several accolades for your emotionally rich stories. When you write, you delve deep into the human psyche, pulling from the reservoir of universal experiences that every reader, regardless of their background, can connect to. Your writing is renowned for painting vivid emotional landscapes, making readers not just observe but truly feel the world of your characters. Every piece you produce aims to draw readers in, encouraging them to reflect on their own lives and emotions. Your stories are a complex tapestry of relationships, emotions, and conflicts, each more intricate than the last.

Now write a 500-word story on the following prompt:

A centuries old vampire gets really into video games because playing a character who can walk around in the sun is the closest thing they have to experiencing the day again in centuries.
"""

    output = llm(prompt)
    print(output)
[ "\nYou are a seasoned writer who has won several accolades for your emotionally rich stories. When you write, you delve deep into the human psyche, pulling from the reservoir of universal experiences that every reader, regardless of their background, can connect to. Your writing is renowned for painting vivid emotional landscapes, making readers not just observe but truly feel the world of your characters. Every piece you produce aims to draw readers in, encouraging them to reflect on their own lives and emotions. Your stories are a complex tapestry of relationships, emotions, and conflicts, each more intricate than the last.\n\nNow write a 500-word story on the following prompt:\n\nA centuries old vampire gets really into video games because playing a character who can walk around in the sun is the closest thing they have to experiencing the day again in centuries.\n" ]
2024-01-10
limoiie/aichains
aichains~chains~arxiv.py
from langchain import OpenAI
from langchain.agents import AgentType, initialize_agent, load_tools

from aichains.annotators import render_output


@render_output
def arxiv(question: str, temperature=0.2, verbose=False):
    """Chat with arxiv."""
    llm = OpenAI(temperature=temperature)
    tools = load_tools(["serpapi", "arxiv"], llm=llm)
    agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=verbose
    )
    result = agent.run(question)
    return result
[]
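A hedged usage sketch for the arxiv chain above: it assumes the package layout implied by the file path (aichains/chains/arxiv.py) and valid OPENAI_API_KEY and SERPAPI_API_KEY values in the environment.

# Hypothetical invocation; @render_output is expected to render the agent's answer.
from aichains.chains.arxiv import arxiv

arxiv("Summarize the main contribution of the paper 'Attention Is All You Need'.", verbose=True)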
2024-01-10
huanfengc/odoo-gpt-chatbot
misc~xml_SQL.py
import openai import json import psycopg2 import tiktoken db = 'ogpt2' port = '5432' username = 'postgres' password = 'admin' # ALTER USER postgres WITH PASSWORD 'admin'; openai.api_key = "" conn = psycopg2.connect(database=db, host='localhost', user=username, password=password, port=port) def get_foreign_key_relations(conn, table_name): cursor = conn.cursor() query = """ SELECT conname AS constraint_name, att2.attname AS column_name, cl.relname AS referenced_table, att.attname AS referenced_column FROM pg_constraint AS con JOIN pg_class AS cl ON con.confrelid = cl.oid JOIN pg_attribute AS att ON con.confrelid = att.attrelid AND con.confkey[1] = att.attnum JOIN pg_attribute AS att2 ON con.conrelid = att2.attrelid AND con.conkey[1] = att2.attnum WHERE con.conrelid = (SELECT oid FROM pg_class WHERE relname = %s) AND contype = 'f' """ cursor.execute(query, (table_name,)) relations = cursor.fetchall() cursor.close() return relations def ask_database(conn, query): try: cursor = conn.cursor() cursor.execute(query) query_results = cursor.fetchall() return query_results except Exception as e: error_message = str(e) # Get the error message as a string return error_message def get_table_names(conn): cursor = conn.cursor() cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE';") table_names = cursor.fetchall() table_names = [table_name[0] for table_name in table_names] return table_names def get_column_names(conn, table_name): cursor = conn.cursor() cursor.execute("SELECT column_name FROM information_schema.columns WHERE table_name = %s", (table_name,)) column_names = cursor.fetchall() column_names = [column_name[0] for column_name in column_names] return column_names # SELECT # field_description.name AS field_name, # field_description.field_description AS field_description, # field_description.ttype AS field_type, # field_description.relation AS relation, # model.model AS model_name # FROM # ir_model AS model # JOIN # ir_model_fields AS field_description # ON model.id = field_description.model_id # WHERE # model.model = 'sale.order'; def get_model_cols(conn, model): cursor = conn.cursor() cursor.execute(""" SELECT field_description.name AS field_name, field_description.field_description AS field_description, field_description.ttype AS field_type, field_description.relation AS relation, model.model AS model_name FROM ir_model AS model JOIN ir_model_fields AS field_description ON model.id = field_description.model_id WHERE model.model = %s """, (model,)) model_fields = cursor.fetchall() cursor.close() # turn into csv format first_line = "column_name,description,type,related_model\n" # field[3] may be none turn into str for field in model_fields: first_line += field[0] + "," + field[1]["en_US"] + "," + field[2] + "," + str(field[3]) + "\n" return first_line def get_model_fields(conn, list_of_models): database_info = [] for model in list_of_models: table_name = model.replace(".", "_") # column_names = get_column_names(conn, table_name) column_names = get_model_cols(conn, model) database_info.append({"table_name": table_name, "column_names": column_names}) database_info_schema_str = json.dumps(database_info) return database_info_schema_str models = get_table_names(conn) models_str = ",".join(models) encoding = tiktoken.encoding_for_model("gpt-3.5-turbo-0613") num_tokens = len(encoding.encode(models_str)) def read_id(conn, model, id, fields): cursor = conn.cursor() table_name = model.replace(".", "_") fields_str = ",".join(fields) 
cursor.execute(f"SELECT {fields_str} FROM {table_name} WHERE id = {id}") return cursor.fetchall() functions = [ { "name": "get_model_fields", "description": "Use this function to get additional information (columns/fields) about certain models in odoo. Use this function if you aren't sure about a models columns/fields.", "parameters": { "type": "object", "properties": { "list_of_models": { "type": "array", "description": f""" Getting additional information about certain models to answer the user's question. """, "items": { "type": "string", "description": "The name of the model to get additional information about.", }, } }, "required": ["models"], }, }, { "name": "ask_database", "description": "You must call get_model_fields before using this. Use this function to answer user questions about Odoo. get_model_fields should always be called before using this. Output should be a fully formed SQL query.", "parameters": { "type": "object", "properties": { "query": { "type": "string", "description": f""" SQL query extracting info to answer the user's question. The query should be returned in plain text, not in JSON. """, } }, "required": ["query"], }, }, ] system_prompt = """ ODOOGPT is a tool that allows you to ask questions about Odoo. You are Oopo, a friendly AI assistant. You are here to help you with your Odoo questions and interact with your Odoo database. You should always call get_model_fields before using ask_database. Before I make a CRUD operation on a model I should get additional info about the model and make sure that it indeed exists. You have access to all tables and columns. You should never return technical jargon or SQL to the user. You should never say you don't have access to a table or column. Use a function call to find it. Try to return names instead of id references You have access to all records and information in the database. 
""" def execute_function_call(message): function_name = message["function_call"]["name"] print("\033[95m" + "ODOOGPT FUNCTION CALL: " + function_name + "\033[0m") args = json.loads(message["function_call"]["arguments"]) print("\033[95m" + "ODOOGPT FUNCTION ARGUMENTS: " + str(args) + "\033[0m") if function_name == "ask_database": query = json.loads(message["function_call"]["arguments"])["query"] results = ask_database(conn, query) elif function_name == "get_model_fields": models = json.loads(message["function_call"]["arguments"])["list_of_models"] results = get_model_fields(conn, models) else: results = f"Error: function {message['function_call']['name']} does not exist" chat_result = { "role": "function", "name": message["function_call"]["name"], "content": str(results), } print("\033[95m" + "ODOOGPT FUNCTION RESULT: " + str(results) + "\033[0m") return chat_result def run_conversation(): total_output_tokens = 0 total_input_tokens = 0 messages = [ {"role": "system", "content": system_prompt} ] while True: # check if previous message isn't a function call if messages[-1]["role"] != "function": prompt = input("Ask Oopo a question (type 'exit' to end the conversation): ") # Check if the user wants to exit if prompt.lower() == "exit": break # Add user message to the messages list messages.append({"role": "user", "content": prompt}) # Call the Chat API response = openai.ChatCompletion.create( model="gpt-3.5-turbo-0613", messages=messages, functions=functions, function_call="auto", ) # Get the response message if response["choices"][0]["message"].get("function_call"): response_message = execute_function_call(response["choices"][0]["message"]) else: response_message = response["choices"][0]["message"] print("\033[1m" + response_message["content"] + "\033[1m") token_count = response['usage']['total_tokens'] total_output_tokens = response['usage']['completion_tokens'] total_input_tokens = response['usage']['prompt_tokens'] input_cost = total_output_tokens / 1000 * 0.0015 output_cost = total_input_tokens / 1000 * 0.002 print("\033[92m" + f"Total cost: ${input_cost + output_cost:.6f} ({token_count})" + "\033[0m") print("\033[92m" + f"Total Input cost: ${input_cost:.6f} ({total_input_tokens})" + "\033[0m") print("\033[92m" + f"Total Output cost: ${output_cost:.6f} ({total_output_tokens})" + "\033[0m") messages.append(response_message) return "Conversation ended." print(run_conversation())
[ "Ask Oopo a question (type 'exit' to end the conversation): ", "\n ODOOGPT is a tool that allows you to ask questions about Odoo.\n \n You are Oopo, a friendly AI assistant. \n \n You are here to help you with your Odoo questions and interact with your Odoo database. \n \n You should always call get_model_fields before using ask_database. \n\n Before I make a CRUD operation on a model I should get additional info about the model and make sure that it indeed exists. \n You have access to all tables and columns.\n\n You should never return technical jargon or SQL to the user. \n You should never say you don't have access to a table or column. Use a function call to find it.\n Try to return names instead of id references\n\n You have access to all records and information in the database. \n" ]
2024-01-10
huanfengc/odoo-gpt-chatbot
mail_oopo~models~mail_bot.py
import openai import tiktoken import json import re from odoo import models, fields, api # Function definitions functions = [ { "name": "read_record", "description": "Read a record or records based on the given fields and search domains", "parameters": { "type": "object", "properties": { "model": { "type": "string", "description": "The name of the model to be read" }, "field": { "type": "array", "items": { "type": "string" }, "description": "Array of field names to be read from the model, " }, "search_domains": { "type": "array", "items": { "type": "array", "items": { "type": "string", "description": """Each item is a must be formatted as a tuple, e.g. ("name", "=", "Mark Cheng")""" } }, "description": """Odoo search domains to filter the data. Each domain should be an tuple of strings. \ e.g. [("name", "=", "Mark Cheng"), ("phone", "=", "123")] denotes a domain containing two conditions.""", "default": [] }, "limit": { "type": "integer", "description": "Limit the number of records to be read", }, "order": { "type": "string", "description": "Order the records by the given field - example name asc", } }, "required": ["model", "field"] } }, { "name": "create_record", "description": "Create a new record in a model based on the given fields.", "parameters": { "type": "object", "properties": { "model": { "type": "string", "description": "The name of the model to be read" }, "values": { "type": "array", "items": { "type": "string", "description": """A key-value mapping, both key and value must be strings, \ e.g. {"name": "Hello"}, {"partner_ids": "1", "name": "Hello"}""" }, "description": """Array of field names to be created as a new record. \ Each field must be a mapping, e.g. [{"name": "Mark Cheng", "phone": 9993336666}]""" }, }, "required": ["model", "values"] } }, { "name": "update_record", "description": """Update an existing record in a model. \ The record to update is identified based on the fields and search domain. \ The field to update is extracted from the user message.""", "parameters": { "type": "object", "properties": { "model": { "type": "string", "description": "The name of the model to be read" }, "field": { "type": "array", "items": { "type": "string" }, "description": "Array of field names in order for searching for the record to update." }, "field_to_update": { "type": "array", "items": { "type": "string" }, "description": """Array of field names to be updated. \ Each field must be a key-value pair, e.g. [{"phone": "9993336666"}]""" }, "search_domains": { "type": "array", "items": { "type": "array", "items": { "type": "string" } }, "description": "Odoo search domains to filter the data. 
Each domain should be an array of strings", "default": [] }, "limit": { "type": "integer", "description": "Limit the number of records to be read", } }, "required": ["model", "field", "field_to_update"] } }, ] # For the summary command in the chatter this array serves to provide # the 'important' information about a related field so we aren't prompting every field of every relational field # TODO make .name ^ .display_name default for all relational fields then add addons here # TODO add support for depth2 relational fields when specified e.g res.partner.user_id.name relational_bindings = { "res.partner": ["name"], "res.users": ["name"], "sale.order.line": ["name", "qty_to_deliver", "price_unit"], "account.move.line": ["name", "quantity", "price_unit", "price_subtotal"], "stock.move": ["display_name","product_id","product_uom_qty", "forecast_availability", "quantity_done"], "product.product": ["name", "lst_price", "standard_price", "detailed_type"], "mrp.bom.line": ["display_name", "product_qty"], "account.payment.term": ["name", "note"] } summary_prompt = """ You are a friendly AI Odoo Assistant. Use the provided record information below to process the user query. Your response must be professional and concise. The record information starts with a record description and model_name(record_id), and is followed by the fields information, i.e. field description [field_name] = field_value. <Record information starts> {prompt} <Record information ends> Selection fields return tuples with (technical_name, display_name) Please keep the summaries concise and professional. Only include the most relevant information. Summarize it in a business context - not a technical one. Don't just list the fields, rather summarize the record in a business context as a whole """ class MailBot(models.AbstractModel): _inherit = "mail.bot" first_msg = "Hi, I'm Oopo, an AI assistant. Feel free to ask my any questions." 
def _apply_logic(self, record, values, command=None): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") if len(record) != 1 or values.get("author_id") == odoobot_id or values.get("message_type") != "comment" and not command: return if self._is_bot_pinged(values) or self._is_bot_in_private_channel(record): body = values.get("body", "").replace(u"\xa0", u" ").strip().lower().strip(".!") answer, message_type = self._get_answer(record, body, values, command) if answer: subtype_id = self.env["ir.model.data"]._xmlid_to_res_id("mail.mt_comment") record.with_context(mail_create_nosubscribe=True).sudo().message_post(body=answer, author_id=odoobot_id, message_type=message_type, subtype_id=subtype_id) def _get_answer(self, channel, body, values, command): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") api_key = self.env["ir.config_parameter"].sudo().get_param("mail_oopo.openapi_api_key") if not api_key: return "Please set the OpenAI API key in the settings under integrations", "comment" openai.api_key = api_key fail_moderation_check = self._get_chat_completion(messages=body) if isinstance(fail_moderation_check, str): return fail_moderation_check, "comment" if fail_moderation_check: return "[Request Decline] The request violates OpenAI usage policy, please try another request.", "comment" if not isinstance(channel, type(self.env["mail.channel"])): response = self._process_query_in_chatter(channel, body) return response msgs = self._get_relevant_chat_history(channel) gpt_arr = self._build_chatgpt_request(msgs) response = self._pre_prompt(gpt_arr) if isinstance(response, str): return response, "comment" is_function_call, function_call_fail, response = True, False, None loop_count, timeout = 0, 20 functional_msg_saved = [] while is_function_call: if loop_count >= timeout: break response = self._get_chat_completion(messages=gpt_arr, callable_functions=functions, temperature=0.5) if isinstance(response, str): return response, "comment" gpt_arr.append(dict(response["choices"][0]["message"])) is_function_call = response["choices"][0]["message"].get("function_call") is not None if not is_function_call: break print("\033[96m" + "CALL #: " + str(loop_count+1) + "\033[0m") function_response, function_call_fail, error_message, model_name = self._execute_function_call(response["choices"][0]["message"]) gpt_arr.append(function_response) if function_call_fail: user_prompt = self._tailor_user_prompt(body, error_message, model_name) gpt_arr.append({"role": "user", "content": user_prompt}) if not function_call_fail: functional_msg_saved.append((dict(response["choices"][0]["message"]), "bot_function_request")) functional_msg_saved.append((function_response, "bot_function")) loop_count +=1 final_response = response["choices"][0]["message"]["content"] if not function_call_fail and final_response: for message, message_type in functional_msg_saved: self._create_functional_message(channel, message, message_type) if not final_response: final_response = "I am sorry that I failed to process your query, please provide more details/instructions and retry!" 
final_response = self._def_transform_links(final_response) return final_response, "comment" def _get_field_info(self, field, target_field, field_val, relational_bindings): field_string = target_field['string'] field_info = None if target_field["type"] == "boolean": field_info = f"'{field_string}' [{field}] = {field_val}\n" elif field_val: field_info = None if target_field["type"] in ("text", "integer", "float", "char", "datetime"): field_info = f"'{field_string}' [{field}] = {field_val}\n" elif target_field["type"] == "selection": selected_item = next(item for item in target_field['selection'] if item[0] == field_val) field_info = f"'{field_string}' [{field}] = {selected_item}\n" elif target_field["type"] == "many2one" and target_field["relation"] in relational_bindings: deep_field_metadata = field_val.fields_get() relevant_field_names = relational_bindings[target_field["relation"]] related_values = [f"{deep_field_metadata[rel_field]['string']} = {field_val[rel_field]}" for rel_field in relevant_field_names] field_info = f"'{field_string}' [{field}] = {field_string} with the following values: {', '.join(related_values)}\n" elif target_field["type"] == "one2many" and target_field["relation"] in relational_bindings: field_info = f"'{field_string}' [{field}] = {field_string} with the following values:\n" for record in field_val: deep_field_metadata = record.fields_get() relevant_field_names = relational_bindings[target_field["relation"]] related_values = [f"{deep_field_metadata[rel_field]['string']} = {record[rel_field]}" for rel_field in relevant_field_names] field_info += ", ".join(related_values) + "\n" else: field_info = f"'{field_string}' [{field}] = {field_val}\n" return field_info def _construct_summary_prompt(self, channel, fields_metadata, relational_bindings): prompt = f"Record Information: {channel._description} {getattr(channel, 'name', channel.display_name)} [{str(channel)}]\n" prompt_list = [prompt] ignored_fields = ("Followers", "Followers (Partners)", "Messages", "Website Messages") for field, target_field in fields_metadata.items(): if target_field["string"] in ignored_fields: continue field_val = channel[field] field_info = self._get_field_info(field, target_field, field_val, relational_bindings) if field_info: prompt_list.append(field_info) prompt = "".join(prompt_list) constructed_prompt = summary_prompt.format(prompt=prompt) return constructed_prompt def _process_query_in_chatter(self, channel, body): fields_metadata = channel.fields_get() constructed_prompt = self._construct_summary_prompt(channel, fields_metadata, relational_bindings) msgs = [{'role': 'system', 'content': constructed_prompt}] response = self._get_chat_completion(messages=msgs, model="gpt-3.5-turbo-0613") return response["choices"][0]["message"]["content"], "notification" def _build_chatgpt_request(self, msgs): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") chatgpt_msgs_arr = [{"role":"system", "content": self._select_system_message()}, {"role": "assistant", "content": self.first_msg}] for message in msgs[1:]: body = str(message["body"]).replace("<p>", "").replace("</p>", "") if message["author"]["id"] == odoobot_id: if body != self.first_msg and message["message_type"] == "comment": chatgpt_msgs_arr.append({"role": "assistant", "content": body}) elif message["message_type"] == "bot_function": chatgpt_msgs_arr.append(message["function_content"]) elif message["message_type"] == "bot_function_request": chatgpt_msgs_arr.append(message["function_content"]) else: 
chatgpt_msgs_arr.append({"role": "user", "content": body}) return chatgpt_msgs_arr def _get_relevant_chat_history(self, channel): msgs = channel._channel_fetch_message(limit=None) msgs = [msg for msg in msgs if msg["body"] != "" or msg.get("message_type") == "bot_function" or msg.get("message_type") == "bot_function_request"] msgs.reverse() return msgs def _create_functional_message(self, channel, content, message_type): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") subtype_id = self.env["ir.model.data"]._xmlid_to_res_id("mail.mt_comment") vals = { "body": "", "author_id": odoobot_id, "message_type": message_type, "subtype_id": subtype_id, "model": channel._name, "res_id": channel.id, "function_content": content } return channel._message_create(vals) def _is_bot_pinged(self, values): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") return odoobot_id in values.get("partner_ids", []) def _is_bot_in_private_channel(self, record): odoobot_id = self.env["ir.model.data"]._xmlid_to_res_id("base.partner_root") if record._name == "mail.channel" and record.channel_type == "chat": return odoobot_id in record.with_context(active_test=False).channel_partner_ids.ids return False def change_model(self, gptmodel): self.env.user.openai_model = gptmodel return True def get_model(self): return self.env.user.openai_model def _execute_function_call(self, message): function_name = message["function_call"]["name"] print("\033[95m" + "ODOOGPT FUNCTION CALL: " + function_name + "\033[0m") kwargs = None function_call_fail, model_name, error_message = False, None, None try: kwargs = json.loads(message["function_call"]["arguments"]) # kwargs = ast.literal_eval(message["function_call"]["arguments"]) except Exception as e: function_call_fail, error_message = True, e chat_result = self._construct_function_response(function_name, "JSON Error:" + str(e)) # print as red print("\033[91m" + "ODOOGPT FUNCTION ERROR: " + str(e) + "\033[0m") print("\033[91m" + "ODOOGPT FUNCTION ERROR: " + message["function_call"]["arguments"] + "\033[0m") return chat_result, function_call_fail, error_message, model_name print("\033[95m" + "ODOOGPT FUNCTION ARGUMENTS: " + str(kwargs) + "\033[0m") chat_result = None model_name = kwargs["model"] savepoint = self.env.cr.savepoint(flush=True) try: function_to_call = self._get_avalaible_function_dict()[function_name] result = function_to_call(**kwargs) print("\033[95m" + "ODOOGPT FUNCTION RESULT: " + str(result) + "\033[0m") chat_result = self._construct_function_response(function_name, str(result)) except Exception as e: savepoint.rollback() function_call_fail, error_message = True, e print("\033[91m" + "ODOOGPT FUNCTION ERROR: " + str(e) + "\033[0m") chat_result = self._construct_function_response(function_name, str(e)) return chat_result, function_call_fail, error_message, model_name def _construct_function_response(self, function_name, result): return {"role": "function", "name": function_name, "content": result} def _fix_errorneous_domain(self, search_domains): if len(search_domains) == 1: if len(search_domains[0]) == 3: if not all(isinstance(domain, (str, int)) for domain in search_domains[0]): search_domains = search_domains[0] else: if any(isinstance(operator, str) for operator in search_domains[0]): search_domains = search_domains[0] return search_domains def _def_transform_links(self, input_string): # Define the pattern to match the bracket-parenthesis links pattern = r'\[(.*?)\]\(#&data-oe-model=(.*?)&data-oe-id=(.*?)\)' # Function 
to replace the matches with the desired format def replace_link(match): link_text, model_name, id_number = match.groups() return f"<a href='#' data-oe-model='{model_name}' data-oe-id='{id_number}'>{link_text}</a>" # Use re.sub to replace the matches with the function result transformed_string = re.sub(pattern, replace_link, input_string) return transformed_string ### START ORM METHODS ### def _read_record(self, model, field, search_domains=None, limit=None, order=None): """Search and read records in the model based on search domains.""" available_fields = self._get_fields_for_model(model) # Try to always include 'name' in the fields to be read, if the field 'name' exists in the model if 'name' not in field: field.append('name') if 'name' in available_fields else field.append('display_name') search_domains = self._fix_errorneous_domain(search_domains) if search_domains else [] search_domains = [tuple(domain) if isinstance(domain, list) else domain for domain in search_domains] if search_domains else [] return self.env[model].search_read(domain=search_domains, fields=field, limit=limit, order=order) def _create_record(self, model, values): """Create new records in the model with fields filled by given values.""" return self.env[model].create(values) def _update_record(self, model, field, field_to_update, search_domains=None, limit=None): """Update existing records in the model with new values for the fields.""" record_id = self._read_record(model, field, search_domains, limit)[0]["id"] record_to_update = self.env[model].browse([record_id]) return record_to_update.write(vals=field_to_update[0]) def _get_fields_for_model(self, model_name): try: available_fields = set(self.env[model_name].fields_get().keys()) return available_fields except: return set() def _get_avalaible_function_dict(self): """Available funcitons that OpenAI API funciton call has access to.""" avalaible_function_dict = { "read_record": self._read_record, "create_record": self._create_record, "update_record": self._update_record, } return avalaible_function_dict def _get_chat_completion(self, messages, callable_functions=None, temperature=0.1, model=None): """Get completion for prompt via ChatCompletion model of OpenAI API.``messages`` should be a list of message. If ``messages`` is a string, i.e. single user prompt, perform moderation check.""" if model is None: model = self.get_model() try: if isinstance(messages, str): response = openai.Moderation.create(input=messages) response = response["results"][0]["flagged"] else: params = { "model": model, "messages": messages, "temperature": temperature, "request_timeout": 60, # This parameter helps raise Timeout error, but is not officially documented. } if callable_functions is not None: params["functions"] = callable_functions params["function_call"] = "auto" response = openai.ChatCompletion.create(**params) except openai.error.AuthenticationError as e: return f"""[OpenAI API Key Error] Your OpenAI API key is invalid, expired or revoked. \ Please provide a valid API key in Settings/General Settings/Integrations. See details: {e}""" except openai.error.ServiceUnavailableError as e: return f"""[OpenAI Server Error] There is an ongoing outage of OpenAI server. \ Please retry your query after a brief wait. See details: {e}""" except openai.error.APIError as e: return f"""[OpenAI Server Error] There is an ongoing outage or temporary error of OpenAI server. \ Please retry your query after a brief wait. 
See details: {e}""" except openai.error.APIConnectionError as e: return f"""[OpenAI Connection Error] Connection fails, please check your network settings, proxy configuration, \ SSL certificates or firewall rules. See details: {e}""" except openai.error.RateLimitError as e: return f"""[OpenAI Rate Limit Error] The rate limit of OpenAI API request has been reached. \ Please reduce the frequency of sending queries. See details: {e}""" except openai.error.Timeout as e: return f"""[OpenAI Request Timeout] Query timed out, please retry your query after a brief wait. \ See details: {e}""" except openai.error.InvalidRequestError as e: # ``InvalidRequestError`` should not be displayed to the user and has to be handled only on developer side. error_message = str(e) if "reduce" in error_message: return f"""[Token Limit Warning] {error_message} Or choose a GPT model with higher token limit to afford longer context. \ Please contact Odoo Inc for suggestions. (To release token usage, please enter "clear" to clear current message history.)""" return f"""[Query Fails] Your query can not be processed at current version of OdooBot, \ please contact Odoo Inc to upgrade OdooBot.""" if not isinstance(messages, str): print(f"\033[92m Total Conversation Tokens: {str(response['usage']['total_tokens'])} \033[0m") return response def _select_system_message(self): """System message sets up the tone of GPT, basic context of chat and requirements that GPT has to follow. Note: It is not guaranteed that GPT would strictly follow the requirements.""" prompts = { "base_system_message": """You are Oopo a friendly AI Assistant, users might ask questions, or ask to perform any actions. \ You have full access to the current Odoo environment""", "inline_link_instruction": """You can add links into the text too by adding an <a> tag in this format: <a href='#' data-oe-model='model name' data-oe-id='id number'>test</a> e.g., <a href='#' data-oe-model='sale.order' data-oe-id='7'>My sale order</a> You should always use links to reference records in the system, as it will make it easier for me to understand what you are referring to. Anything that is returned from the `read_record` function should be linked to: <a href='#' data-oe-model='model name' data-oe-id='id number'>Link text</a> For instance, if the `read_record` function returns a sale order with ID 7, create the link like this: <a href='#' data-oe-model='sale.order' data-oe-id='7'>Sale Order 7</a> By consistently including links in the responses, we can maintain a more structured and interactive conversation. Please avoid using the square bracket format like this [Product 45](#&data-oe-model=product.product&data-oe-id=45) for links, as it is not the correct format. Always use the "<a>" tag as shown in the examples above to create links.""", "relation_fields_prompt": """provides several functions to interact with the database, including querying records, creating new records, and updating existing records. To query records, you can use the `read_record` function, which retrieves specific fields from the given model. For example, to get the most recent three sale orders, you can use the `read_record` function with the appropriate arguments (`model`, `field`, `order`, and `limit`) as shown below: ``` ODOOGPT FUNCTION CALL: read_record ODOOGPT FUNCTION ARGUMENTS: {'model': 'sale.order', 'field': ['name', 'date_order'], 'order': 'date_order desc', 'limit': 3} ``` To create a new record, you can use the `create_record` function. 
For instance, to create a new customer named "Diego," you can use the `create_record` function with the desired `model` and `values` as shown below: ``` ODOOGPT FUNCTION CALL: create_record ODOOGPT FUNCTION ARGUMENTS: {'model': 'res.partner', 'values': [{'name': 'Diego'}]} ``` To update an existing record, you can use the `update_record` function. For example, if you want to update the phone number and email of the customer named "Diego" to "99999999" and "[email protected]" respectively, you can use the `update_record` function with the appropriate arguments (`model`, `field`, `field_to_update`, `search_domains`, and `limit`) as shown below: ``` ODOOGPT FUNCTION CALL: update_record ODOOGPT FUNCTION ARGUMENTS: {'model': 'res.partner', 'field': ['name'], 'field_to_update': [{'phone': '99999999', 'email': '[email protected]'}], 'search_domains': [['name', '=', 'Diego']], 'limit': 1} ``` One important concept to understand is the usage of relational fields. In some cases, you might need to reference the ID of a record when creating or updating another record with a relationship. For example, to create a sale order for a customer, you need to pass the customer's ID as the value for the `partner_id` field in the `sale.order` model. When you are unsure about the ID of a record, you can perform a search using the `read_record` function with appropriate search filters. For instance, if you want to find the ID of a product with a name containing "cabinet," you can use the `read_record` function with the search domain `[['name', '=ilike', '%cabinet%']]` as shown below: ``` ODOOGPT FUNCTION CALL: read_record ODOOGPT FUNCTION ARGUMENTS: {'model': 'product.product', 'field': ['id'], 'search_domains': [['name', '=ilike', '%cabinet%']], 'limit': 1} ``` Remember, the IDs returned from previous function calls can be used as arguments in subsequent function calls to establish relationships between records.""", "search_domains": """Domain criteria can be combined using 3 logical operators than can be added between tuples: '&' (logical AND, default) '|' (logical OR) '!' (logical NOT) These are prefix operators and the arity of the '&' and '|' operator is 2, while the arity of the '!' is just 1. Be very careful about this when you combine them the first time. 
Here is an example of searching for Partners named ABC from Belgium and Germany whose language is not english :: [('name','=','ABC'),'!',('language.code','=','en_US'),'|', ('country_id.code','=','be'),('country_id.code','=','de')] The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is:: [(name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))] For example if I ask, are you familiar with product x,y,z And you need to get the product ids of x,y,z You can use the search domain like this: search_domains: ["|", "|", ['name', 'ilike', '%x%'], ['name', 'ilike', '%y%'], ['name', 'ilike', '%z%']] or if you just wanted X or Y, you could do: search_domains: ["|", ['name', 'ilike', '%x%'], ['name', 'ilike', '%y%']] """ } encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") master_prompt = "" for prompt_module,prompt in prompts.items(): print(f"\033[92m Loaded Prompt Module: {prompt_module} - Tokens: {len(encoding.encode(prompt))}\033[0m") master_prompt += prompt # print tokens of master print(f"\033[92m Loaded Master Prompt - Tokens: {len(encoding.encode(master_prompt))}\033[0m") return master_prompt def _get_delimiter(self): """Delimiter helps to prevent prompt injections from users, and tailor a use prompt with internal helper prompts.""" return "####" def _tailor_user_prompt(self, user_prompt, error_message=None, model_name=None): """Encapsulate a user prompt with internal helper prompts, for error handling or requirement enforcement.""" if error_message: if isinstance(error_message, KeyError): error_type = "model" elif isinstance(error_message, ValueError) and model_name: error_type = "field" elif isinstance(error_message, TypeError): error_type = "type" else: error_type = "else" helper_prompt = self._get_user_prompt_helper((error_type, model_name)) user_prompt = " ".join([helper_prompt, user_prompt]) return user_prompt def _get_user_prompt_helper(self, helper_type): """Inject error message into the user prompt to help GPT self-correct and retry the failed user prompt. Note: the data type of helper_type is discussable, e.g. a tuple (key_word, model_name)""" error_type, model_name = helper_type if error_type == "model": model_prefix = model_name.split(".")[0] model_list = str([m["model"] for m in self.env["ir.model"].search_read([]) if model_prefix == m["model"].split(".")[0]]) helper_prompt = f"""The model {model_name} is invalid, you are required to only use the model defined in Odoo, \ the valid model names are listed as follows: {model_list}.""" elif error_type == "field": available_fields = str(list(self.env[model_name].fields_get().keys())) helper_prompt = f"""In {model_name} model of Odoo, the defined field names are listed as follows: {available_fields}. \ You are mandatory to use defined field names only.""" elif error_type == "type": helper_prompt = """You are mandatory to use the correct value type of the field. If it is a relational field, \ e.g. partner_id, res_model_id, you must perform a read operation first to find the corrrect id.""" else: helper_prompt = """ Please correct the error you made based on the error message shown in the last function response. If it was a JSON error, you must generate in correct JSON schema next time. If you used "(" and ")", change it to "[" and "]" to avoid error. If it violate not-null constraint, it means that field is required, and you must use the required field. 
If the required field is some id, you must perform a read operation to find the correct id. """ required_instruction = "Based on aforementioned information, please response the following user query again:" return " ".join([helper_prompt, required_instruction]) def _pre_prompt(self, gpt_arr): """Apply Chain of Thought (CoT) to help GPT decompose a user query into basic CRUD operations.""" user_prompt = gpt_arr[-1:] gpt_arr.append({"role":"user", "content": """ Instructions while running a query: search domain in read_record() must not have duplicate fields. While trying to get the id of a single record, every read_record() call must correspond to a single record eg: {'model': 'res.partner', 'field': ['id'], 'search_domains': [['name', '=', 'Odoo Wheel'],['name', '=', 'Odoo Frame']], 'limit': 1} is wrong, name is duplicated. Instead it should be {'model': 'res.partner', 'field': ['id'], 'search_domains': [['name', '=', 'Odoo Wheel']], 'limit': 1} followed by {'model': 'res.partner', 'field': ['id'], 'search_domains': [['name', '=', 'Odoo Frame']], 'limit': 1} Always try to read only a single record at once. If the user request doesn't require data operations - do not return anything - otherwise state what CRUD operations are required for the above(only give in read,create, update)? Which models are required(only give technical odoo model names)? Summarize within 100 words. Perform these CRUD operations"""}) response = self._get_chat_completion(messages=gpt_arr, callable_functions=functions, temperature=0.5) if not isinstance(response, str): gpt_arr.append(dict(response["choices"][0]["message"])) gpt_arr += user_prompt return response
[ "res.partner", "\n You are a friendly AI Odoo Assistant.\n\n Use the provided record information below to process the user query. \n Your response must be professional and concise.\n\n The record information starts with a record description and model_name(record_id),\n and is followed by the fields information, i.e. field description [field_name] = field_value.\n\n <Record information starts>\n {prompt}\n <Record information ends>\n\n Selection fields return tuples with (technical_name, display_name)\n\n Please keep the summaries concise and professional. Only include the most relevant information. Summarize it in a business context - not a technical one.\n\n Don't just list the fields, rather summarize the record in a business context as a whole\n ", "Odoo Frame", "search_domains", "]], ", "name", "You are mandatory to use the correct value type of the field. If it is a relational field, e.g. partner_id, res_model_id, you must perform a read operation first to find the corrrect id.", "{'base_system_message': 'You are Oopo a friendly AI Assistant, users might ask questions, or ask to perform any actions. \\\\ You have full access to the current Odoo environment', 'inline_link_instruction': 'You can add links into the text too by adding an <a> tag in this format:\\n <a href=\\'#\\' data-oe-model=\\'model name\\' data-oe-id=\\'id number\\'>test</a>\\n\\n e.g., <a href=\\'#\\' data-oe-model=\\'sale.order\\' data-oe-id=\\'7\\'>My sale order</a>\\n\\n You should always use links to reference records in the system, as it will make it easier for me to understand what you are referring to.\\n\\n Anything that is returned from the `read_record` function should be linked to:\\n <a href=\\'#\\' data-oe-model=\\'model name\\' data-oe-id=\\'id number\\'>Link text</a>\\n\\n For instance, if the `read_record` function returns a sale order with ID 7, create the link like this:\\n <a href=\\'#\\' data-oe-model=\\'sale.order\\' data-oe-id=\\'7\\'>Sale Order 7</a>\\n\\n By consistently including links in the responses, we can maintain a more structured and interactive conversation.\\n \\n Please avoid using the square bracket format like this [Product 45](#&data-oe-model=product.product&data-oe-id=45) for links, as it is not the correct format. Always use the \"<a>\" tag as shown in the examples above to create links.', 'relation_fields_prompt': 'provides several functions to interact with the database, including querying records, creating new records, and updating existing records.\\n\\n To query records, you can use the `read_record` function, which retrieves specific fields from the given model. For example, to get the most recent three sale orders, you can use the `read_record` function with the appropriate arguments (`model`, `field`, `order`, and `limit`) as shown below:\\n\\n ```\\n ODOOGPT FUNCTION CALL: read_record\\n ODOOGPT FUNCTION ARGUMENTS: {\\'model\\': \\'sale.order\\', \\'field\\': [\\'name\\', \\'date_order\\'], \\'order\\': \\'date_order desc\\', \\'limit\\': 3}\\n ```\\n\\n To create a new record, you can use the `create_record` function. For instance, to create a new customer named \"Diego,\" you can use the `create_record` function with the desired `model` and `values` as shown below:\\n\\n ```\\n ODOOGPT FUNCTION CALL: create_record\\n ODOOGPT FUNCTION ARGUMENTS: {\\'model\\': \\'res.partner\\', \\'values\\': [{\\'name\\': \\'Diego\\'}]}\\n ```\\n\\n To update an existing record, you can use the `update_record` function. 
For example, if you want to update the phone number and email of the customer named \"Diego\" to \"99999999\" and \"[email protected]\" respectively, you can use the `update_record` function with the appropriate arguments (`model`, `field`, `field_to_update`, `search_domains`, and `limit`) as shown below:\\n\\n ```\\n ODOOGPT FUNCTION CALL: update_record\\n ODOOGPT FUNCTION ARGUMENTS: {\\'model\\': \\'res.partner\\', \\'field\\': [\\'name\\'], \\'field_to_update\\': [{\\'phone\\': \\'99999999\\', \\'email\\': \\'[email protected]\\'}], \\'search_domains\\': [[\\'name\\', \\'=\\', \\'Diego\\']], \\'limit\\': 1}\\n ```\\n\\n One important concept to understand is the usage of relational fields. In some cases, you might need to reference the ID of a record when creating or updating another record with a relationship. For example, to create a sale order for a customer, you need to pass the customer\\'s ID as the value for the `partner_id` field in the `sale.order` model.\\n\\n When you are unsure about the ID of a record, you can perform a search using the `read_record` function with appropriate search filters. For instance, if you want to find the ID of a product with a name containing \"cabinet,\" you can use the `read_record` function with the search domain `[[\\'name\\', \\'=ilike\\', \\'%cabinet%\\']]` as shown below:\\n\\n ```\\n ODOOGPT FUNCTION CALL: read_record\\n ODOOGPT FUNCTION ARGUMENTS: {\\'model\\': \\'product.product\\', \\'field\\': [\\'id\\'], \\'search_domains\\': [[\\'name\\', \\'=ilike\\', \\'%cabinet%\\']], \\'limit\\': 1}\\n ```\\n\\n Remember, the IDs returned from previous function calls can be used as arguments in subsequent function calls to establish relationships between records.', 'search_domains': 'Domain criteria can be combined using 3 logical operators than can be added between tuples:\\n\\n \\'&\\' (logical AND, default)\\n \\'|\\' (logical OR)\\n \\'!\\' (logical NOT)\\n These are prefix operators and the arity of the \\'&\\' and \\'|\\' operator is 2, while the arity of the \\'!\\' is just 1. Be very careful about this when you combine them the first time.\\n\\n Here is an example of searching for Partners named ABC from Belgium and Germany whose language is not english ::\\n\\n [(\\'name\\',\\'=\\',\\'ABC\\'),\\'!\\',(\\'language.code\\',\\'=\\',\\'en_US\\'),\\'|\\',\\n (\\'country_id.code\\',\\'=\\',\\'be\\'),(\\'country_id.code\\',\\'=\\',\\'de\\')]\\n The \\'&\\' is omitted as it is the default, and of course we could have used \\'!=\\' for the language, but what this domain really represents is::\\n\\n [(name is \\'ABC\\' AND (language is NOT english) AND (country is Belgium OR Germany))]\\n\\n For example if I ask, are you familiar with product x,y,z \\n\\n And you need to get the product ids of x,y,z\\n\\n You can use the search domain like this:\\n\\n search_domains: [\"|\", \"|\", [\\'name\\', \\'ilike\\', \\'%x%\\'], [\\'name\\', \\'ilike\\', \\'%y%\\'], [\\'name\\', \\'ilike\\', \\'%z%\\']]\\n\\n or if you just wanted X or Y, you could do:\\n\\n search_domains: [\"|\", [\\'name\\', \\'ilike\\', \\'%x%\\'], [\\'name\\', \\'ilike\\', \\'%y%\\']]\\n \\n '}", "\n Please correct the error you made based on the error message shown in the last function response.\n\n If it was a JSON error, you must generate in correct JSON schema next time. 
\n If you used \"(\" and \")\", change it to \"[\" and \"]\" to avoid error.\n\n If it violate not-null constraint, it means that field is required, and you must use the required field.\n If the required field is some id, you must perform a read operation to find the correct id. \n ", "The model PLACEHOLDER is invalid, you are required to only use the model defined in Odoo, the valid model names are listed as follows: PLACEHOLDER.", " ", "Odoo Wheel", "In PLACEHOLDER model of Odoo, the defined field names are listed as follows: PLACEHOLDER. You are mandatory to use defined field names only.", ", " ]
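The relation_fields_prompt and search_domains strings above walk through Odoo's prefix-operator domain syntax ('&', '|', '!') and the read/create/update helpers. As a point of reference, a minimal sketch of issuing the same '|'-chained domain directly against Odoo's standard XML-RPC external API is shown below; the URL, database name and credentials are placeholders, not values taken from this entry.

import xmlrpc.client

url, db, username, password = "http://localhost:8069", "odoo", "admin", "admin"
common = xmlrpc.client.ServerProxy(f"{url}/xmlrpc/2/common")
uid = common.authenticate(db, username, password, {})
models = xmlrpc.client.ServerProxy(f"{url}/xmlrpc/2/object")

# '|' is a prefix OR of arity 2, so two '|' tokens chain three conditions:
# name ilike %x% OR name ilike %y% OR name ilike %z%.
domain = ["|", "|",
          ["name", "ilike", "%x%"],
          ["name", "ilike", "%y%"],
          ["name", "ilike", "%z%"]]
products = models.execute_kw(
    db, uid, password,
    "product.product", "search_read",
    [domain],
    {"fields": ["id", "name"], "limit": 5},
)
print(products)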
2024-01-10
huanfengc/odoo-gpt-chatbot
misc~xml_ORM.py
import xmlrpc.client import openai import json url = 'http://localhost:8069/' db = 'ogpt' username = 'admin' password = 'admin' common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url)) models = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url)) uid = common.authenticate(db, username, password, {}) def read_model(model, fields, search_domains=None, limit=None, order=None): search_param = {'fields': fields} if limit: search_param['limit'] = limit if order: search_param['order'] = order if search_domains: return models.execute_kw(db, uid, password, model, 'search_read', [search_domains], search_param) else: return models.execute_kw(db, uid, password, model, 'search_read', [[]], search_param) partners = read_model('res.partner', ['name']) openai.api_key = "" functions = [{ "name": "read_model", "description": "Read a model based on the given fields and search domains", "parameters": { "type": "object", "properties": { "model": { "type": "string", "description": "The name of the model to be read" }, "fields": { "type": "array", "items": { "type": "string" }, "description": "Array of field names to be read from the model" }, "search_domains": { "type": "array", "items": { "type": "array", "items": { "type": "string" } }, "description": "Odoo search domains to filter the data. Each domain should be an array of strings", "default": [] }, "limit": { "type": "integer", "description": "Limit the number of records to be read", }, "order": { "type": "string", "description": "Order the records by the given field - example 'name asc'", } }, "required": ["model", "fields"] } } ] def run_conversation(): # Step 1: send the conversation and available functions to GPT # prompt terminal for str prompt = input("Ask Odoo GPT a question: ") messages = [{"role": "user", "content": prompt}] response = openai.ChatCompletion.create( model="gpt-3.5-turbo-0613", messages=messages, functions=functions, function_call="auto", # auto is default, but we'll be explicit ) response_message = response["choices"][0]["message"] # Step 2: check if GPT wanted to call a function if response_message.get("function_call"): # Step 3: call the function # Note: the JSON response may not always be valid; be sure to handle errors available_functions = { "read_model": read_model, } # only one function in this example, but you can have multiple # print("ODOOGPT FUNCTION CALL: ", response_message["function_call"]) # pretty print in pink in terminal print("\033[95m" + "ODOOGPT FUNCTION CALL: " + response_message["function_call"]["name"] + "\033[0m") args = json.loads(response_message["function_call"]["arguments"]) print("\033[95m" + "ODOOGPT FUNCTION ARGUMENTS: " + str(args) + "\033[0m") # print(json.dumps(response_message["function_call"])) # parse all arguments and pretty print # print("\033[95m" + "ODOOGPT FUNCTION RESPONSE: " + "\033[0m") function_name = response_message["function_call"]["name"] fuction_to_call = available_functions[function_name] function_args = json.loads(response_message["function_call"]["arguments"]) function_response = str(fuction_to_call( model=function_args["model"], fields=function_args["fields"], search_domains=function_args.get("search_domains", None), limit=function_args.get("limit", None), order=function_args.get("order", None), )) # Step 4: send the info on the function call and function response to GPT messages.append(response_message) # extend conversation with assistant's reply messages.append( { "role": "function", "name": function_name, "content": function_response, } ) # extend 
conversation with function response second_response = openai.ChatCompletion.create( model="gpt-3.5-turbo-0613", messages=messages, ) # get a new response from GPT where it can see the function response return second_response['choices'][0]['message']['content'] print(run_conversation())
[ "Ask Odoo GPT a question: " ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~alpaca_prompter.py
import os import jsonlines from tqdm import tqdm import torch from peft import PeftModel from typing import Optional from datasets import load_dataset from langchain import PromptTemplate, FewShotPromptTemplate from transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer # from src.few_shot.utils import save_to_disk # from src.few_shot.together import infer # Set the seed seed = 0 torch.manual_seed(seed) BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" def save_to_disk(data, file_name): with jsonlines.open(file_name, "a") as writer: for example in tqdm(data, total=len(data), desc="Saving samples ... "): writer.write(example) class LLMClient: def __init__( self, template, model_name_or_path: str = None, tokenizer_name_or_path: Optional[str] = None, data_path: str = None, threshold: float = 0.5, task: str = "qg", max_new_tokens: int = 50, temperature: float = 0.01, top_p: float = 1, top_k: int = 40, repetition_penalty: float = 1.0, save_results: bool = True, max_samples: int = None, stop: str = "\n", ): self.template = template self.base_model = model_name_or_path self.tokenizer = tokenizer_name_or_path self.max_new_tokens = max_new_tokens self.temperature = temperature self.top_p = top_p self.top_k = top_k self.repetition_penalty = repetition_penalty self.data_path = data_path self.task = task self.threshold = threshold self.save_results = save_results self.max_samples = max_samples self.stop = stop.split(";") self.device = "cuda" if torch.cuda.is_available() else "cpu" self.lora_weights = f"{BASE_PATH}lora-alpaca-13b-10ep" def _load_model(self): tokenizer = LlamaTokenizer.from_pretrained(self.base_model) llama_model = LlamaForCausalLM.from_pretrained( self.base_model, # load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", ) model = PeftModel.from_pretrained( llama_model, self.lora_weights, torch_dtype=torch.float16, device_map="auto", ) # if not load_8bit: model.half() # model.to(self.device) # unwind broken decapoda-research config model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk model.config.bos_token_id = 1 model.config.eos_token_id = 2 return model, tokenizer def _create_few_shot_prompt(self, examples, context, question=None): example_template = """ ### Input: Context: {context} ### Response: Question: {question} Answer: {answer} """.strip() suffix = """ ### Input: Context: {context} ### Response: Question: """.strip() example_prompt = PromptTemplate( input_variables=["context", "question", "answer"], template=example_template, ) if self.task == "qg": few_shot_template = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, prefix=self.template, suffix=suffix, input_variables=["context"], example_separator="\n", ) prompt = few_shot_template.format(context=context) # print("Prompt:", prompt) return prompt def generate(self): c = 0 skipped_instances = 0 examples = [] model, tokenizer = self._load_model() model_identifier = self.base_model.split("/")[-1] model_identifier = "alpaca-lora-hf" save_path = ( BASE_PATH + f"src/data/squad/t5_squad_counterfactuals/{model_identifier}_{self.task}_pipeline_remaining_temp_0.7" ) if not os.path.exists(save_path): os.makedirs(save_path) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... 
" ) ] current_files = [] file_names = [] file_path = BASE_PATH + f"src/data/squad/{self.data_path}" if self.task == "qg": file_names = [file_path] elif self.task == "qa": all_files = os.listdir(file_path) file_names = [ os.path.join(file_path, file) for file in all_files if file not in current_files ] model.eval() for file_name in file_names: with jsonlines.open(file_name) as reader: for example in tqdm(reader): try: id = example["id"].split("_")[0] context = example["context"] orig_example = [ sample for sample in squad_data if sample["id"] == id ][0] # print(orig_example) orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] c += 1 if c <= 5000: continue if self.max_samples: if c == self.max_samples: break if self.task == "qa": # preprocess the question # remove any new line characters question = example["question"] question = question.replace("\n", "") # check for truncated question words # check for truncated question words question_words = [ "what", "who", "when", "where", "why", "how", "which", "whom", "whose", ] for word in question_words: if question.lower().startswith(word[1:]): # replace first word with the correct word question = word + question[len(word[1:]) :] # question = word + question[3:] break prompt = self._create_few_shot_prompt( [ { "context": orig_context, "question": orig_question, "answer": orig_answer["text"][0], } ], context, question=question if self.task == "qa" else None, ) # print(prompt) inputs = tokenizer(prompt, return_tensors="pt").to(self.device) outputs = model.generate( **inputs, max_new_tokens=self.max_new_tokens, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, repetition_penalty=self.repetition_penalty, do_sample=True, # num_return_sequences=1, early_stopping=True, num_beams=1, pad_token_id=tokenizer.eos_token_id, ) output = tokenizer.decode(outputs[0], skip_special_tokens=True) # remove the context from the output output = output[len(prompt) :] # output = infer( # model_name="GPT-JT-6B-v1", # prompt=prompt, # ) result = {"id": example["id"], "context": context} if self.task == "qg": # add the question mark if it is not present # if output[-1] != "?": # output = output + "?" # question = output # for stop_word in self.stop: # if stop_word != '' and stop_word in output: # question = output[:output.find(stop_word)] # # in some cases the stop word is at the beginning of the output # if question == "": # question = output[output.find(stop_word):] # Split the text into lines lines = output.split("\n") # Extract the question and answer question = lines[0].replace("Question: ", "") question = question.strip() answer = lines[1].replace("Answer: ", "") answer = answer.replace("<bot>:", "") answer = answer.strip() result["question"] = question result["answer"] = answer elif self.task == "qa": answer = output # print("Output:", output) for stop_word in self.stop: if stop_word != "" and stop_word in output: answer = output[output.find(stop_word) :] # remove the first stop word from answer answer = answer.split(stop_word) answer = [a for a in answer if a != ""] if answer: answer = answer[0] else: answer = "I don't know." # in some cases the stop word is at the beginning of the output # if answer == "": # answer = output[output.find(stop_word):] # answer = answer.replace(stop_word, "") # if answer == "": # answer = answer.split(stop_word) # answer = [a for a in answer if a != ""][0] skip_words = [ "A: ", "A. 
", "Answer: ", "Output: ", "Label: ", ] for skip_word in skip_words: if skip_word in answer: answer = answer.replace(skip_word, "") # if answer.startswith("A: ") or answer.startswith("A. "): # answer = answer[3:] # elif answer.startswith("Answer: "): # answer = answer.replace("Answer: ", "") # question = example["question"] result["question"] = question result["answer"] = answer # print("Context:", context) # print("Question:", question) # print("Answer:", answer) # print("-" * 100) examples.append(result) if self.save_results: if c % self.threshold == 0: save_to_disk( examples, f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl", ) examples = [] except Exception as e: # print(outputs) skipped_instances += 1 print(f"Skipped instance {c} due to error: {e}.") continue # if c==10: # break # save the remaining examples if self.save_results: if examples: save_to_disk( examples, f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl", ) # break if __name__ == "__main__": qg_template = """ As a question generator, your task is to create a concise and clear question that can be answered by an answer span within a given context. The context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. Your generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. Additionally, please ensure that your question is specific enough to have a single correct answer within the given context. Please note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question. """.strip() alpaca_prompt = """ Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: As a question generator, your task is to create a concise and clear question that can be answered by an answer span within the given context. The context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. Your generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. Additionally, please ensure that your question is specific enough to have a single correct answer within the given context. Please note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question. """.strip() task = "qg" model = "decapoda-research/llama-13b-hf" client = LLMClient( template=alpaca_prompt, model_name_or_path=model, tokenizer_name_or_path=model, data_path="t5_squad_counterfactuals/rag_counterfactuals_complete_min_filtered_dedup.jsonl", threshold=1000, task=task, max_new_tokens=50, temperature=0.7, top_p=1, top_k=40, repetition_penalty=1.0, save_results=True, max_samples=None, ) client.generate()
[ "\n", "question", "### Input:\nContext: {context}\n\n### Response:\nQuestion: {question}\nAnswer: {answer}", "Below is an instruction that describes a task, paired with an input that provides further context. \nWrite a response that appropriately completes the request.\n### Instruction:\nAs a question generator, your task is to create a concise and clear question that can be answered by an answer span within the given context. \nThe context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. \nYour generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. \nAdditionally, please ensure that your question is specific enough to have a single correct answer within the given context. \nPlease note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question.", "context", "answer", "As a question generator, your task is to create a concise and clear question that can be answered by an answer span within a given context. \nThe context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. \nYour generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. \nAdditionally, please ensure that your question is specific enough to have a single correct answer within the given context. \nPlease note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question." ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~llm_evaluator.py
import os import torch from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoTokenizer from langchain import PromptTemplate import jsonlines import re from tqdm import tqdm from datasets import load_dataset from transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig from src.cf_generation.llm_generation.utils import save_to_disk BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=0) args = parser.parse_args() # Set the seed torch.manual_seed(args.seed) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... " ) ] template = """ Given the question: \n {query} Decide if the following retrieved context is relevant to the {answer}: \n {result} Answer in the following format: \n "Context is relevant: True or False." \n """.strip() GRADE_DOCS_PROMPT_FAST = PromptTemplate( input_variables=["query", "result", "answer"], template=template ) device = torch.device("cuda:0") model_name = "google/flan-ul2" model = T5ForConditionalGeneration.from_pretrained( model_name, torch_dtype=torch.bfloat16, device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) model_identifier = model_name.split("/")[-1] model_id = "alpaca" save_path = BASE_PATH + f"src/data/squad/{model_id}_qa_relevance_seed_{args.seed}/" if not os.path.exists(save_path): os.makedirs(save_path) file_path = os.path.join( BASE_PATH, f"src/data/squad/counterfactual_data_alpaca_13b_v2_qg_pipeline_all_data_cleaned.jsonl", ) files = [file_path] skipped = 0 c = 0 for file_name in files: examples = [] with jsonlines.open(file_name) as reader: for example in tqdm(reader): try: # c+=1 # if c <= 25000: # continue id = example["id"].split("_")[0] context = example["context"] question = example["question"] answer = example["answers"]["text"][0] # print("Given ans:", example["answers"]) orig_example = [ sample for sample in squad_data if sample["id"] == id ][0] orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] input = GRADE_DOCS_PROMPT_FAST.format( query=question, result=context, answer=answer ) # print(input) input_ids = tokenizer.encode(input, return_tensors="pt").to(device) # outputs = generator.generate([input], max_new_tokens=50, top_p=0.95) with torch.no_grad(): outputs = model.generate( input_ids, max_new_tokens=10, temperature=0, top_p=1, top_k=40, repetition_penalty=1.0, pad_token_id=tokenizer.eos_token_id, ) output = tokenizer.decode(outputs[0], skip_special_tokens=True) c += 1 # if c==50: # break result = { "id": example["id"], "question": question, "context": context, "answer": answer, "context_relevance": output, } # print(result) # break examples.append(result) if c % 5000 == 0: save_to_disk( examples, f"{save_path}counterfactual_samples_{model_id}_{c}.jsonl", ) examples = [] # break except Exception as e: print("Skip") # save the remaining examples if examples: save_to_disk( examples, f"{save_path}counterfactual_samples_{model_id}_{c}.jsonl" )
[ "answer", "Given the question: \n\n {query}\n Decide if the following retrieved context is relevant to the {answer}: \n\n {result}\n Answer in the following format: \n\n \"Context is relevant: True or False.\"" ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~llama_qa.py
import os import torch from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoTokenizer from langchain import PromptTemplate import jsonlines import re from tqdm import tqdm from datasets import load_dataset from transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig from src.few_shot.utils import save_to_disk # from src.few_shot.generation import LLaMA BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" # Set the seed seed = 0 torch.manual_seed(seed) def get_llama(model): def skip(*args, **kwargs): pass torch.nn.init.kaiming_uniform_ = skip torch.nn.init.uniform_ = skip torch.nn.init.normal_ = skip model = LlamaForCausalLM.from_pretrained(model, torch_dtype="auto") model.seqlen = 2048 return model from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoTokenizer import torch import os import jsonlines import argparse from tqdm import tqdm from datasets import load_dataset BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" # BASE_PATH = "/home/sachdeva/projects/ukp/exp_calibration/" def save_to_disk(data, file_name): with jsonlines.open(file_name, "a") as writer: for example in tqdm(data, total=len(data), desc="Saving samples ... "): writer.write(example) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=0) args = parser.parse_args() # Set the seed torch.manual_seed(args.seed) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... " ) ] current_files = [] c = 0 # prompt = "Given the following question and context, provide an answer " \ # "that best fits the question. Ensure that the answer " \ # "is a span in the context." prompt = ( "Answer the question based on the context below. If the question cannot be answered " "using the information provided, then answer with 'I don't know'." ) # to test # prompt = "Your task is to answer a question based on the given context. If the information provided " \ # "is insufficient to answer the question, please respond with 'I don't know.' Your response " \ # "should be clear and concise, providing only relevant information necessary to answer the question. " \ # "Please note that you should make every effort to provide an accurate and complete response based " \ # "on the available information." 
model_name = "llama-13b-hf" # model_name = "google/flan-ul2" model_identifier = model_name.split("/")[-1] save_path = ( BASE_PATH + f"src/data/squad/few_shot_{model_identifier}_qa_eval_seed_{args.seed}" ) if not os.path.exists(save_path): os.makedirs(save_path) model = get_llama(model_name).to("cuda") tokenizer = LlamaTokenizer.from_pretrained(model_name) # all_files = os.listdir(BASE_PATH + f"src/data/squad/few_shot_{model_identifier}_qg_temp_0.7/") # files = [file for file in all_files if file not in current_files] # print(files) # files = ["llama_collated_data_with_answers_processed_context_irrelevance.jsonl"] files = ["counterfactual_data_llama_13b_v1_qg_pipeline_all_data_cleaned.jsonl"] for file in files: # if c<=9: # continue print(f"Processing file: {file}") examples = [] with jsonlines.open(BASE_PATH + f"src/data/squad/" + file) as reader: # with jsonlines.open(BASE_PATH + f"src/data/squad/few_shot_{model_identifier}_qg_temp_0.7/" + file) as reader: for example in tqdm(reader): c += 1 id = example["id"].split("_")[0] context = example["context"] question = example["question"] tokens_to_remove = ["[", "'", '"', "]"] # Create a translation table that maps each unwanted token to None translator = str.maketrans({token: None for token in tokens_to_remove}) question = question.translate(translator).strip() orig_example = [sample for sample in squad_data if sample["id"] == id][ 0 ] orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] input = ( f"{prompt} \nContext: {orig_context} \nQuestion: {orig_question} " f"\nAnswer: {orig_answer['text'][0]}" f"\n{prompt} \nContext: {context} \nQuestion: {question} " f"\nAnswer: " ) inputs = tokenizer(input, return_tensors="pt").input_ids.to("cuda") outputs = model.generate( inputs, max_new_tokens=50, temperature=0.7, top_p=1, top_k=40, ) output = tokenizer.decode(outputs[0], skip_special_tokens=True) # remove the context from the output output = output[len(input) :] answer = output.split("\n")[0] result = { "id": example["id"], "question": question, "context": context, "answer": answer, } print("question: ", question) print("context: ", context) print("answer: ", result["answer"]) print("original answer: ", example["answers"]) print("-" * 10) examples.append(result) if c == 10: break # save_to_disk(examples, f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl") # c += 1
[ "Answer the question based on the context below. If the question cannot be answered using the information provided, then answer with 'I don't know'." ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~gpt_evaluator.py
import os import torch import jsonlines from tqdm import tqdm from datasets import load_dataset from src.cf_generation.llm_generation.utils import save_to_disk import openai openai.api_key = os.getenv("OPENAI_API_KEY") BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" def get_response(prompt, max_tokens, model_name): """ :param prompt: :param max_tokens: :param model_name: :return: Sample output generated_ans = { "id": "chatcmpl-7dKbsNzkrAjNumSBMc4IhwCZAEMlo", "object": "chat.completion", "created": 1689608372, "model": "gpt-4-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": "The phenomena of electric and magnetic fields being perpendicular to each other is a fundamental part of how electromagnetic waves work. \n\nLet's consider an analogy to understand this better. ...}, "finish_reason": "stop" } ], "usage": { "prompt_tokens": 222, "completion_tokens": 247, "total_tokens": 469 } } """ response = openai.ChatCompletion.create( model=model_name, messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt}, ], max_tokens=max_tokens, ) return response if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=0) args = parser.parse_args() # Set the seed torch.manual_seed(args.seed) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... " ) ] model_name = "gpt-4-0314" save_path = ( BASE_PATH + f"src/data/squad/{model_name}_qa_relevance_seed_{args.seed}/" ) if not os.path.exists(save_path): os.makedirs(save_path) file_path = os.path.join( BASE_PATH, f"src/data/squad/counterfactual_samples_flan_t5_xxl_context_irrelevance.jsonl", ) files = [file_path] skipped = 0 c = 0 for file_name in files: examples = [] with jsonlines.open(file_name) as reader: for example in tqdm(reader): try: c += 1 # if c <= 100: # continue id = example["id"].split("_")[0] context = example["context"] question = example["question"] answer = example["answers"]["text"][0] # print("Given ans:", example["answers"]) orig_example = [ sample for sample in squad_data if sample["id"] == id ][0] orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] # input = GRADE_DOCS_PROMPT_FAST.format(query=question, result=context, answer=answer) # print(input) template = ( f"Given the question: \n" f"{question} \n" f"Decide if the following retrieved context is relevant to the {answer}: \n" f"{context} \n" "Answer in the following format: \n" "'Context is relevant: True or False.' \n" ) # print(template) # break output = get_response( prompt=template, max_tokens=10, model_name=model_name ) result = { "id": example["id"], "question": question, "context": context, "answer": answer, "context_relevance": output, } # print(result) # break examples.append(result) if c == 500: break except Exception as e: print("Skip") # # save the remaining examples if examples: save_to_disk( examples, f"{save_path}counterfactual_samples_{model_name}_flan_t5_xxl_{c}.jsonl", ) # print(examples)
[ "Given the question: \nPLACEHOLDER \nDecide if the following retrieved context is relevant to the PLACEHOLDER: \nPLACEHOLDER \nAnswer in the following format: \n'Context is relevant: True or False.' \n", "You are a helpful assistant." ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~alpaca_cot.py
import os import jsonlines from tqdm import tqdm import torch from peft import PeftModel from typing import Optional from datasets import load_dataset from langchain import PromptTemplate, FewShotPromptTemplate from transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer # from src.few_shot.utils import save_to_disk # from src.few_shot.together import infer BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" def save_to_disk(data, file_name): with jsonlines.open(file_name, "a") as writer: for example in tqdm(data, total=len(data), desc="Saving samples ... "): writer.write(example) class LLMClient: def __init__( self, template, model_name_or_path: str = None, tokenizer_name_or_path: Optional[str] = None, data_path: str = None, threshold: float = 0.5, task: str = "qg", max_new_tokens: int = 50, temperature: float = 0.01, top_p: float = 1, top_k: int = 40, repetition_penalty: float = 1.0, save_results: bool = True, max_samples: int = None, stop: str = "\n", ): self.base_model = model_name_or_path self.tokenizer = tokenizer_name_or_path self.max_new_tokens = max_new_tokens self.temperature = temperature self.top_p = top_p self.top_k = top_k self.repetition_penalty = repetition_penalty self.data_path = data_path self.task = task self.threshold = threshold self.save_results = save_results self.max_samples = max_samples self.stop = stop.split(";") self.device = "cuda" if torch.cuda.is_available() else "cpu" self.lora_weights = f"{BASE_PATH}alpaca-cot-13b" def _load_model(self): tokenizer = LlamaTokenizer.from_pretrained(self.base_model) model = LlamaForCausalLM.from_pretrained( self.base_model, load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", ) model = PeftModel.from_pretrained( model, self.lora_weights, torch_dtype=torch.float16, ) model.to(self.device) # unwind broken decapoda-research config model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk model.config.bos_token_id = 1 model.config.eos_token_id = 2 return model, tokenizer def _create_zero_shot_prompt(self, context, question, answer): prompt = qg_template.format(context=context, question=question, answer=answer) return prompt def generate(self): c = 0 skipped_instances = 0 examples = [] model, tokenizer = self._load_model() model_identifier = self.base_model.split("/")[-1] save_path = ( BASE_PATH + f"src/data/squad/{model_identifier}_{self.task}_pipeline_temp_0.7" ) if not os.path.exists(save_path): os.makedirs(save_path) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... 
" ) ] current_files = [] file_names = [] file_path = BASE_PATH + f"src/data/squad/{self.data_path}" file_names = [file_path] model.eval() for file_name in file_names: with jsonlines.open(file_name) as reader: for example in tqdm(reader): try: id = example["id"].split("_")[0] context = example["context"] orig_example = [ sample for sample in squad_data if sample["id"] == id ][0] # print(orig_example) orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] c += 1 if self.max_samples: if c == self.max_samples: break prompt = self._create_zero_shot_prompt( context=orig_context, question=orig_question, answer=orig_answer["text"][0], ) # print(prompt) inputs = tokenizer(prompt, return_tensors="pt").to(self.device) outputs = model.generate( **inputs, max_new_tokens=self.max_new_tokens, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, repetition_penalty=self.repetition_penalty, do_sample=True, # num_return_sequences=1, early_stopping=True, pad_token_id=tokenizer.eos_token_id, ) output = tokenizer.decode(outputs[0], skip_special_tokens=True) # remove the context from the output output = output[len(prompt) :] result = {"id": example["id"], "context": context} print("Context:", orig_context) print("Question:", orig_question) print("Answer:", orig_answer["text"][0]) print("Explanation:", output) print("-" * 100) # # examples.append(result) # if self.save_results: # if c % self.threshold == 0: # save_to_disk( # examples, # f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl" # ) # examples = [] except Exception as e: # print(outputs) skipped_instances += 1 print(f"Skipped instance {c} due to error: {e}.") continue if c == 5: break # save the remaining examples # if self.save_results: # if examples: # save_to_disk( # examples, # f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl" # ) break if __name__ == "__main__": qg_template = """ As an answer explainer, your job is to give a rationale for the answer to the following question given the context it is derived from. The rationale should express a clear thought of reasoning that led to the answer. Context: {context} Question: {question} Answer: {answer} Rationale: Let's think step by step, """.strip() model = "decapoda-research/llama-13b-hf" client = LLMClient( template=qg_template, model_name_or_path=model, tokenizer_name_or_path=model, task="zero-shot-cot", data_path="squad_counterfactuals_28_03.jsonl", threshold=1000, max_new_tokens=128, temperature=0.7, top_p=1, top_k=40, repetition_penalty=1.0, save_results=True, max_samples=None, ) client.generate()
[ "As an answer explainer, your job is to give a rationale for the answer to the following question given the context it is derived from. \nThe rationale should express a clear thought of reasoning that led to the answer.\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer: {answer}\n\nRationale: Let's think step by step," ]
2024-01-10
UKPLab/CATfOOD
src~cf_generation~llm_generation~prompt_injection.py
import os import jsonlines from tqdm import tqdm import torch from typing import Optional from datasets import load_dataset from langchain import PromptTemplate, FewShotPromptTemplate from transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig # from src.few_shot.utils import save_to_disk from src.few_shot.together import infer # Set the seed seed = 0 torch.manual_seed(seed) BASE_PATH = "/storage/ukp/work/sachdeva/research_projects/exp_calibration/" def save_to_disk(data, file_name): with jsonlines.open(file_name, "a") as writer: for example in tqdm(data, total=len(data), desc="Saving samples ... "): writer.write(example) class LLMClient: def __init__( self, template, model_name_or_path: str = None, tokenizer_name_or_path: Optional[str] = None, data_path: str = None, threshold: float = 0.5, task: str = "qg", max_new_tokens: int = 50, temperature: float = 0.01, top_p: float = 1, top_k: int = 40, repetition_penalty: float = 1.0, save_results: bool = True, max_samples: int = None, stop: str = "\n", ): self.model = model_name_or_path self.tokenizer = tokenizer_name_or_path self.max_new_tokens = max_new_tokens self.temperature = temperature self.top_p = top_p self.top_k = top_k self.repetition_penalty = repetition_penalty self.data_path = data_path self.task = task self.threshold = threshold self.save_results = save_results self.max_samples = max_samples self.stop = stop.split(";") self.device = "cuda" if torch.cuda.is_available() else "cpu" # self.prompt_template = PromptTemplate( # input_variables=["context"] if self.task == "qg" else ["context", "question"], # template=template, # ) def _load_model(self): model = AutoModelForCausalLM.from_pretrained( self.model, device_map="auto" ) # , torch_dtype=torch.bfloat16) model.to(self.device) tokenizer = AutoTokenizer.from_pretrained(self.model) return model, tokenizer def _create_few_shot_prompt(self, examples, context, question=None): example_template = """ Context: {context} Question: {question} Answer: {answer} """.strip() suffix = """ Context: {context} Question: """.strip() example_prompt = PromptTemplate( input_variables=["context", "question", "answer"], template=example_template, ) if self.task == "qg": few_shot_template = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, prefix=qg_template if self.task == "qg" else qa_template, suffix=suffix, input_variables=["context"], example_separator="\n", ) prompt = few_shot_template.format(context=context) else: few_shot_template = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, prefix=qa_template, suffix=suffix, input_variables=["context", "question"], example_separator="\n", ) prompt = few_shot_template.format(context=context, question=question) return prompt def generate(self): c = 0 skipped_instances = 0 examples = [] model, tokenizer = self._load_model() model_identifier = self.model.split("/")[-1] save_path = ( BASE_PATH + f"src/data/squad/t5_squad_counterfactuals/{model_identifier}_{self.task}_pipeline_remaining_temp_0.7" ) if not os.path.exists(save_path): os.makedirs(save_path) # load squad data dataset = load_dataset("squad", "plain_text") train_data = dataset["train"] squad_data = [ sample for sample in tqdm( train_data, total=len(train_data), desc="Loading SQuAD data ... 
" ) ] current_files = [] file_names = [] file_path = BASE_PATH + f"src/data/squad/{self.data_path}" if self.task == "qg": file_names = [file_path] elif self.task == "qa": all_files = os.listdir(file_path) file_names = [ os.path.join(file_path, file) for file in all_files if file not in current_files ] model.eval() for file_name in file_names: with jsonlines.open(file_name) as reader: for example in tqdm(reader): try: id = example["id"].split("_")[0] context = example["context"] orig_example = [ sample for sample in squad_data if sample["id"] == id ][0] # print(orig_example) orig_context = orig_example["context"] orig_question = orig_example["question"] orig_answer = orig_example["answers"] c += 1 if c <= 73000: continue if self.max_samples: if c == self.max_samples: break if self.task == "qa": # preprocess the question # remove any new line characters question = example["question"] question = question.replace("\n", "") # check for truncated question words # check for truncated question words question_words = [ "what", "who", "when", "where", "why", "how", "which", "whom", "whose", ] for word in question_words: if question.lower().startswith(word[1:]): # replace first word with the correct word question = word + question[len(word[1:]) :] # question = word + question[3:] break prompt = self._create_few_shot_prompt( [ { "context": orig_context, "question": orig_question, "answer": orig_answer["text"][0], } ], context, question=question if self.task == "qa" else None, ) # print(prompt) inputs = tokenizer(prompt, return_tensors="pt").to(self.device) outputs = model.generate( **inputs, max_new_tokens=self.max_new_tokens, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, repetition_penalty=self.repetition_penalty, do_sample=True, # num_return_sequences=1, early_stopping=True, pad_token_id=tokenizer.eos_token_id, ) output = tokenizer.decode(outputs[0], skip_special_tokens=True) # remove the context from the output output = output[len(prompt) :] # output = infer( # model_name="GPT-JT-6B-v1", # prompt=prompt, # ) result = {"id": example["id"], "context": context} if self.task == "qg": # add the question mark if it is not present # if output[-1] != "?": # output = output + "?" # question = output # for stop_word in self.stop: # if stop_word != '' and stop_word in output: # question = output[:output.find(stop_word)] # # in some cases the stop word is at the beginning of the output # if question == "": # question = output[output.find(stop_word):] # Split the text into lines lines = output.split("\n") # Extract the question and answer question = lines[0].replace("Question: ", "") question = question.strip() answer = lines[1].replace("Answer: ", "") answer = answer.replace("<bot>:", "") answer = answer.strip() result["question"] = question result["answer"] = answer elif self.task == "qa": answer = output # print("Output:", output) for stop_word in self.stop: if stop_word != "" and stop_word in output: answer = output[output.find(stop_word) :] # remove the first stop word from answer answer = answer.split(stop_word) answer = [a for a in answer if a != ""] if answer: answer = answer[0] else: answer = "I don't know." # in some cases the stop word is at the beginning of the output # if answer == "": # answer = output[output.find(stop_word):] # answer = answer.replace(stop_word, "") # if answer == "": # answer = answer.split(stop_word) # answer = [a for a in answer if a != ""][0] skip_words = [ "A: ", "A. 
", "Answer: ", "Output: ", "Label: ", ] for skip_word in skip_words: if skip_word in answer: answer = answer.replace(skip_word, "") # if answer.startswith("A: ") or answer.startswith("A. "): # answer = answer[3:] # elif answer.startswith("Answer: "): # answer = answer.replace("Answer: ", "") # question = example["question"] result["question"] = question result["answer"] = answer # print("Context:", context) # print("Question:", question) # print("Answer:", answer) # print("-" * 100) examples.append(result) if self.save_results: if c % self.threshold == 0: save_to_disk( examples, f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl", ) examples = [] except Exception as e: # print(outputs) skipped_instances += 1 print(f"Skipped instance {c} due to error: {e}.") continue # save the remaining examples if self.save_results: if examples: save_to_disk( examples, f"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl", ) # break if __name__ == "__main__": qg_template = """ As a question generator, your task is to create a concise and clear question that can be answered by an answer span within a given context. The context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. Your generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. Additionally, please ensure that your question is specific enough to have a single correct answer within the given context. Please note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question. """.strip() chatgpt_qg_template = """ As a question generator, your task is to create a clear and concise question that can be answered by an answer span within a given context. The context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. Your generated question should focus on key details or events described in the context, requiring readers to carefully read and analyze the provided text. Please ensure that your question is specific enough to have only one correct answer span within the given context. Please note that you should aim for clarity and concision while still maintaining accuracy and relevance to the provided text. """.strip() qa_template = """ As a language model, your task is to generate an answer that is strictly a span in the given context provided with a specific question. Please provide a concise and relevant response that directly answers the question asked within the context. Your answer should be accurate and reflect the information presented in the context. Please note that you may need to use contextual clues and understanding to identify the most appropriate section of text to use as your answer span. Context: {context} Question: {question} Answer: """ task = "qg" model = "togethercomputer/GPT-JT-6B-v1" client = LLMClient( template=qg_template if task == "qg" else qa_template, model_name_or_path=model, tokenizer_name_or_path=model, data_path="t5_squad_counterfactuals/rag_counterfactuals_complete_min_filtered_dedup.jsonl", threshold=1000, task=task, max_new_tokens=50, temperature=0.7, top_p=1, top_k=40, repetition_penalty=1.0, save_results=True, max_samples=None, ) client.generate()
[ "\n", "As a question generator, your task is to create a clear and concise question that can be answered by an answer span within a given context. \nThe context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. \nYour generated question should focus on key details or events described in the context, requiring readers to carefully read and analyze the provided text. \nPlease ensure that your question is specific enough to have only one correct answer span within the given context. \nPlease note that you should aim for clarity and concision while still maintaining accuracy and relevance to the provided text.", "question", "Context: {context}\nQuestion: {question}\nAnswer: {answer}", "\nAs a language model, your task is to generate an answer that is strictly a span in the given context provided with a specific question.\nPlease provide a concise and relevant response that directly answers the question asked within the context.\nYour answer should be accurate and reflect the information presented in the context.\nPlease note that you may need to use contextual clues and understanding to identify the most appropriate section of text to use as your answer span.\n\n\nContext: {context}\n\n\nQuestion: {question}\n\n\nAnswer: \n", "context", "answer", "As a question generator, your task is to create a concise and clear question that can be answered by an answer span within a given context. \nThe context should be a piece of text, such as a news article or historical document, and the question should require understanding and analysis of the information presented in the context. \nYour generated question should focus on key details or events described in the context and should demonstrate your ability to identify important information. \nAdditionally, please ensure that your question is specific enough to have a single correct answer within the given context. \nPlease note that you may need to read through the provided context multiple times to fully understand its contents before generating an appropriate question." ]
2024-01-10
ozersource/chatgpt
g4f~Provider~Providers~Chimera.py
from cgi import print_arguments import re import os import openai import openai.error from dotenv import load_dotenv from ...typing import sha256, Dict, get_type_hints import requests import json from langdetect import detect import datetime load_dotenv() api_key_env = os.environ.get("CHIMERA_API_KEY") openai.api_base = "https://api.naga.ac/v1" url = 'https://api.naga.ac/' app__api__v1__chat__Completions__Models = [ "gpt-4", "gpt-4-vision-preview", "gpt-4-1106-preview", "gpt-4-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0613", "gemini-pro", "gemini-pro-vision", "llama-2-70b-chat", "llama-2-13b-chat", "llama-2-7b-chat", "code-llama-34b", "mistral-7b", "mixtral-8x7b", "falcon-180b-chat", "claude-2", "claude-instant" ] app__api__v1__images__Images__Models=[ "midjourney", "sdxl", "latent-consistency-model", "kandinsky-2.2", "kandinsky-2", "dall-e", "stable-diffusion-2.1", "stable-diffusion-1.5", "deepfloyd-if", "material-diffusion" ] # 发送请求并获取models #response = requests.get(openai.api_base+"/models") # 将 JSON 数据解析为 Python 对象 #data = json.loads(response.text) data={} data['data']=[] for modelaaa in app__api__v1__chat__Completions__Models: data['data'].append({"id":modelaaa,"endpoints":["/v1/chat/completions"],"owned_by":"OPENAI"}) for modelaaa in app__api__v1__images__Images__Models: data['data'].append({"id":modelaaa,"endpoints":["/v1/images/generations"],"owned_by":"OPENAI"}) model=[] model_endpoint=[] model_public=[] for Models in data['data']: #if 'public' in Models: model.append(Models['id']) print(Models) model_endpoint.append(Models['endpoints'][0].replace('/v1/','')) # model_public.append(Models['public']) # 输出解析后的数据 print(data['data']) groups=[] for htmlmodels in model_endpoint: if(htmlmodels not in groups): groups.append(htmlmodels) htmlstr="" #print(groups) for group in groups: htmlstr=htmlstr+f"<optgroup label='{group}'>" for index,htmlmodels in enumerate(model): if(model_endpoint[index]==group): htmlstr=htmlstr+f"<option value='{htmlmodels}'>{htmlmodels}</option>" htmlstr=htmlstr+f"</optgroup>" with open("client/html/index_model.html", "r") as f: # 读取模板文件内容 content = f.read() content=content.replace("{('models')}",htmlstr) with open("client/html/index.html", "w") as f: # 写入index.html f.write(content) with open("g4f/models.py", "w") as f: # 写入models.py f.writelines('from g4f import Provider\n') f.writelines('class Model:\n') f.writelines('\tclass model:\n') f.writelines('\t\tname: str\n') f.writelines('\t\tbase_provider: str\n') f.writelines('\t\tbest_provider: str\n') for Models in data['data']: if 'owned_by' in Models: f.writelines('\tclass ' + Models['id'].replace('-','_').replace('.','') + ':\n') f.writelines('\t\tname: str = \'' + Models['id']+'\'\n') f.writelines('\t\tbase_provider: str = \'' + Models['owned_by']+'\'\n') f.writelines('\t\tbest_provider: Provider.Provider = Provider.Chimera\n') f.writelines('class ModelUtils:\n') f.writelines('\tconvert: dict = {\n') for modelname in model: f.writelines('\t\t\'' + modelname + '\':Model.'+modelname.replace('-','_').replace('.','')+',\n') f.writelines('\t}\n') supports_stream = True needs_auth = False def _create_completion(api_key: str, model: str, messages: list, stream: bool, **kwargs): #chat def chat_completions(endpoint,model,messages): yield endpoint+"-"+model+":\n\n" print(endpoint) try: response = openai.ChatCompletion.create( model=model, messages=messages, stream=stream, allow_fallback=True ) for chunk in response: yield chunk.choices[0].delta.get("content", "") print(response) except 
openai.error.PermissionError as e: yield e.user_message except openai.error.InvalidRequestError as e: yield e.user_message except openai.error.APIError as e: detail_pattern = re.compile(r'{"detail":"(.*?)"}') match = detail_pattern.search(e.user_message) if match: error_message = match.group(1) print(error_message) yield error_message else: print(e.user_message) yield e.user_message except Exception as e: # 处理其他异常 yield e.decode('utf-8') #completions def completions(endpoint,model): yield endpoint+"-"+model+":\n\n" try: response = openai.Completion.create( model=model, prompt=prompt, stream=stream, max_tokens = 500, stop = "\n\n" ) yield prompt for chunk in response: yield chunk.choices[0].text except openai.error.PermissionError as e: yield e.user_message except openai.error.InvalidRequestError as e: yield e.user_message except openai.error.APIError as e: detail_pattern = re.compile(r'{"detail":"(.*?)"}') match = detail_pattern.search(e.user_message) if match: error_message = match.group(1) print(error_message) yield error_message else: print(e.user_message) yield e.user_message except Exception as e: # 处理其他异常 yield e #images def image_gen(endpoint,model,prompt): yield endpoint+"-"+model+":\n\n" yield f"正在生成{prompt}图片,请稍候……\n\n" try: response = openai.Image.create( model=model, prompt=prompt, n=3, # images count size="1024x1024" ) responseimg=json.dumps(response["data"]) for img in eval(responseimg): mediaphoto="[!["+prompt+"]("+img['url']+")]("+img['url']+")" yield str(mediaphoto) except openai.error.PermissionError as e: yield e.user_message except openai.error.InvalidRequestError as e: yield e.user_message except openai.error.APIError as e: detail_pattern = re.compile(r'{"detail":"(.*?)"}') match = detail_pattern.search(e.user_message) if match: error_message = match.group(1) print(error_message) yield error_message else: print(e.user_message) yield e.user_message except Exception as e: # 处理其他异常 yield e #embeddings def word_embeddings(endpoint,model,prompt): yield endpoint+"-"+model+":\n\n" try: response = openai.Embedding.create( model=model, input=prompt ) embeddings = response['data'][0]['embedding'] yield str(embeddings) #print(embeddings) except openai.error.PermissionError as e: yield e.user_message except openai.error.InvalidRequestError as e: yield e.user_message except openai.error.APIError as e: detail_pattern = re.compile(r'{"detail":"(.*?)"}') match = detail_pattern.search(e.user_message) if match: error_message = match.group(1) print(error_message) yield error_message else: print(e.user_message) yield e.user_message except Exception as e: # 处理其他异常 yield e.decode('utf-8') #moderations def moderations(endpoint,model): yield endpoint+"-"+model+":\n\n" try: response = openai.Moderation.create( model=model, input=prompt ) result=response['results'][0]['flagged'] if(result): censorflag='审核未通过,包含敏感内容:\n\n' yield censorflag moderate={ "sexual":"性行为", "hate":"仇恨", "harassment":"骚扰", "self-harm":"自残", "sexual/minors":"涉及未成年人的性行为", "hate/threatening":"仇恨言论/威胁", "violence/graphic":"暴力/血腥画面", "self-harm/intent":"自残倾向", "self-harm/instructions":"自残指导", "harassment/threatening":"骚扰言论/威胁", "violence":"暴力"} for key,vaule in response['results'][0]['categories'].items(): if(vaule): yield moderate[key]+"\n\n" else: censorflag='内容合规,审核通过' yield censorflag except openai.error.PermissionError as e: yield e.user_message except openai.error.InvalidRequestError as e: yield e.user_message except openai.error.APIError as e: detail_pattern = re.compile(r'{"detail":"(.*?)"}') match = 
detail_pattern.search(e.user_message) if match: error_message = match.group(1) print(error_message) yield error_message else: print(e.user_message) yield e.user_message except Exception as e: # 处理其他异常 yield e.decode('utf-8') #audio def audio_transcriptions(endpoint,model): yield endpoint+"-"+model+":暂时未开发\n\n" audio_file = open("./audio_file.mp3", "rb") transcript = openai.Audio.transcribe("whisper-1", audio_file) yield json.dumps(transcript, ensure_ascii=False) prompt=messages[-1]['content'] openai.api_key = api_key_env if api_key_env else api_key #匹配endpoint for models_endpoints in data['data']: if models_endpoints['id'] == model: endpoints = models_endpoints['endpoints'] break endpoint = endpoints[0].replace('/v1/','') #查看tokenizer与status的命令 if(prompt.startswith('/')): #usage if(prompt.lower()=='/usage'): headers = {'Authorization': f'Bearer {api_key_env}'} json_data = {'model': model, 'messages': messages} response = requests.post(openai.api_base+"/chat/tokenizer",headers=headers,json=json_data) # 将 JSON 数据解析为 Python 对象 usedata = json.loads(response.text) yield f"当前模型{model},token:{usedata}" return #status if(prompt.lower()=='/status'): response = requests.get(openai.api_base+"/status") # 将 JSON 数据解析为 Python 对象 statusdata = json.loads(response.text) for key in statusdata["endpoints"]: tip=statusdata['endpoints'][key]['status'] status_work=statusdata['endpoints'][key]['works'] if(key.find('images')>0): tip="\n\n!"+tip yield f"{key} [`{status_work}`] {tip} \n\n" timestamp = statusdata['updated_at'] yield "更新时间:" + str(datetime.datetime.fromtimestamp(timestamp)) return #根据endpoint调用模型 print(endpoint) if(endpoint=='chat/completions'): for msg in chat_completions(endpoint,model,messages): yield msg if(endpoint=='completions'): for msg in completions(endpoint,model): yield msg if(endpoint=='images/generations'): language = detect(prompt) print(language) if(language != 'en'): transendpoint='chat/completions' prompteng='' messages[-1]['content']=prompt + " translate into english" gpt_model='gpt-3.5-turbo' for msg in chat_completions(transendpoint,gpt_model,messages): prompteng+=msg yield msg print(prompteng) if(prompteng.find('"')>=0): prompt=prompteng.split('"')[-2] else: prompt=prompteng.replace('chat/completions-gpt-4:','').replace('\n','') for msg in image_gen(endpoint,model,prompt): yield msg if(endpoint=='embeddings'): for msg in word_embeddings(endpoint,model,prompt): yield msg if(endpoint=='moderations'): censorship='' for msg in moderations(endpoint,model): yield msg censorship=censorship+msg+"\n\n" if(censorship.find('审核未通过')>=0): print(censorship) if(endpoint=='audio/transcriptions'): yield endpoint+"-"+model+":暂时未开发\n\n" ''' for msg in audio_transcriptions(endpoint,model): yield msg ''' if(len(messages)>=2): if(messages[-2]['role']=='system' and bool(messages[-2]['content'])): net_research=re.sub(r'\[(\d+)\]', r'\n\n[\1]', messages[-2]['content']) net_research = re.sub(r'(https?://\S+)', r'[\1](\1)', net_research) yield '\n\n' + net_research params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ '(%s)' % ', '.join( [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
[ "content", "chat/completions-gpt-4:", "\n" ]
2024-01-10
MohamedAbdeen21/News-Recommender
models~recommender~LDA_Generation.py
# from click import group
# from gensim.models import CoherenceModel
from gensim.utils import simple_preprocess
# from gensim.test.utils import datapath
from nltk.stem import WordNetLemmatizer
# from pprint import pprint
import nltk
from nltk.corpus import stopwords
from gensim.matutils import cossim
from datetime import date, timedelta, datetime
import gensim.corpora as corpora
# import os
import pandas as pd
# import re
import gensim
import requests
# import json


def run(today = ''):
    from reco_group import users_group

    if today == '':
        today = datetime.date(datetime.today() - timedelta(days = 1) + timedelta(hours = 2))
        today = datetime.strftime(today, '%Y-%m-%d')

    d = "http://api:8000/articles/{}".format(today)
    req = requests.get(d)
    j_data = req.json()

    if j_data != []:
        title = []
        text = []
        url = []
        for i in j_data:
            title.append(i['title'])
            text.append(i['text'])
            url.append(i['url'])

        df = pd.DataFrame({'text': text, 'title': title, 'url': url})

        lem = WordNetLemmatizer()

        # simulates articles input
        # df = pd.read_csv('/home/alnaggar/PBL/data-1653249353296.csv')

        lda_model = gensim.models.LdaMulticore.load("./lda_model.model")

        def sent_to_words(sentences):
            for sentence in sentences:
                # deacc=True removes punctuations
                yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))

        data = df.text.values.tolist()
        data_words = list(sent_to_words(data))
        # remove stop words

        # Create Dictionary
        id2word = corpora.Dictionary(data_words)
        # Create Corpus
        texts = data_words
        # Term Document Frequency
        corpus = [id2word.doc2bow(text) for text in texts]
        # View
        print(corpus[:1][0][:30])

        # a function that creates group features
        def groups_processing(df):
            df = df.sort_values(by = 'group')
            feature = df['feature'].tolist()
            g_id = df['group'].tolist()
            temp = ''
            g_feature = []
            for y in list(df['group'].unique()):
                for z in range(0, len(feature) - 1):
                    if y == g_id[z]:
                        temp = temp + feature[z] + ' '
                g_feature.append(temp)
                temp = ''
            return g_feature

        g = groups_processing(users_group)

        data_g = list(sent_to_words(g))
        # remove stop words

        # Create Dictionary
        id2word_g = corpora.Dictionary(data_g)
        # Create Corpus
        texts_g = data_g
        # Term Document Frequency
        corpus_g = [id2word_g.doc2bow(text) for text in texts_g]

        group_vecs = []
        for x in corpus_g:
            group_vecs.append(lda_model.get_document_topics(x))
        print(len(corpus_g))

        def generate_recommendations(df, corpus, groups):
            scores = []
            art = 0
            for x in corpus:
                art_vec = lda_model.get_document_topics(x)
                for y in group_vecs:
                    score = cossim(y, art_vec)
                    scores.append(score)
                max_score = max(scores)
                g_ind = scores.index(max_score)
                # send article url with group id
                d_send = {"groupid": "", "url": ""}
                d_send['groupid'] = g_ind
                d_send['url'] = df['url'].loc[art]
                # print(d_send)
                send = requests.post('http://api:8000/recommend/', json = d_send)
                scores = []
                art += 1
            return

        generate_recommendations(df, corpus, group_vecs)
[]
2024-01-10
MohamedAbdeen21/News-Recommender
models~recommender~LDA_Training.py
# from wordcloud import WordCloud
# from matplotlib import pyplot as plt
# from gensim.models import CoherenceModel
from gensim.utils import simple_preprocess
from gensim.test.utils import datapath
from nltk.stem import WordNetLemmatizer
# from pprint import pprint
from nltk.corpus import stopwords
# from datetime import date, timedelta
import requests
import gensim.corpora as corpora
import pandas as pd
import re
import gensim
import nltk
# import os


def run():
    d = "http://api:8000/all/"
    req = requests.get(d)
    j_data = req.json()

    if j_data != []:
        title = []
        text = []
        for i in j_data:
            title.append(i['title'])
            text.append(i['text'])

        df = pd.DataFrame({'text': text, 'title': title})
        # df = pd.read_csv('/home/alnaggar/PBL/data-1653249353296.csv')
        # df.columns = ['sk', 'url', 'title', 'text', 'tags', 'count', 'date', 'summary']
        print(df)

        news = df.copy()
        news['feature'] = news['title'] + ' ' + news['text']

        # Removing punctuation from the text column
        news['feature'] = news['feature'].map(lambda x: re.sub('[,\.!?]', '', x))
        # Convert the titles to lowercase
        news['feature'] = news['feature'].map(lambda x: x.lower())
        # news = news.drop(columns = ['title','url','tags','count', 'summary', 'date'])
        news = news.fillna('')

        # Displaying most repeated words in the text through wordclouds
        # Join the different processed documents together.
        long_string = ','.join(list(news['feature'].values))
        # Create a WordCloud object
        # wordc = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
        # Visualize and generate word cloud
        # plt.imshow(wordc.generate(long_string))
        # plt.axis("off")
        # plt.show()
        # image = wordc.to_image()
        # image.show()

        lem = WordNetLemmatizer()

        def sent_to_words(sentences):
            for sentence in sentences:
                # deacc=True removes punctuations
                yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))

        data = news.feature.values.tolist()
        data_words = list(sent_to_words(data))
        # remove stop words

        # Create Dictionary
        id2word = corpora.Dictionary(data_words)
        # Create Corpus
        texts = data_words
        # Term Document Frequency
        corpus = [id2word.doc2bow(text) for text in texts]
        # View
        # print(corpus[:1][0][:30])

        # number of topics
        num_topics = 10
        # Build LDA model
        lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=id2word, num_topics=num_topics)
        # Print the Keyword in the topics
        # pprint(lda_model.print_topics())
        # print(lda_model.get_document_topics(corpus[0]))
        # doc_lda = lda_model[corpus]

        # Save model to disk.
        temp_file = datapath("./lda_model.model")
        lda_model.save(temp_file)

        # Uncomment this to see topics analysis
        """"
        #Visulaizing the keywords and probability distribution
        import pyLDAvis
        import pyLDAvis.gensim_models
        import pickle

        today = date.today()

        # # this is a bit time consuming - make the if statement True
        # # if you want to execute visualization prep yourself
        LDAvis_prepared = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)

        LDAvis_data_filepath = os.path.join('/home/alnaggar/Documents/PBL Data/davis/' + str(num_topics) + today.strftime("%y-%m-%d"))

        # # this is a bit time consuming - make the if statement True
        # # if you want to execute visualization prep yourself
        if 1 == 1:
            LDAvis_prepared = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)
            with open(LDAvis_data_filepath, 'wb') as f:
                pickle.dump(LDAvis_prepared, f)

        # load the pre-prepared pyLDAvis data from disk
        with open(LDAvis_data_filepath, 'rb') as f:
            LDAvis_prepared = pickle.load(f)

        pyLDAvis.save_html(LDAvis_prepared, '/media/alnaggar/F47C61617C611F9A/PBL Data/davis/' + str(num_topics) + today.strftime("%y-%m-%d") + '.html')

        LDAvis_prepared

        # Compute Perplexity
        print('\nPerplexity: ', lda_model.log_perplexity(corpus))  # a measure of how good the model is. lower the better.

        # Compute Coherence Score
        coherence_model_lda = CoherenceModel(model=lda_model, texts=data_words, dictionary=id2word, coherence='c_v')
        coherence_lda = coherence_model_lda.get_coherence()
        print('\nCoherence Score: ', coherence_lda)
        """
[]
2024-01-10
GeorgiStavrev/squad-service
src~services~run_service.py
import os
from typing import List
from types import ModuleType
from inspect import getmembers, isfunction

import openai
from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.tools import StructuredTool

openai.api_key = os.getenv("OPENAI_API_KEY")
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")


def run(module: ModuleType, query: str) -> str:
    tools = [StructuredTool.from_function(fn)
             for _, fn in getmembers(module, isfunction)
             if hasattr(fn, "__isskill")]

    agent = initialize_agent(
        tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)

    return agent.run(query)
[]
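A minimal sketch of a module that run() in run_service.py above could load, assuming skills are plain functions flagged with an __isskill attribute; the skill decorator and get_weather function below are illustrative assumptions and do not appear in the repository:

# Hypothetical skill module; the `skill` decorator and `get_weather` function
# are assumptions based only on the hasattr(fn, "__isskill") filter in run().
def skill(fn):
    fn.__isskill = True  # mark the function so run() exposes it as a StructuredTool
    return fn


@skill
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"The weather in {city} is sunny."


# Example call (also an assumption):
# import sys
# run(sys.modules[__name__], "What is the weather in Paris?")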
2024-01-10
Nearcyan/papers.day
scrape_abs.py
import argparse import shutil import tempfile import django import fitz from openai import OpenAI import random import requests import re from scholarly import scholarly # if this breaks, run pip install --upgrade httpx from scholarly import ProxyGenerator from datetime import datetime from bs4 import BeautifulSoup from django.core.files.base import ContentFile from django.conf import settings import tarfile import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'papers.settings') django.setup() from backend.models import ArxivPaper, Author, Subject, PaperImage, PaperSource def extract_tar_gz(file_path: str, output_dir: str) -> None: """ Extract a tar.gz file to the specified output directory :param file_path: The path to the tar.gz file :param output_dir: The directory to extract the tar.gz file to :return: None """ with tarfile.open(file_path, 'r:gz') as tar: tar.extractall(output_dir) def create_image_objects(directory: str, paper) -> list: """ Given a directory which contains images, this function will create PaperImage objects for each image :param directory: The directory containing the images :return: The list of PaperImage objects """ image_files = [os.path.join(root, f) for root, _, files in os.walk(directory) for f in files if f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))] images = [] for image_file in image_files: image_path = os.path.join(directory, image_file) with open(image_path, 'rb') as file: filename = paper.arxiv_id + '_' + os.path.basename(image_path) django_file = ContentFile(file.read(), name=filename) image = PaperImage(image=django_file, paper=paper) image.save() images.append(image) return images def create_tex_files(directory: str, paper) -> list: """ Given a directory which contains tex files, this function will create PaperSource objects for each tex file :param directory: The directory containing the tex files :return: The list of PaperSource objects """ tex_files = [f for f in os.listdir(directory) if f.lower().endswith('.tex')] sources = [] for tex_file in tex_files: tex_path = os.path.join(directory, tex_file) with open(tex_path, 'r') as f: tex_content = f.read() source = PaperSource(content=tex_content, paper=paper) source.save() sources.append(source) return sources def delete_files(directory: str) -> None: """ Delete all files in a directory :param directory: The directory to delete the files from :return: None """ for root, dirs, files in os.walk(directory): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) def get_paper_screenshot_from_pdf(pdf_path) -> str: """ Get a screenshot of the first page of the pdf :param pdf_path: The path to the pdf :return: The path to the screenshot """ try: pdf = fitz.open(pdf_path) page = pdf.load_page(0) pix = page.get_pixmap(alpha=False) random_int = random.randint(0, 1000000) temp_filename = f'temp_{random_int}.png' pix.save(temp_filename, "png") return temp_filename except Exception as e: print(f'Error occurred while getting screenshot of pdf: {pdf_path}') return None def get_paper_summary_from_abstract(abstract: str) -> str: """ Get a summary of the paper from the abstract :param abstract: The abstract of the paper :return: The summary of the paper """ client = OpenAI() client.api_key = settings.OPENAI_API_KEY prompt = f"Summarize the following AI paper abstract in two sentences:\nAbstract: {abstract}\nSummary:" response = client.completions.create( model="text-davinci-003", prompt=prompt, temperature=0.9, max_tokens=512, top_p=1, frequency_penalty=0, 
presence_penalty=0.6, ) summary = response.choices[0].text return summary.strip() def scrape_paper(arxiv_id, google_scholar=False): """ Scrape the paper with the given arxiv_id and save it to the database :param arxiv_id: The arxiv_id of the paper :param google_scholar: True if google scholar lookups should be performed, else false :return: The saved ArxivPaper object """ # Send a GET request to the URL and retrieve the HTML content url = f'https://arxiv.org/abs/{arxiv_id}' if ArxivPaper.objects.filter(arxiv_id=arxiv_id).exists(): print(f'[{arxiv_id}] Paper with id {arxiv_id} already exists') return None else: print(f'[{arxiv_id}] Scraping paper: {url}') try: response = requests.get(url) html_content = response.content except Exception as e: print(f'[{arxiv_id}] Error occurred while scraping {url}') return None # Create a BeautifulSoup object to parse the HTML soup = BeautifulSoup(html_content, 'html.parser') # Get the title title_tag = soup.find('h1', class_='title') title = title_tag.get_text(strip=True) title = re.sub(r'Title:', '', title) print(f'[{arxiv_id}] Title: {title}') # Get the abstract abstract_tag = soup.find('blockquote', class_='abstract') abstract = abstract_tag.get_text(strip=True) # remove various things abstract = re.sub(r'Abstract:', '', abstract) abstract = re.sub(r'\n', ' ', abstract) abstract = re.sub(r' ', ' ', abstract) # Get the authors author_div = soup.find('div', class_='authors') author_tags = author_div.find_all('a') authors = [author.get_text(strip=True) for author in author_tags] # Get the primary subject primary_subject = soup.find('span', class_='primary-subject').get_text(strip=True) short_name = primary_subject.split('(')[1].replace(')', '').strip() full_name = primary_subject.split('(')[0].strip() print(f'[{arxiv_id}] Primary subject: {short_name} - {full_name}') prim_subject = Subject.objects.filter(short_name=short_name).first() if not prim_subject: prim_subject = Subject.objects.create(short_name=short_name, full_name=full_name) print(f'[{arxiv_id}] Creating subject: {short_name} - {full_name}') # get everything inside of 'subjects' that is not in a <span>: subject_div = soup.find('td', class_='subjects') subject_text = subject_div.get_text(strip=True) subject_text = re.sub(r'<span.*span>', '', subject_text) subject_list = subject_text.split(';') subject_list = [subject.strip() for subject in subject_list] subjects = [subject for subject in subject_list if subject] jref = soup.find('td', class_='tablecell jref') if jref: jref = jref.get_text(strip=True) jref = re.sub(r'Journal ref:', '', jref) jref = re.sub(r'\n', '', jref) jref = re.sub(r' ', '', jref) print(f'[{arxiv_id}] Journal ref: {jref}') else: jref = None comments = soup.find('td', class_='tablecell comments') if comments: comments = comments.get_text(strip=True) comments = re.sub(r'Comments:', '', comments) comments = re.sub(r'\n', '', comments) comments = re.sub(r' ', '', comments) print(f'[{arxiv_id}] Comments: {comments}') else: comments = None doi = soup.find('td', class_='tablecell arxivdoi') if doi: doi = doi.find('a') doi = doi.get_text(strip=True) doi = re.sub(r'DOI:', '', doi) doi = re.sub(r'\n', '', doi) doi = re.sub(r' ', '', doi) print(f'[{arxiv_id}] DOI: {doi}') else: doi = None # Get the date date_tag = soup.find('div', class_='dateline') date_string = date_tag.get_text(strip=True) date_string = re.sub(r' \(v.*\)', '', date_string) date_match = re.search(r'\[Submitted on (.+)\]', date_string) if date_match: date_string = date_match.group(1) date = 
datetime.strptime(date_string, '%d %b %Y').date() else: date = None # Download the pdf pdf_url = f'https://arxiv.org/pdf/{arxiv_id}.pdf' try: pdf_response = requests.get(pdf_url) if pdf_response.status_code != 200: print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}') return None except Exception as e: print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}: {e}') return None pdf_content = pdf_response.content pdf_file = ContentFile(pdf_content, name=f'{arxiv_id}.pdf') # Download the source source_url = f'https://arxiv.org/e-print/{arxiv_id}' try: source_response = requests.get(source_url) print(f'[{arxiv_id}] Downloading source from {source_url}') if source_response.status_code != 200: print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}') return None except Exception as e: print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}: {e}') return None source_content = source_response.content source_tar = ContentFile(source_content, name=f'{arxiv_id}.tar.gz') paper = ArxivPaper.objects.create(title=title, abstract=abstract, publication_date=date, arxiv_id=arxiv_id, doi=doi, pdf=pdf_file, primary_subject=prim_subject, journal_ref=jref, comment=comments, source_tar=source_tar) # extract the source: temp_dir = tempfile.mkdtemp() try: extract_tar_gz(paper.source_tar.path, temp_dir) # grab all images from the source: images = create_image_objects(temp_dir, paper) for image in images: paper.images.add(image) print(f'[{arxiv_id}] Added {len(images)} images') sources = create_tex_files(temp_dir, paper) for source in sources: paper.sources.add(source) print(f'[{arxiv_id}] Added {len(sources)} sources') except Exception as e: print(f'[{arxiv_id}] Error occurred while extracting source: {e}') # not a fatal exception, some papers do not provide tar.gz files and the source can just be e.g. 
a pdf finally: delete_files(temp_dir) # Get a screenshot screenshot_path = get_paper_screenshot_from_pdf(paper.pdf.path) if screenshot_path: screenshot = ContentFile(open(screenshot_path, 'rb').read(), name=f'{arxiv_id}.png') paper.screenshot = screenshot os.remove(screenshot_path) # get a summary try: summary = get_paper_summary_from_abstract(paper.abstract) paper.summary = summary paper.save() except Exception as e: print(f"Exception while generating completion: {e}") paper.delete() return None # get number of citations if google_scholar: try: search_query = scholarly.search_pubs(f'"{paper.title}"', patents=False, citations=False) first_paper_result = next(search_query) citations = first_paper_result['num_citations'] paper.citations = citations paper.save() print(f'[{arxiv_id}] Citations: {citations}') if citations > 1000: interesting_paper = True print(f'[{arxiv_id}] Interesting paper: {citations} citations') except Exception as e: print(f'[{arxiv_id}] Could not find paper on Google Scholar') total_author_citations = 0 for author_name in authors: # get author if exists: author = Author.objects.filter(name=author_name).first() if not author and google_scholar: search_query = scholarly.search_author(author_name) try: first_author_result = next(search_query) affiliation = first_author_result['affiliation'] email_domain = first_author_result['email_domain'].replace('@', '') scolar_id = first_author_result['scholar_id'] citations = first_author_result['citedby'] author = Author.objects.create(name=author_name, affiliation=affiliation, email_domain=email_domain, scholar_id=scolar_id, citations=citations) print(f'[{arxiv_id}] Author created: {author} [affiliation: {affiliation}, email_domain: {email_domain}, citations: {citations}]') except StopIteration: author = Author.objects.create(name=author_name) print(f'[{arxiv_id}] Author created: {author}, could not find more information') except KeyError: author = Author.objects.create(name=author_name) print(f'[{arxiv_id}] Author created: {author}, key error') except Exception as e: author = Author.objects.create(name=author_name) print(f'[{arxiv_id}] [Google Scholar Lookup Failed] Author created: {author}') elif not author: author = Author.objects.create(name=author_name) print(f'[{arxiv_id}] Author created: {author}, no GS lookup') total_author_citations += author.citations paper.authors.add(author) paper.total_author_citations = total_author_citations if total_author_citations > 100000: print(f'[{arxiv_id}] Interesting paper: {total_author_citations} total author citations') for subject_name in subjects: short_name = subject_name.split('(')[1].replace(')', '').strip() full_name = subject_name.split('(')[0].strip() print(f'[{arxiv_id}] Subject: {short_name} - {full_name}') subject = Subject.objects.filter(short_name=short_name).first() if not subject: subject = Subject.objects.create(short_name=short_name, full_name=full_name) print(f'[{arxiv_id}] Creating subject: {short_name} - {full_name}') paper.subjects.add(subject) paper.save() print(f'[{arxiv_id}] Paper saved: {paper}') print(f'[{arxiv_id}] [INTERESTING] Paper was interesting!: {paper}') return paper def scrape_papers_from_list(section, num_papers, page, google_scholar=False): """ Given a list url such as https://arxiv.org/list/cs.LG/pastweek?show=557, we get all paper IDs on the results page and then scrape each paper into our DB :param section: the section of the paper, e.g. 
cs.LG :param num_papers: the number of papers to scrape :param page: the page to get papers to scrape from :param google_scholar: whether to scrape google scholar for citations :return: None """ # Send a GET request to the webpage list_url = f'https://arxiv.org/list/{section}/{page}?show={num_papers}' response = requests.get(list_url) # Create a BeautifulSoup object to parse the HTML content soup = BeautifulSoup(response.content, 'html.parser') # Find all span tags with class "list-identifier" span_tags = soup.find_all('span', class_='list-identifier') # Extract the paper IDs from the anchor tags paper_ids = [] for span_tag in span_tags: # Find the 'a' element within the span tag a_tag = span_tag.find('a') if a_tag and '/abs/' in a_tag['href']: # Extract the text from the 'a' element paper_id = a_tag.text.strip() paper_id = paper_id.replace('arXiv:', '') paper_ids.append(paper_id) # Print the extracted paper IDs for paper_id in paper_ids: print(f'Found paper ID: {paper_id}') scrape_paper(paper_id, google_scholar) def parse_arguments(): parser = argparse.ArgumentParser(description='Process paper details') parser.add_argument('-n', '--num_papers', type=int, default=500, help='Number of papers to scrape') parser.add_argument('-s', '--section', type=str, default='cs.LG', help='Section of arxiv to scrape from') parser.add_argument('-p', '--page', type=str, default='pastweek', help='Page from arxiv to scrape from') parser.add_argument('-gs', '--google_scholar', type=bool, default=False, help='Enable/Disable google scholar lookups') args = parser.parse_args() return args if __name__ == '__main__': args = parse_arguments() if args.google_scholar: print(f'Using google scholar') pg = ProxyGenerator() pg.FreeProxies() scholarly.use_proxy(pg) else: print(f'Not using google scholar') scrape_papers_from_list(args.section, args.num_papers, args.page, args.google_scholar)
[ "Summarize the following AI paper abstract in two sentences:\nAbstract: PLACEHOLDER\nSummary:" ]
2024-01-10
nashid/cure-public
src~trainer~gpt_conut_trainer.py
import os import sys import json import time import codecs import random import numpy as np import torch import torch.nn as nn from transformers import OpenAIGPTLMHeadModel, OpenAIGPTConfig GPT_CONUT_TRAINER_DIR = os.path.abspath(__file__)[: os.path.abspath(__file__).rindex('/') + 1] sys.path.append(GPT_CONUT_TRAINER_DIR + '../models/') sys.path.append(GPT_CONUT_TRAINER_DIR + '../dataloader/') from gpt_conut import GPTCoNuTModel from dictionary import Dictionary from gpt_conut_data_loader import GPTCoNuTDataLoader class GPTCoNuTTrainer(): def __init__(self, train_loader, valid_loader, dictionary, gpt_file): # gpt_loaded = torch.load(gpt_file) # config = gpt_loaded['config'] # gpt_model.load_state_dict(gpt_loaded['model']) configuration = OpenAIGPTConfig() gpt_model = OpenAIGPTLMHeadModel(configuration).cuda() if torch.cuda.is_available() else OpenAIGPTLMHeadModel(configuration) self.train_loader = train_loader self.valid_loader = valid_loader self.dictionary = dictionary self.batch_size = 12 self.load_size = 1200 # load 1200 samples from training data every time self.gpt_model = gpt_model self.model = None self.hyper_parameter = {} self.optimizer = None self.current_train_step = 0 self.val_loss = {} def shuffle_dataset(self): indices = [i for i in range(len(self.train_loader.dataset))] random.shuffle(indices) return indices def train_step(self, samples): self.model.train() self.current_train_step += 1 self.optimizer.zero_grad() batch = self.train_loader.dataset.collater(samples) if torch.cuda.is_available(): outputs = self.model( batch['net_input']['src_tokens'].cuda(), batch['net_input']['src_with_prev_context'].cuda(), batch['net_input']['ctx_tokens'].cuda(), prev_tokens_index=batch['target_index'].cuda(), prev_tokens_with_context=batch['target_with_prev_context'].cuda(), labels=batch['target'].cuda(), ) else: outputs = self.model( batch['net_input']['src_tokens'], batch['net_input']['src_with_prev_context'], batch['net_input']['ctx_tokens'], prev_tokens_index=batch['target_index'], prev_tokens_with_context=batch['target_with_prev_context'], labels=batch['target'], ) logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4] loss = apr_loss + 0.3 * lm_loss loss.mean().backward() nn.utils.clip_grad_norm_(self.model.parameters(), 0.5, norm_type=2) self.optimizer.step() return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item() def valid_step(self, samples): self.model.eval() batch = self.valid_loader.dataset.collater(samples) outputs = self.model( batch['net_input']['src_tokens'].cuda(), batch['net_input']['src_with_prev_context'].cuda(), batch['net_input']['ctx_tokens'].cuda(), prev_tokens_index=batch['target_index'].cuda(), prev_tokens_with_context=batch['target_with_prev_context'].cuda(), labels=batch['target'].cuda(), ) logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4] loss = apr_loss + 0.3 * lm_loss return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item(), logits def validate_and_save(self, model_id, save_dir): oom = 0 with torch.no_grad(): val_loss, val_fconv_loss, val_lm_loss = [], [], [] for i in range(0, self.valid_loader.total_size, self.batch_size): samples = [self.valid_loader.dataset[j] for j in range(i, min(len(self.valid_loader.dataset), i + self.batch_size))] try: loss, fconv_loss, lm_loss, logits = self.valid_step(samples) val_loss.append(float(loss)) val_fconv_loss.append(float(fconv_loss)) val_lm_loss.append(float(lm_loss)) except Exception as e: oom += 1 info = 'val loss:{}, val apr_loss:{}, val lm_loss:{}, val ppl:{}, oom:{}'.format( 
round(float(np.mean(val_loss)), 6), round(float(np.mean(val_fconv_loss)), 6), round(float(np.mean(val_lm_loss)), 6), round(float(np.exp(np.mean(val_loss))), 6), oom ) print(info) val_loss = np.mean(val_fconv_loss) checkpoint = { 'model': self.model.module.state_dict(), 'optimizer': self.optimizer.state_dict(), 'current_step': self.current_train_step, 'config': self.model.module.config(), 'val_loss': val_loss, } torch.save(checkpoint, save_dir + 'gpt_conut_' + str(model_id) + '.pt') self.val_loss[model_id] = { 'val_loss': val_loss, 'hyper-parameter': str(self.hyper_parameter), } return val_loss def train(self, model_id, epochs, hyper_parameter, save_dir): self.hyper_parameter = hyper_parameter if torch.cuda.is_available(): self.model = GPTCoNuTModel( self.dictionary, embed_dim=384, max_positions=1024, src_encoder_convolutions=self.hyper_parameter['src_encoder_convolutions'], ctx_encoder_convolutions=self.hyper_parameter['ctx_encoder_convolutions'], decoder_convolutions=self.hyper_parameter['decoder_convolutions'], dropout=self.hyper_parameter['dropout'], embed_model=self.gpt_model, ).cuda() else: self.model = GPTCoNuTModel( self.dictionary, embed_dim=384, max_positions=1024, src_encoder_convolutions=self.hyper_parameter['src_encoder_convolutions'], ctx_encoder_convolutions=self.hyper_parameter['ctx_encoder_convolutions'], decoder_convolutions=self.hyper_parameter['decoder_convolutions'], dropout=self.hyper_parameter['dropout'], embed_model=self.gpt_model, ) self.optimizer = torch.optim.Adam(self.model.parameters(), lr=6.25e-5) self.model = nn.DataParallel(self.model, device_ids=device_ids) self.valid_loader.load_data(0, self.valid_loader.total_size) for epoch in range(epochs): start_time = time.time() for i in range(0, self.train_loader.total_size, self.load_size): oom = 0 self.train_loader.load_data(i, i + self.load_size) indices = self.shuffle_dataset() train_loss, train_apr_loss, train_lm_loss = [], [], [] start, end = 0, 0 samples = [] max_src, max_ctx, max_tgt = 0, 0, 0 while end < len(self.train_loader.dataset): sample = self.train_loader.dataset[indices[end]] if max_ctx + len(sample['target']) >= 1023 \ or max_tgt + len(sample['prev_context']) >= 1023 \ or max_ctx + len(sample['source']) >= 1023 \ or max_src + len(sample['prev_context']) >= 1023 \ or end - start == self.batch_size: try: loss, apr_loss, lm_loss = self.train_step(samples) train_loss.append(loss) train_apr_loss.append(apr_loss) train_lm_loss.append(lm_loss) except Exception as e: oom += 1 start = end max_src, max_ctx, max_tgt = 0, 0, 0 samples = [] continue max_src = max(max_src, len(sample['source'])) max_ctx = max(max_ctx, len(sample['prev_context'])) max_tgt = max(max_tgt, len(sample['target'])) end += 1 samples.append(sample) if len(samples) > 0: try: loss, apr_loss, lm_loss = self.train_step(samples) train_loss.append(loss) train_apr_loss.append(apr_loss) train_lm_loss.append(lm_loss) except Exception as e: oom += 1 if (i // self.load_size) % 10 == 0: info = 'epoch:{}, load data:{}, lr:{}, loss:{}, apr_loss:{}, lm_loss:{}, time:{}s, oom:{}'.\ format(epoch + 1, i + self.load_size, round(self.optimizer.param_groups[0]['lr'], 10), round(float(np.mean(train_loss)), 6), round(float(np.mean(train_apr_loss)), 6), round(float(np.mean(train_lm_loss)), 6), int(time.time() - start_time), oom ) start_time = time.time() print(str(model_id) + ' ' + info) if (i // self.load_size) % 100 == 0: self.validate_and_save(model_id, save_dir) self.validate_and_save(model_id, save_dir) if __name__ == '__main__': device_ids = [0, 1, 2, 3] 
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" vocab_file = GPT_CONUT_TRAINER_DIR + '../../data/data/js/vocabulary.txt' train_file = GPT_CONUT_TRAINER_DIR + '../../data/data/js/training_bpe.txt' valid_file = GPT_CONUT_TRAINER_DIR + '../../data/data/js/validation_bpe.txt' gpt_file = GPT_CONUT_TRAINER_DIR + '../../data/models/code_gpt.pt' dictionary = Dictionary(vocab_file, min_cnt=0) print('dictionary initialized, vocab size:{}'.format(len(dictionary))) train_loader = GPTCoNuTDataLoader(train_file, dictionary) valid_loader = GPTCoNuTDataLoader(valid_file, dictionary) print('data loader initialized, train size:{}, validate size:{}'. format(train_loader.total_size, valid_loader.total_size)) trainer = GPTCoNuTTrainer(train_loader, valid_loader, dictionary, gpt_file) hyper_parameter = { 'src_encoder_convolutions': ((192, 5),) * 1, 'ctx_encoder_convolutions': ((384, 5),) * 1, 'decoder_convolutions': ((192, 5),) * 1, 'dropout': 0.1, } model_id = 1 epochs = 5 trainer.train(model_id, epochs, hyper_parameter, save_dir=os.path.abspath(os.path.join(GPT_CONUT_TRAINER_DIR, '..', '..', 'data/models/')))
[]
2024-01-10
Bobliuuu/MelodicMind
backend~pages~Talk%20About%20It.py
# Code refactored from Vercel Chatbot tutorial
import openai
import streamlit as st

with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")

st.title("Mental Health Chatbot")

if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]

for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input():
    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
        st.stop()

    openai.api_key = openai_api_key
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
    msg = response.choices[0].message
    st.session_state.messages.append(msg)
    st.chat_message("assistant").write(msg.content)
[ "How can I help you?" ]
2024-01-10
Bobliuuu/MelodicMind
backend~pages~Add%20Memory.py
import streamlit as st
import requests
import openai
import json
import w3storage

st.title('MelodicMind - Add Memory')
st.write('Think of a memory you want to remember and fill in the details below.')

# Ask the user for memory details
memory_description = st.text_area("Memory Description", "Enter your memory description here...")
memory_image = st.file_uploader("Upload an image for the memory (optional)", type=["jpg", "jpeg", "png"])
generate_image = st.checkbox('Generate Image')


def make_stable_diffusion_request(description):
    url = f"http://a1ac-35-185-225-62.ngrok-free.app/predict?prompt={description}"
    response = requests.get(url)
    print(json.loads(response.text)['url'])
    return json.loads(response.text)['url']


CHAIN = option = st.selectbox('Chain to mint on', ('goerli', 'bsc-testnet', 'fuji', 'mumbai', 'arbitrum-goerli', 'optimism-goerli', 'avalanche', 'ethereum', 'polygon', 'optimism', 'fantom'))
ADDRESS = st.text_input('Address: ')

if st.button("Add Memory"):
    if not memory_description.strip():
        st.warning("Please enter a memory description.")
    else:
        st.info("Adding memory...")

        if not memory_image and generate_image:
            stable_diffusion_image_url = make_stable_diffusion_request(memory_description)
            if stable_diffusion_image_url:
                st.image(stable_diffusion_image_url, caption='Stable Diffusion Result', use_column_width=True)
            else:
                st.warning("No image generated by Stable Diffusion.")

        st.success("Memory added successfully!")

        w3 = w3storage.API(token='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJkaWQ6ZXRocjoweGNCOTU4NjIxRjVmNjcxNkM4QTlhRDZkRWQ4MUVDRTQzZGQwQzFjOGUiLCJpc3MiOiJ3ZWIzLXN0b3JhZ2UiLCJpYXQiOjE2OTEzMDM2ODMwNjEsIm5hbWUiOiJjb2xhYiJ9.RIio_qk4aEyLJaBUjmemiPm0zTFJ4K_kHN2AUhex9Jw')

        if not generate_image:
            some_uploads = w3.user_uploads(size=25)
            cid = w3.post_upload(('images/mountains.jpg', open('images/mountains.jpg', 'rb')))
        else:
            some_uploads = w3.user_uploads(size=25)
            cid = w3.post_upload(('images/generated.png', open('images/generated.png', 'rb')))

        print(cid + '.ipfs.w3s.link')
        URL = f"https://ipfs.io/{cid}"

        if CHAIN and ADDRESS:
            url = "https://api.verbwire.com/v1/nft/mint/quickMintFromMetadataUrl"
            headers = {
                "X-API-Key": 'sk_live_0fde2ba6-36f7-49d5-a8bd-cd8ca6cea988',
                "accept": "application/json",
            }
            data = {
                "allowPlatformToOperateToken": "true",
                "chain": CHAIN,
                "metadataUrl": URL,
                "recipientAddress": ADDRESS,
            }
            response = requests.post(url, headers=headers, data=data)
            if response.status_code == 200:
                print("NFT mint successful!")
                st.success("View transaction here: " + json.loads(response.text)['quick_mint']['blockExplorer'])
            else:
                print("NFT minting failed!")
[]
2024-01-10
akashlinux10may/AkashlinuxGPT
run_localGPT.py
from langchain.chains import RetrievalQA
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from constants import CHROMA_SETTINGS, PERSIST_DIRECTORY
from transformers import LlamaTokenizer, LlamaForCausalLM, pipeline
from langchain.chains.question_answering import load_qa_chain
from langchain import HuggingFaceHub
import click
from constants import CHROMA_SETTINGS
import os

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "Your_HF_KEY"

from langchain.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings()


@click.command()
@click.option('--device_type', default='gpu', help='device to run on, select gpu or cpu')
def main(device_type, ):
    # load the instructorEmbeddings
    if device_type in ['cpu', 'CPU']:
        device = 'cpu'
    else:
        device = 'cuda'

    print(f"Running on: {device}")

    embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl", model_kwargs={"device": device})
    # load the vectorstore
    db = Chroma(persist_directory=PERSIST_DIRECTORY, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
    retriever = db.as_retriever()
    # Prepare the LLM
    # callbacks = [StreamingStdOutCallbackHandler()]
    # load the Google Flan XXL LLM for generating Natural Language responses.
    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 1, "max_length": 512})
    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)

    # Interactive questions and answers
    while True:
        query = input("\nEnter a query: ")
        if query == "exit":
            break

        # Get the answer from the chain
        res = qa(query)
        answer, docs = res['result'], res['source_documents']

        # Print the result
        print("\n\n> Question:")
        print(query)
        print("\n> Answer:")
        print(answer)


if __name__ == "__main__":
    main()
[]
2024-01-10
everydaycodings/SecondBrain
secondbrain~helpers~add_knowledge.py
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import Chroma
import streamlit as st
import tempfile, os, glob
from helpers.utils import load_embedding_model
from langchain.document_loaders import WikipediaLoader, UnstructuredURLLoader


class AddKnowledge:

    def __init__(self) -> None:
        pass

    def extract_pdf_content(self, files, chunk_size, chunk_overlap):
        with tempfile.TemporaryDirectory() as temp_dir:
            for uploaded_file in files:
                asset_path = os.path.join(temp_dir, str(uploaded_file.name).split(".")[0] + ".pdf")
                with open(asset_path, "wb") as f:
                    f.write(uploaded_file.getbuffer())

            loader = DirectoryLoader(temp_dir, glob="./*.pdf", loader_cls=PyPDFLoader)
            documents = loader.load()

        text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        texts = text_splitter.split_documents(documents)

        return texts

    def extract_wikepedia_content(self, prompt, max_docs, chunk_size, chunk_overlap):
        loader = WikipediaLoader(query=prompt, load_max_docs=max_docs)
        documents = loader.load()

        text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        texts = text_splitter.split_documents(documents)

        return texts

    def extract_url_content(self, url_text, chunk_size, chunk_overlap):
        loader = UnstructuredURLLoader(urls=[url_text])
        documents = loader.load()

        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len)
        texts = text_splitter.split_documents(documents)

        return texts

    def dump_embedding_files(self, texts, model_name, device_type, persist_directory):
        embedding = load_embedding_model(model_name, device_type)

        vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory="SecondBrain/secondbrain/database/{}".format(persist_directory))
        vectordb.persist()
        vectordb = None

        st.success("Knowledge Added To DataBase")
[]
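A short usage sketch for the AddKnowledge helper above, chaining extraction and embedding persistence; the search query, chunk sizes, embedding model, and collection name are assumptions for illustration:

# Hypothetical driver script; all concrete values below are assumptions.
from helpers.add_knowledge import AddKnowledge

knowledge = AddKnowledge()

# Pull a couple of Wikipedia pages, split them into chunks, and persist the embeddings.
texts = knowledge.extract_wikepedia_content(
    prompt="Large language model",   # search query (assumed)
    max_docs=2,
    chunk_size=1000,
    chunk_overlap=200,
)
knowledge.dump_embedding_files(
    texts,
    model_name="hkunlp/instructor-base",  # assumed embedding model passed to load_embedding_model
    device_type="cpu",
    persist_directory="wikipedia_llm",    # assumed Chroma collection folder name
)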
2024-01-10
everydaycodings/SecondBrain
secondbrain~helpers~wandering_brain.py
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.memory import ConversationBufferWindowMemory
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.utilities import SerpAPIWrapper
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents import initialize_agent
from langchain.agents import Tool
from langchain.agents import AgentType
import streamlit as st


@st.cache_resource
def load__internet_model(model_architecture, model_name, model_path, max_token, temp, top_p, top_k):

    local_path = '{}/{}'.format(model_path, model_name)  # replace with your desired local file path
    callbacks = CallbackManager([StreamingStdOutCallbackHandler()])

    if model_architecture == "GPT4ALL":
        model = GPT4All(model=local_path, verbose=True, callbacks=callbacks, n_predict=max_token, temp=temp, top_p=top_p, top_k=top_k)
    if model_architecture == "Llama-cpp":
        model = LlamaCpp(model_path=local_path, verbose=True, callbacks=callbacks, max_tokens=max_token, temperature=temp, top_p=top_p, top_k=top_k)

    search = DuckDuckGoSearchRun()
    tools = [
        Tool(
            name="Search",
            func=search.run,
            description="useful for when you need to answer questions about current events or the current state of the world"
        ),
    ]

    template = """The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.

    Current conversation:
    {history}
    Human: {input}
    AI Assistant:"""

    PROMPT = PromptTemplate(
        input_variables=["history", "input"], template=template
    )

    memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history")

    agent_chain = initialize_agent(tools=tools, llm=model, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)

    return agent_chain


@st.cache_resource
def load_model(model_architecture, model_name, model_path, max_token, temp, top_p, top_k):

    local_path = '{}/{}'.format(model_path, model_name)  # replace with your desired local file path
    callbacks = CallbackManager([StreamingStdOutCallbackHandler()])

    if model_architecture == "GPT4ALL":
        model = GPT4All(model=local_path, verbose=False, n_predict=max_token, temp=temp, top_p=top_p, top_k=top_k)
    if model_architecture == "Llama-cpp":
        model = LlamaCpp(model_path=local_path, callback_manager=callbacks, verbose=True, max_tokens=max_token, temperature=temp, top_p=top_p, top_k=top_k)

    template = """
    The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.

    Current conversation:
    {history}
    Human: {input}
    AI Assistant:"""

    PROMPT = PromptTemplate(
        input_variables=["history", "input"], template=template
    )

    conversation = ConversationChain(
        llm=model,
        verbose=True,
        memory=ConversationBufferWindowMemory(k=10),
        callbacks=callbacks,
        prompt=PROMPT
    )

    return conversation


class WanderingBrain:

    def __init__(self) -> None:
        pass

    def run_model(self, model_architecture, model_name, prompt, model_path, max_token, temp, top_p, top_k, is_internet=False):

        if model_architecture == "GPT4ALL":
            try:
                agent_chain = load_model(model_architecture=model_architecture, model_name=model_name, model_path=model_path[0], max_token=max_token, temp=temp, top_p=top_p, top_k=top_k)
            except:
                agent_chain = load_model(model_architecture=model_architecture, model_name=model_name, model_path=model_path[1], max_token=max_token, temp=temp, top_p=top_p, top_k=top_k)

        if model_architecture == "Llama-cpp":
            try:
                agent_chain = load_model(model_architecture=model_architecture, model_name=model_name, model_path=model_path[0], max_token=max_token, temp=temp, top_p=top_p, top_k=top_k)
            except:
                agent_chain = load_model(model_architecture=model_architecture, model_name=model_name, model_path=model_path[1], max_token=max_token, temp=temp, top_p=top_p, top_k=top_k)

        if is_internet == False:
            return agent_chain.predict(input=prompt)
        else:
            return agent_chain.predict(input=prompt)
[ "The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:", "\n The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.\n \n Current conversation:\n {history}\n Human: {input}\n AI Assistant:", "input" ]
2024-01-10
everydaycodings/SecondBrain
secondbrain~helpers~chat_with_brain.py
from helpers.utils import load_embedding_model
from langchain.vectorstores import Chroma
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.memory import ConversationBufferWindowMemory
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.utilities import SerpAPIWrapper
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents import initialize_agent
from langchain.agents import Tool
from langchain.agents import AgentType
from langchain.chains import RetrievalQA
import streamlit as st


@st.cache_resource  # original had a bare "st.cache_resource" statement; the decorator form is intended
def load_db(model_name, device, persist_directory, search_kwargs):

    embedding = load_embedding_model(model_name, device)
    vectordb = Chroma(persist_directory="SecondBrain/secondbrain/database/{}".format(persist_directory), embedding_function=embedding)

    return vectordb


@st.cache_resource
def load_model(db_model, device, persist_directory, search_kwargs, model_architecture, model_name, model_path, max_token, temp, top_p, top_k):

    local_path = '{}/{}'.format(model_path, model_name)  # replace with your desired local file path
    callbacks = CallbackManager([StreamingStdOutCallbackHandler()])
    db = load_db(model_name=db_model, device=device, persist_directory=persist_directory, search_kwargs=search_kwargs)

    if model_architecture == "GPT4ALL":
        model = GPT4All(model=local_path, callbacks=callbacks, verbose=True, n_predict=max_token, temp=temp, top_p=top_p, top_k=top_k)
    if model_architecture == "Llama-cpp":
        model = LlamaCpp(model_path=local_path, callback_manager=callbacks, verbose=True, max_tokens=max_token, temperature=temp, top_p=top_p, top_k=top_k)

    qa = RetrievalQA.from_chain_type(
        llm=model,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 3}),
        return_source_documents=True,
        verbose=False,
    )

    return qa


def run_model(db_model, device, persist_directory, search_kwargs, model_architecture, model_name, model_path, max_token, temp, top_p, top_k, prompt):

    try:
        qa = load_model(db_model, device, persist_directory, search_kwargs, model_architecture, model_name, model_path[0], max_token, temp, top_p, top_k)
    except:
        qa = load_model(db_model, device, persist_directory, search_kwargs, model_architecture, model_name, model_path[1], max_token, temp, top_p, top_k)

    res = qa(prompt)

    return res["result"]
[]
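A usage sketch for run_model above, mainly to show that model_path is expected to be a pair of candidate directories tried in order (model_path[0], then model_path[1] on failure); every concrete value below is an assumption:

# Hypothetical call; paths, model names, and sampling parameters are assumptions.
from helpers.chat_with_brain import run_model

answer = run_model(
    db_model="hkunlp/instructor-base",            # embedding model for the Chroma DB (assumed)
    device="cpu",
    persist_directory="wikipedia_llm",            # collection previously built by AddKnowledge (assumed)
    search_kwargs=3,
    model_architecture="GPT4ALL",
    model_name="ggml-gpt4all-j-v1.3-groovy.bin",  # assumed local model file
    model_path=["/models", "./models"],           # primary location, then fallback
    max_token=512,
    temp=0.7,
    top_p=0.9,
    top_k=40,
    prompt="Summarise what you know about large language models.",
)
print(answer)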
2024-01-10
everydaycodings/SecondBrain
secondbrain~helpers~source_embedding.py
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from helpers.utils import load_embedding_model
import streamlit as st
import tempfile, os


class ChatSourceEmbedding:

    def __init__(self) -> None:
        pass

    def embedding_chat(self, model_name, device, persist_directory, prompt, search_kwargs):

        embedding = load_embedding_model(model_name, device)

        vectordb = Chroma(persist_directory="SecondBrain/secondbrain/database/{}".format(persist_directory), embedding_function=embedding)
        retriever = vectordb.as_retriever(search_kwargs={"k": search_kwargs})

        docs = retriever.get_relevant_documents(prompt)

        return docs
[]
2024-01-10
zapccu/Home-AI
home-ai.py
# ############################################################################ # # home-ai.py # # A ChatGPT based home assistant # # (c) 2023 by Dirk Braner (zapccu) - [email protected] # # ############################################################################ import configparser as cp import argparse import speech_recognition as sr import boto3 import pyaudio import pygame import sys import os import openai import wave from contextlib import closing from botocore.exceptions import BotoCoreError, ClientError VERSION = "1.0.0" # Configuration CONFIG = cp.ConfigParser() # Set default parameters CONFIG['common'] = { 'activationWord': 'computer', 'duration': 3, 'energyThreshold': -1, 'sampleRate': 44100, 'audiofiles': os.path.dirname(os.path.realpath(__file__)) + "/audio" } CONFIG['commands'] = { 'stop': 'stop', 'mute': 'mute', 'unmute': 'unmute', 'terminate': 'terminate' } CONFIG['Google'] = { 'language': 'en-GB' } CONFIG['AWS'] = { 'awsKeyId': 'none', 'awsKeySecret': 'none', 'region': 'eu-west-2', 'pollyVoiceId': 'Brian', 'language': 'en-GB' } CONFIG['OpenAI'] = { 'openAIKey': 'none', 'openAILanguage': 'en', 'openAIModel': 'gpt-3.5-turbo' } CONFIG['messages'] = { 'welcome': 'Hello, I am your personal artificial intelligence. Please say the activation word {activationWword}, if you like to ask me anything.', 'didNotUnderstand': 'Sorry, I did not understand this', 'shutdown': 'Shutting down', 'genericError': 'Something went wrong', 'muted': 'I am currently inactive. Activate me to ask questions' } # Audio recording parameters READ_CHUNK = 4096 # Chunk size for output of audio data >4K CHANNELS = 1 # Mono BYTES_PER_SAMPLE = 2 # Bytes per sample # Log details level LOG_LEVEL = 0 # 0 = Print errors only, 1 = Print some more information, 2 = Print debug information, 3 = Print detected words # Mute OpenAI. 
1 = Do not listen for OpenAI queries SOFT_MUTE = 0 # ############################################################################ # Write logMessage messages # ############################################################################ def logMessage(level, message): if level <= LOG_LEVEL: print(message) # ############################################################################ # Error output via audio # ############################################################################ def errorOut(errorCode): if errorCode is None: return False if errorCode in CONFIG['messages']: logMessage(1, CONFIG['messages'][errorCode]) playAudioMessage(errorCode) return True else: return False # ############################################################################ # Play preconfigured audio message # ############################################################################ def playAudioMessage(messageKey): if messageKey in CONFIG['messages']: textToSpeech(CONFIG['messages'][messageKey], messageKey) else: logMessage(1, f"Message key {messageKey} not found in configuration") # ############################################################################ # Read configuration from file # ############################################################################ def readConfig(configFile): global CONFIG try: if not os.path.isfile(configFile): raise FileNotFoundError(f"Config file {configFile} not found.") logMessage(1, f"Reading config file {configFile} ...") CONFIG.read(configFile) # HomeAI won't work without API credentials if CONFIG['OpenAI']['openAIKey'] == 'none': raise ValueError("Open AI API key not configured") if CONFIG['AWS']['awsKeyId'] == 'none': raise ValueError("AWS key id not configured") if CONFIG['AWS']['awsKeySecret'] == 'none': raise ValueError("AWS key not configured") openai.api_key = CONFIG['OpenAI']['openAIKey'] CONFIG['messages']['welcome'] = CONFIG['messages']['welcome'].format(activationWord=CONFIG['common']['activationWord']) logMessage(2, "Control commands:") for controlWord,commandList in CONFIG.items('commands'): logMessage(2, f"{controlWord}: {commandList}") return True except ValueError as err: logMessage(0, err) except FileNotFoundError as err: logMessage(0, err) return False # ############################################################################ # Save audio to file # # audio: audio data # name: absolute name of audio file # ############################################################################ def saveRecordedAudio(audio, name): audioData = audio.get_raw_data() with wave.open(name, "wb") as wavFile: wavFile.setnchannels(CHANNELS) # Mono wavFile.setsampwidth(BYTES_PER_SAMPLE) # 2 bytes per sample wavFile.setframerate(audio.sample_rate) # Use original sample rate wavFile.writeframes(audioData) wavFile.close() # ############################################################################ # Listen for activation word # Will return activation word or detected command. 
# ############################################################################ def listenForActivationWord(recognizer, microphone): activationWord = CONFIG['common']['activationWord'].lower() listenTime = CONFIG['common']['duration'] recFile = CONFIG['common']['audiofiles'] + "/commandrec.wav" # Listen try: with microphone as source: logMessage(2, f"Listening for {listenTime} seconds for activation word {activationWord} ...") audio = recognizer.listen(source, timeout=float(listenTime)) #audio = recognizer.record(source, duration=float(listenTime)) if LOG_LEVEL == 3: saveRecordedAudio(audio, recFile) # Speech recognition result = recognizer.recognize_google(audio, language=CONFIG['Google']['language']) logMessage(3, "Understood: " + result) words = result.lower().split() logMessage(3, words) # Search for activation word. Will raise a ValueError exception if activation word is not found idxActivationWord = words.index(activationWord) logMessage(3, "Understood activation word " + activationWord) # Check for control commands if len(words) > idxActivationWord+1: for controlWord,commandList in CONFIG.items('commands'): commandWords = commandList.split(',') if words[idxActivationWord+1] in commandWords: logMessage(3, "Understood control command " + commandList + " [" + controlWord + "]") return controlWord return activationWord except ValueError: # Raised by index() logMessage(2, "Value Error: List of words does not contain activation word " + activationWord) except LookupError: logMessage(1, "Lookup Error: Could not understand audio") return "didNotUnderstand" except sr.UnknownValueError: logMessage(2, "Unknown Value Error: No input or unknown value") except sr.WaitTimeoutError: logMessage(2, "Listening timed out") return None # ############################################################################ # Listen for OpenAI command # ############################################################################ def listenForOpenAICommand(recognizer, microphone): listenTime = CONFIG['common']['duration'] recFile = CONFIG['common']['audiofiles'] + "/openairec.wav" try: # Listen with microphone as source: logMessage(2, f"Listening for query for {listenTime} seconds ...") audio = recognizer.listen(source, timeout=float(listenTime)) saveRecordedAudio(audio, recFile) # Convert speech to text prompt = speechToText(recFile) if prompt == "": logMessage(1, "Couldn't understand the command") return 'didNotUnderstand' else: return prompt except sr.UnknownValueError: logMessage(1, "Couldn't understand the command") return 'didNotUnderstand' except sr.WaitTimeoutError: logMessage(2, "No input") return None # ############################################################################ # Convert speech to text with OpenAI Whisper # ############################################################################ def speechToText(recFile): audioFile = open(recFile, "rb") text = openai.Audio.transcribe("whisper-1", audioFile, language=CONFIG['OpenAI']['openAILanguage']) audioFile.close() logMessage(3, text) prompt = text['text'] logMessage(3, prompt) return prompt # ############################################################################ # Ask Chat GPT # ############################################################################ def askChatGPT(prompt): messages = [{"role": "user", "content": prompt}] response = openai.ChatCompletion.create( model=CONFIG['OpenAI']['openAIModel'], messages=messages, temperature=0 ) return response.choices[0].message["content"] # 
############################################################################ # Play an audio file # # loops = -1: play endlessly # loops = 0: play once # ############################################################################ def playAudioFile(fileName, background=False, loops=0): if not os.path.isfile(fileName): found = False if not fileName.startswith(CONFIG['common']['audiofiles']): fileName = CONFIG['common']['audiofiles'] + "/" + fileName if os.path.isfile(fileName): found = True if not found: logMessage(2, f"Can't play audio file {fileName}. File not found.") return pygame.mixer.init() pygame.mixer.music.load(fileName) pygame.mixer.music.play(loops) if not background: # Wait until the audio playback is completed while pygame.mixer.music.get_busy(): pass # ############################################################################ # Play an audio PCM stream # ############################################################################ def playAudioStream(stream): p = pyaudio.PyAudio() stream = p.open(format=p.get_format_from_width(BYTES_PER_SAMPLE), channels=CHANNELS, rate=int(CONFIG['common']['sampleRate']), output=True) with closing(stream) as pollyStream: while True: data = pollyStream.read(READ_CHUNK) if data is None or len(data) == 0: break stream.write(data) stream.stop_stream() stream.close() p.terminate() # ############################################################################ # Fade out audio # # duration: fade out duration in seconds # ############################################################################ def fadeOutAudio(duration): pygame.mixer.music.fadeout(duration * 1000) # ############################################################################ # Convert text to speech with AWS Polly and play result # # outputFile: Name of temporary audio file. File will be created or is # expected to be found in "audiofiles" directory. Name must be specified # without file extension. # useCache: Flag for using cached/existing file. Set it to False to force # creation of a new audio file # background: Flag for playing audio in background. Is ignored if no # outputFile is specified # ############################################################################ def textToSpeech(text, outputFile=None, useCache=True, background=False): session = boto3.Session( aws_access_key_id=CONFIG['AWS']['awsKeyId'], aws_secret_access_key=CONFIG['AWS']['awsKeySecret'], region_name=CONFIG['AWS']['region'] ) polly = session.client('polly') # Determine audio output format if outputFile is None: format = "pcm" sampleRate="16000" else: format = "mp3" sampleRate="22050" fileName = CONFIG['common']['audioFiles'] + "/" + outputFile + "." 
+ format try: # Convert text to audio stream response = polly.synthesize_speech( Engine='standard', Text=text, OutputFormat=format, VoiceId=CONFIG['AWS']['pollyVoiceId'], LanguageCode=CONFIG['AWS']['language'], SampleRate=sampleRate ) except (BotoCoreError, ClientError) as error: logMessage(0, "BotoCoreError" + error) return # Output stream if outputFile is None: playAudioStream(response['AudioStream']) else: if not os.path.isfile(fileName) or not useCache: # Write stream to file logMessage(2, "Writing speech audio to file " + fileName) with open(fileName, 'wb') as f: f.write(response['AudioStream'].read()) playAudioFile(fileName, background=background) # ############################################################################ # List configured microphones # ############################################################################ def listMicrophones(): p = pyaudio.PyAudio() info = p.get_host_api_info_by_index(0) numdevices = info.get('deviceCount') print("Available microphone devices:") for i in range(0, numdevices): dev = p.get_device_info_by_host_api_device_index(0, i) if (dev.get('maxInputChannels')) > 0: print("Input Device id ", dev.get('index'), " - ", dev.get('name')) p.terminate() # ############################################################################ # Select microphone # ############################################################################ def selectMicrophone(micName): deviceIndex = None p = pyaudio.PyAudio() info = p.get_host_api_info_by_index(0) numdevices = info.get('deviceCount') for i in range(0, numdevices): dev = p.get_device_info_by_host_api_device_index(0, i) if (dev.get('maxInputChannels')) > 0 and micName in dev.get('name'): # Found microphone deviceIndex = dev.get('index') print("Selected microphone ", dev.get('name')) break p.terminate() return deviceIndex # ############################################################################ # Main function # ############################################################################ def main(): global LOG_LEVEL, SOFT_MUTE # Parse command line arguments parser = argparse.ArgumentParser(prog="HomeAI", description="Home AI Assistant") parser.add_argument("--config", default="homeai.conf", help="Name of configuration file") parser.add_argument("--list_microphones", action="store_true", help="List available microphones") parser.add_argument("--microphone", help="Set name of microphone") parser.add_argument("--log_level", default=0, type=int, choices=range(0, 4), help="Set level of log messages") parser.add_argument("--no_welcome", action="store_true", help="Do not play welcome message") parser.add_argument("--version", action="version", version='%(prog)s ' + VERSION) args = parser.parse_args() # List available microphones if args.list_microphones: listMicrophones() return LOG_LEVEL = int(args.log_level) print("Set log level to " + str(LOG_LEVEL)) # Read configuration if not readConfig(args.config): return # Setup microphone deviceIndex = None if args.microphone: deviceIndex = selectMicrophone(args.microphone) else: print("Using system default microphone") microphone = sr.Microphone(sample_rate=int(CONFIG['common']['sampleRate']), device_index=deviceIndex) # Setup recognizer recognizer = sr.Recognizer() recognizer.dynamic_energy_threshold = False if int(CONFIG['common']['energyThreshold']) == -1: logMessage(2, "Calibrating energy threshold ...") with microphone as source: recognizer.adjust_for_ambient_noise(source, duration=1) logMessage(2, "Speech recognition energy threshold = " + 
str(recognizer.energy_threshold)) else: recognizer.energy_threshold = CONFIG['common']['energyThreshold'] # Output welcome message. Will be cached in welcome.mp3 if not args.no_welcome: playAudioMessage('welcome') playAudioFile("listening.wav") while True: # Listen for activation word command = listenForActivationWord(recognizer, microphone) if command == 'stop': logMessage(2, "Stopping audio playback") fadeOutAudio(1) elif command == 'mute': logMessage(2, "Muted") SOFT_MUTE = 1 elif command == 'unmute': logMessage(2, "Unmuted") SOFT_MUTE = 0 elif command == 'terminate': logMessage(0, "Shutting down home-ai") playAudioMessage('shutdown') break elif command == CONFIG['common']['activationWord'].lower(): if SOFT_MUTE: errorOut("muted") else: playAudioFile("listening.wav", background=True) logMessage(2, ">>> Ask Open AI") # Listen for ChatGPT query prompt = listenForOpenAICommand(recognizer, microphone) if not errorOut(prompt): try: # Play sound until response from ChatGPT arrived and is converted to audio playAudioFile("processing.wav", loops=-1, background=True) # Send query to Chat GPT and output response response = askChatGPT(prompt) logMessage(3, response) fadeOutAudio(1) textToSpeech(response, "response", useCache=False, background=True) except Exception: fadeOutAudio(1) errorOut("genericError") elif not command is None: if not errorOut(command): logMessage(1, "Unknown command " + command) if __name__ == "__main__": main()
[ "Understood: PLACEHOLDER", "BotoCoreErrorPLACEHOLDER", "No input", "welcome", "Can't play audio file PLACEHOLDER. File not found.", "Calibrating energy threshold ...", "Control commands:", "shutdown", "Unknown Value Error: No input or unknown value", "3", "1", "Listening for query for PLACEHOLDER seconds ...", "Reading config file PLACEHOLDER ...", "Muted", "Understood control command PLACEHOLDER [PLACEHOLDER]", "Stopping audio playback", "Writing speech audio to file PLACEHOLDER", "PLACEHOLDER: PLACEHOLDER", "Listening timed out", "Lookup Error: Could not understand audio", ">>> Ask Open AI", "Listening for PLACEHOLDER seconds for activation word PLACEHOLDER ...", "Understood activation word PLACEHOLDER", "Unmuted", "Shutting down home-ai", "0", "Message key PLACEHOLDER not found in configuration", "2", "Couldn't understand the command", "Unknown command PLACEHOLDER", "Value Error: List of words does not contain activation word PLACEHOLDER" ]
2024-01-10
UWCCDL/pyQEEG
pyqeeg~core.py
from collections import namedtuple import numpy as np from scipy.signal import coherence, find_peaks from pyqeeg.utils import blink_ok, bounds_ok, detrend_data, qual_ok, get_bounds, longest_quality SPIKE_CUTOFF = 200 Band = namedtuple("Band", ["name", "lower_bound", "upper_bound"]) Spectrum = namedtuple("Spectrum", ["good_samples", "power", "longest_quality_segment"]) Coherence = namedtuple("Coherence", ["good_samples", "coherence"]) IndividualizedAlphaFrequency = namedtuple("IndividualizedAlphaFrequency", ["power", "freq"]) def spectral_analysis(series, x, y, blink, quality, sampling=128, length=4, sliding=0.75, hamming=True): series = detrend_data(series, x, y) good_samples = 0 shift = int(sampling * (length * sliding)) window = int(sampling * length) spectrum_len = (sampling * length) // 2 lower, upper = get_bounds(series) result = np.zeros(spectrum_len) for i in range(0, len(series) - window, shift): sub, bsub, qsub = series[i:i+window], blink[i:i+window], quality[i:i+window] maxmin = max(sub) - min(sub) if bounds_ok(sub, lower, upper) and blink_ok(bsub) and qual_ok(qsub) and maxmin < SPIKE_CUTOFF: good_samples += 1 if hamming: sub = sub * np.hamming(window) partial = np.real(np.fft.fft(sub)) ** 2 partial = partial[:spectrum_len] result = result + partial result = np.log(result / good_samples) return Spectrum(good_samples, result, longest_quality(quality, sampling)) def coherence_analysis(series1, series2, x, y, blink, quality1, quality2, sampling=128, length=4, sliding=0.75, hamming=True): series1 = detrend_data(series1, x, y) series2 = detrend_data(series2, x, y) good_samples = 0 shift = int(sampling * (length * sliding)) window = int(sampling * length) spectrum_len = (sampling * length) // 2 lower1, upper1 = get_bounds(series1) lower2, upper2 = get_bounds(series2) minlen = min(len(series1), len(series2)) result = np.zeros(spectrum_len) for i in range(0, minlen - window, shift): sub1, sub2 = series1[i:i+window], series2[i:i+window] #print("sub1 = [" + ", ".join([str(v) for v in sub1]) + "]") #print("sub2 = [" + ", ".join([str(v) for v in sub2]) + "]") bsub, qsub1, qsub2 = blink[i:i+window], quality1[i:i+window], quality2[i:i+window] if bounds_ok(sub1, lower1, upper1) and bounds_ok(sub2, lower2, upper2) \ and blink_ok(bsub) and qual_ok(qsub1) and qual_ok(qsub2): good_samples += 1 if hamming: sub1 = sub1 * np.hamming(window) sub2 = sub2 * np.hamming(window) partial = coherence(sub1, sub2)[1][1:] result += partial result /= good_samples return Coherence(good_samples, result) def find_iaf(power, freq, alpha_lower_bound=7, alpha_upper_bound=15): """ Find individualized Alpha Frequency """ max_peak_power, max_peak_freq = None, None alpha_power = power[(freq >= alpha_lower_bound) & (freq <= alpha_upper_bound)] alpha_freq = freq[(freq >= alpha_lower_bound) & (freq <= alpha_upper_bound)] peaks_coords, _ = find_peaks(alpha_power, prominence=0.2) if len(peaks_coords) > 0: max_peak_power = alpha_power[peaks_coords[0]] max_peak_freq = alpha_freq[peaks_coords[0]] for peak_coord in peaks_coords[1:]: if alpha_power[peak_coord] > max_peak_power: max_peak_power = alpha_power[peak_coord] max_peak_freq = alpha_freq[peak_coord] return IndividualizedAlphaFrequency(max_peak_power, max_peak_freq) def draw_bands(band_method, whole_head_iaf=10): """ Function to define the frequency bands according to the "band_method" argument. 
:param band_method: name of the definition method (IBIW or IBFW) :type band_method: str :param whole_head_iaf: whole head IAF (default 10) :type whole_head_iaf: float :return: """ if band_method == "IBIW": return [Band("Delta", 0, whole_head_iaf * 0.4), Band("Theta", whole_head_iaf * 0.4, whole_head_iaf * 0.8), Band("Alpha", whole_head_iaf * 0.8, whole_head_iaf * 1.21), Band("Low_Beta", whole_head_iaf * 1.21, whole_head_iaf * 1.8), Band("High_Beta", whole_head_iaf * 1.8, whole_head_iaf * 3), Band("Gamma", whole_head_iaf * 3, 40.5)] elif band_method == "IBFW": return [Band("Delta", 0, whole_head_iaf - 6), Band("Theta", whole_head_iaf - 6, whole_head_iaf - 2), Band("Alpha", whole_head_iaf - 2, whole_head_iaf + 2.5), Band("Low_Beta", whole_head_iaf + 2.5, whole_head_iaf + 8), Band("High_Beta", whole_head_iaf + 8, whole_head_iaf + 20), Band("Gamma", whole_head_iaf + 20, 40.5)] elif band_method == "FBFW": return [Band("Delta", 0, 4), Band("Theta", 4, 8), Band("Alpha", 8, 12.5), Band("Low_Beta", 12.5, 18), Band("High_Beta", 18, 30), Band("Gamma", 30, 40.5)] else: print("Invalid band_method")
[]
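The find_iaf routine above picks the most prominent spectral peak inside the alpha range. A small self-contained sketch of that idea on a synthetic spectrum (numpy and scipy only; the 10 Hz bump and the prominence threshold are illustrative choices, not values taken from pyQEEG):

import numpy as np
from scipy.signal import find_peaks

freq = np.linspace(0, 40, 161)                             # 0.25 Hz resolution
power = 1.0 / (freq + 1.0)                                 # 1/f-like background
power += 2.0 * np.exp(-0.5 * ((freq - 10.0) / 0.8) ** 2)   # synthetic alpha bump at 10 Hz

alpha_mask = (freq >= 7) & (freq <= 15)
alpha_power, alpha_freq = power[alpha_mask], freq[alpha_mask]

peaks, _ = find_peaks(alpha_power, prominence=0.2)
if len(peaks) > 0:
    best = peaks[np.argmax(alpha_power[peaks])]            # keep the highest peak, as find_iaf does
    print(f"IAF ~ {alpha_freq[best]:.2f} Hz")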
2024-01-10
UWCCDL/pyQEEG
pyqeeg~analysis.py
import itertools import numpy as np import pandas as pd from pyqeeg.core import coherence_analysis, Coherence, draw_bands, find_iaf, spectral_analysis from pyqeeg.output_formatting import get_spectra_dataframe, get_coherence_dataframe from pyqeeg.plotting import plot_coherence, plot_spectra from pyqeeg.summary import Summary from pyqeeg.utils import connection, get_channels_with_bad_spectrum CHANNELS = ["AF3", "F7", "F3", "FC5", "T7", "P7", "O1", "O2", "P8", "T8", "FC6", "F4", "F8", "AF4"] NETWORKS = {"MF": ["AF3", "AF4", "F3", "F4"], "LFT": ["F7", "FC5", "T7"], "RFT": ["F8", "FC6", "T8"], "LP": ["P7", "O1"], "RP": ["P8", "O2"]} NETWORK_CONNECTIONS = {connection(net1, net2): [connection(ch1, ch2) for ch1 in NETWORKS[net1] for ch2 in NETWORKS[net2]] for net1, net2 in itertools.combinations(NETWORKS.keys(), 2)} VERSION = "1.0.0" def run_analysis(subject, session, filename=None, sampling=128, window=2, sliding=0.75, band_method="FBFW", coherence_plots=False, min_samples_for_inclusion=75, whole_head_iaf=None, return_object=False): if not filename: filename = f"{subject}_{session}.txt" try: data = pd.read_csv(filename, sep='\t') except FileNotFoundError: print(f"File {subject}_{session}.txt doesn't exist") return summary = Summary(subject, VERSION, session, sampling, window, sliding, len(data) / sampling) freq = np.array([i * 1 / window for i in range(1, sampling + 1)]) blink = np.array(data["Blink"]) if "Blink" in data.columns else np.zeros(len(freq)) summary.fill_meta_blinks(blink) x = np.array(data["GyroX"]) if "GyroX" in data.columns else None y = np.array(data["GyroY"]) if "GyroY" in data.columns else None all_spectra, iafs = {}, {} for channel in CHANNELS: all_spectra[channel] = spectral_analysis(series=np.array(data[channel]), sampling=sampling, length=window, sliding=sliding, x=x, y=y, blink=blink, quality=np.array(data[f"{channel}_Q"])) iafs[channel] = find_iaf(all_spectra[channel].power, freq) # Handle excludes too_few_samples = [ch for ch, spectrum in all_spectra.items() if spectrum.good_samples <= min_samples_for_inclusion] # print(too_few_samples) no_peak = [ch for ch, iaf in iafs.items() if not iaf.freq] bad_spectrum = get_channels_with_bad_spectrum(all_spectra) all_excluded = set(too_few_samples + no_peak + bad_spectrum) coherence_excluded = set(too_few_samples + bad_spectrum) missing_o1_o2 = "O1" in all_excluded and "O2" in all_excluded if not whole_head_iaf: data_for_iaf = pd.DataFrame.from_dict({channel: all_spectra[channel].power for channel in all_spectra.keys() if channel not in all_excluded}) whole_head_spectrum = np.array(data_for_iaf.mean(axis=1)) print("[" + ", ".join([str(x) for x in whole_head_spectrum]) + "]") whole_head_iaf = find_iaf(whole_head_spectrum, freq).freq summary.fill_whole_head_iaf(whole_head_iaf) if missing_o1_o2: band_method = "FBFW" summary.fill_band_method(band_method) bands = draw_bands(band_method, whole_head_iaf) network_spectra = get_network_spectra(all_spectra, all_excluded) summary.fill_spectra_metrics(all_spectra, iafs, bands, freq, network_spectra) spectra_df = get_spectra_dataframe(subject, freq, all_spectra, network_spectra) spectra_df.to_csv(f"{subject}_{session}_spectra.txt", sep="\t") plot_spectra(all_spectra, bands) all_cohr = {} for channel1, channel2 in itertools.combinations(CHANNELS, 2): if (channel1 in coherence_excluded) or (channel2 in coherence_excluded): all_cohr[connection(channel1, channel2)] = Coherence(good_samples=0, coherence=np.array([np.nan for _ in freq])) else: all_cohr[connection(channel1, channel2)] = 
coherence_analysis(series1=np.array(data[channel1]), series2=np.array(data[channel2]), sampling=sampling, length=window, x=x, y=y, blink=blink, quality1=np.array(data[f"{channel1}_Q"]), quality2=np.array(data[f"{channel2}_Q"])) networks_coherence = get_network_coherence(all_cohr) summary.fill_coherence_metrics(all_cohr, bands, freq, networks_coherence) coherence_df = get_coherence_dataframe(subject, freq, all_cohr, networks_coherence) coherence_df.to_csv(f"{subject}_{session}_coherence.txt", sep="\t") if coherence_plots: plot_coherence(all_cohr, bands) summary.write_to_file(f"{subject}_{session}_summary.txt") if return_object: return all_spectra, network_spectra, all_cohr, networks_coherence, summary def get_network_spectra(all_spectra, excluded): network_spectra = {} data_for_networks = pd.DataFrame.from_dict({ch: all_spectra[ch].power for ch in all_spectra.keys() if ch not in excluded}) for network, channels in NETWORKS.items(): present_channels = [ch for ch in channels if ch in data_for_networks.columns] network_spectra[network] = data_for_networks[present_channels].mean(axis=1) return network_spectra def get_network_coherence(all_cohr): networks_coherence = {} data_for_networks = pd.DataFrame.from_dict({con: cohr.coherence for con, cohr in all_cohr.items()}) for network_connection, channel_connections in NETWORK_CONNECTIONS.items(): networks_coherence[network_connection] = data_for_networks[channel_connections].mean(axis=1) return networks_coherence
[]
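NETWORK_CONNECTIONS above is built by pairing every channel of one network with every channel of another via itertools.combinations. A short sketch with a stand-in connection() helper (the real one lives in pyqeeg.utils; joining the two names with a dash is only an assumption for illustration):

import itertools

def connection(a, b):
    # hypothetical stand-in for pyqeeg.utils.connection
    return f"{a}-{b}"

networks = {"LP": ["P7", "O1"], "RP": ["P8", "O2"]}
network_connections = {
    connection(net1, net2): [connection(ch1, ch2)
                             for ch1 in networks[net1]
                             for ch2 in networks[net2]]
    for net1, net2 in itertools.combinations(networks.keys(), 2)
}
print(network_connections)
# {'LP-RP': ['P7-P8', 'P7-O2', 'O1-P8', 'O1-O2']}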
2024-01-10
Hansimov/local-llms
examples~chat_with_openai.py
from openai import OpenAI

# If running this service with a proxy, you might need to unset `http(s)_proxy`.
base_url = "http://127.0.0.1:23333/v1"
api_key = "sk-xxxxx"

client = OpenAI(base_url=base_url, api_key=api_key)

response = client.chat.completions.create(
    model="dolphin-2.5-mixtral-8x7b",
    messages=[
        {
            "role": "user",
            "content": "what is your model",
        }
    ],
    stream=True,
    stop=["[INST]", "[/INST]"],
)

for chunk in response:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
    elif chunk.choices[0].finish_reason == "stop":
        print()
    else:
        pass
[ "what is your model" ]
2024-01-10
natxc/FullStackin26
DS4A_DataEngg_Bootcamp~Streamlit_App~Hello.py
import streamlit as st import pandas as pd import numpy as np import plotly.graph_objects as go import plotly.express as px import snowflake.snowpark as snowpark from prompts import get_system_prompt import openai import time from datetime import datetime import re import category_encoders as ce from imblearn.under_sampling import RandomUnderSampler from sklearn.preprocessing import StandardScaler, FunctionTransformer, LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, confusion_matrix from sklearn.pipeline import Pipeline st.set_page_config( page_title="Big Supply Co - Retail and Finance Projects", page_icon="📊", ) openai.api_key = st.secrets.OPENAI_API_KEY def intro(): st.title('Big Supply Co - Retail and Finance Projects') st.write("### Welcome to Big Supply Co. Info! 👋") st.sidebar.success("Select a page above.") st.markdown( """ **👈 Select an option to choose between Retail or Finance projects on the left** and then choose a page from the dropdown to see some data visualizations, talk to the chatbot, take a step inside my brain as I made this web app, use the data ingestion tool, or see data science results. """) st.image('https://static.vecteezy.com/system/resources/previews/025/501/341/non_2x/sport-equipment-on-a-black-background-sports-equipment-on-a-black-background-sports-equipment-on-a-dark-background-ai-generated-free-photo.jpg') def explanation(): st.title('Big Supply Co. - Retail Analysis') st.markdown( """ ## My Approach and Thinking The focus on this project was more so to utilize Large Language Models (LLMs) and Streamlit's User Interface (UI) capabilities to build a chatbot, but I also wanted to flex some data engineering skills and incorporate a stack I've never used. Specifically, the only guidelines for and outcomes of the project were to design and develop a user-friendly chatbot in Streamlit that interacts with a retail database, enabling it to answer questions on sales, marketing, and production. The chatbot should create and execute SQL queries on the database while also displaying the SQL query and relevant data in a Streamlit app. There are many ways one could approach this in a simpler way by just using the provided CSVs locally for the project, using a data build tool (dbt) seed, uploading files directly to S3 or Snowflake, the list goes on. However, I really wanted an opportunity to utilize Airflow, PostgreSQL, Docker, Airbyte, dbt, S3 buckets and a little AWS CLI. Luckily, most of these are open source tools and all were free for my use-cases. By using all of these tools, I was able to not only do Extract, Transform, Load (ETL), but also Extract, Load, Transform (ELT) and reinforce Kimball datawarehousing methodologies, data contracts, and more. Granted, this whole process would be a terrible and roundabout way to model data for a product, but I learned a lot and had fun along the way. Ultimately, my recommendation would be to use dbt and a Snowflake data warehouse. Snowflake has acquired Streamlit, so it has become a very strong pair and OpenAI has only solidified both of those tools. If I was feeling extra creative, I would add one more step and set up Fivetran or Rivery to extract my Postgres data models to Snowflake. How many other tools can I add to make this the most complicated ELT/ETL journey possible??? I actually did like that idea better, so I did just that! 
Without any new tools, I just used Airbyte to load my Postgres models to Snowflake. Should I go back and add Dagster to the mix for fun though.. High level breakdown of my process: ## Data Engineering: ### Step 1: Created the environment (I prefer conda) and installed the packages: - Kubernetes - PostgreSQL - Airflow - dbt - Docker - Airbyte Specifics can be found in requirements.txt ### Step 2: Initialized the Postgres database (db). Created myself as a non-superuser and then created a new inner database. Initialized the db and created a schema. During this step, I learned a lot of psql commands, too! """) st.image('https://github.com/natxc/FullStackin26/blob/main/DS4A_DataEngg_Bootcamp/Streamlit_App/images/psql.png?raw=true') st.markdown(""" ### Step 3: Set up Airflow with Postgres. Ran the webserver and enabled the relevant directed acyclic graphs (DAG). ### Step 4: Initialized a dbt project and connected to Postgres. ### Step 5: Created an Amazon Web Services (AWS) account and created an S3 (Amazon Simple Storage Service) bucket. ### Step 6: Added comma-separated values (CSV) files to S3 buckets via Airflow. """) st.image('https://github.com/natxc/FullStackin26/blob/main/DS4A_DataEngg_Bootcamp/Streamlit_App/images/airflow.png?raw=true') st.markdown(""" ### Step 7: Loaded data from S3 to Postgres via Airbyte. Learned a lot about iam, policies and permissions, and even AWS Command Line Interface (CLI) along the way. Airbyte was nice as it already had out of the box connections for S3 and Postgres. """) st.image('https://github.com/natxc/FullStackin26/blob/main/DS4A_DataEngg_Bootcamp/Streamlit_App/images/airbyte.png?raw=true') st.markdown(""" ### Step 8: Added the `dbt_utils` package and created the staging models in dbt. """) st.image('https://github.com/natxc/FullStackin26/blob/main/DS4A_DataEngg_Bootcamp/Streamlit_App/images/dbt.png?raw=true') st.markdown(""" After those were built, I made a star schema diagram to help prepare a plan to build the fact and dimension models and to help avoid many-to-many relationships on the joins. Then I created the final datamodel. Some cleaning needed to be done like casting data types appropriately, renaming columns to adhere to data contract names, adding logic to change certain fields like zipcode, creating booleans, and disabling empty or unvaluable columns.""") st.image('https://miro.medium.com/v2/resize:fit:1400/1*Aa5f69jOLbOkVNKRp7g-CA.png') st.markdown(""" ### Step 9: Added in tests and source freshness, even though I will never add more data to this warehouse...ever! ### Step 10: Moved Postgres models to Snowflake to eliminate worry about any servers. ## UI: ### Step 1: Installed the packages: - Streamlit - OpenAI - Snowpark - Plotly ### Step 2: Created the main Streamlit Python file and secrets file, then added and tested the Postgres connection. ### Step 3: Added chatbot, which entailed a main function and a prompts.py file for prompt engineering. ### Step 4: Created a new metadata table with descriptions and datatypes. I used a dbt seed this time, and did a `--full-refresh` seed run whenever I updated the column names. ### Step 5: Updated the prompts file as any hallucinations from the chatbot occured. ### Step 6: Added more fun visualizations, including charts based on the chatbot's output, and some text boxes for executing database queries; however, I had to consider security for Data Manipulation Language (DML) operations to avoid possible Structured query language (SQL) injections. 
I added that component to my prompts and restricted DML operations such as `delete`, `update`, and so on in my function. ### Step 7: Deployed and enjoyed! """) def visualizations(): st.title('Big Supply Co. - Retail Analysis') conn = st.experimental_connection("snowpark") st.markdown(""" ### Charts and Analysis: """) st.write("This portion visualizes and explains insights from the Big Supply Co. `orders` table. You can add filters using the panel on the left.") data = conn.query("select * from AIRBYTE_DATABASE.AIRBYTE_SCHEMA.ORDERS") # Sidebar with filter options st.sidebar.subheader("Filter Data") selected_region = st.sidebar.selectbox("Select Region", data['ORDER_REGION_ADDR'].unique()) selected_category = st.sidebar.selectbox("Select Category", data['CATEGORY_NAME_ATTR'].unique()) selected_segment = st.sidebar.selectbox("Select Customer Segment", data['CUSTOMER_SEGMENT_CAT'].unique()) # Explanation for filter options st.sidebar.write("You can filter data by region, product category, and customer segment.") # Filter the data based on user selection filtered_data = data[ (data['ORDER_REGION_ADDR'] == selected_region) & (data['CATEGORY_NAME_ATTR'] == selected_category) & (data['CUSTOMER_SEGMENT_CAT'] == selected_segment) ] st.header("View the raw, filtered data first:") # Explanation for the selected filters # st.write(f"Filtered by Region: {selected_region}") # st.write(f"Filtered by Category: {selected_category}") # st.write(f"Filtered by Customer Segment: {selected_segment}") # Show the filtered data st.dataframe(filtered_data) # Add a switch button to toggle between overall and filtered dataset st.subheader("For these charts, you can use this toggle here to view by the filters you provided or by the complete dataset for the full picture:") use_filtered_data = st.checkbox("Use Filtered Data") # Filter the data based on user selection or use the overall dataset if use_filtered_data: data = filtered_data else: data = data # Visualization 1: Sales by Region st.header("Sales by Region") region_sales = data.groupby('ORDER_REGION_ADDR')['SALES_AMT'].sum().reset_index() fig1 = px.bar(region_sales, x='ORDER_REGION_ADDR', y='SALES_AMT', title="Total Sales by Region") fig1.update_xaxes(title_text="Region") fig1.update_yaxes(title_text="Total Sales Amount") st.plotly_chart(fig1) st.write("This bar chart shows the total sales amount for each region.") # # Visualization 2: Product Price Variance # st.header("Product Price Variance") # product_variance = data.groupby('PRODUCT_CARD_ID')['PRODUCT_PRICE_AMT'].var().reset_index() # fig2 = px.histogram(product_variance, x='PRODUCT_PRICE_AMT', nbins=30, title="Product Price Variance") # st.plotly_chart(fig2) # st.write("This histogram represents the variance in product prices. 
A higher variance indicates price fluctuations.") # # Visualization 3: Average Order Amount by Customer Segment # st.header("Average Order Amount by Customer Segment") # avg_order_segment = data.groupby('CUSTOMER_SEGMENT_CAT')['ORDER_ITEM_TOTAL_AMT'].mean().reset_index() # fig3 = px.bar(avg_order_segment, x='CUSTOMER_SEGMENT_CAT', y='ORDER_ITEM_TOTAL_AMT', title="Average Order Amount by Customer Segment") # fig3.update_xaxes(title_text="Customer Segment") # fig3.update_yaxes(title_text="Total Order Item Amount") # st.plotly_chart(fig3) # st.write("This bar chart displays the average order amount for each customer segment.") # Create a combo line chart for sales and profit over time st.header(f"Sales and Profit Over Time") sales_profit_data = data.groupby('ORDER_DT').agg({'SALES_AMT': 'sum', 'ORDER_PROFIT_AMT': 'sum'}).reset_index() fig_combo = px.line(sales_profit_data, x='ORDER_DT', y='SALES_AMT', title="Sales Over Time") fig_combo.add_bar(x=sales_profit_data['ORDER_DT'], y=sales_profit_data['ORDER_PROFIT_AMT'], name="Profit") fig_combo.update_xaxes(title_text="Date") fig_combo.update_yaxes(title_text="Sales and Profit") st.plotly_chart(fig_combo) st.write("This combo chart displays both sales and profit over time for the selected region.") # Visualization 4: Delivery Status st.header("Delivery Status") delivery_status = data['DELIVERY_STATUS_CAT'].value_counts().reset_index() fig4 = px.pie(delivery_status, names='DELIVERY_STATUS_CAT', values='count', title="Delivery Status Distribution") fig4.update_xaxes(title_text="Delivery Status") fig4.update_yaxes(title_text="Frequency") st.plotly_chart(fig4) st.write("This pie chart illustrates the distribution of delivery statuses for orders.") st.markdown(""" ### QIY (Query It Yourself 💪): """) text_input = st.text_input("Replace this with your own SQL query 👇 (you can just use `table` instead of any specifics)", "select * from table limit 10;",) message = text_input.replace('table', 'AIRBYTE_DATABASE.AIRBYTE_SCHEMA.ORDERS') message = conn.query(message) st.dataframe(message) def chatbot(): st.title('Big Supply Co. - Retail Analysis') # # Initialize the chat messages history if "messages" not in st.session_state: # system prompt includes table information, rules, and prompts the LLM to produce # a welcome message to the user. 
st.session_state.messages = [{"role": "system", "content": get_system_prompt()}] # Prompt for user input and save if prompt := st.chat_input(): st.session_state.messages.append({"role": "user", "content": prompt}) # display the existing chat messages for message in st.session_state.messages: if message["role"] == "system": continue with st.chat_message(message["role"]): st.write(message["content"]) if "results" in message: st.dataframe(message["results"]) # If last message is not from assistant, we need to generate a new response if st.session_state.messages[-1]["role"] != "assistant": with st.chat_message("assistant"): response = "" resp_container = st.empty() for delta in openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages], stream=True, ): response += delta.choices[0].delta.get("content", "") resp_container.markdown(response) message = {"role": "assistant", "content": response} # Parse the response for a SQL query and execute if available sql_match = re.search(r"```sql\n(.*)\n```", response, re.DOTALL) conn = st.experimental_connection("snowpark") # conn = st.experimental_connection("postgresql", type="sql") if sql_match: sql = sql_match.group(1) sql = sql.replace('<tableName>', 'AIRBYTE_DATABASE.AIRBYTE_SCHEMA.ORDERS') if not re.search(r'\b(update|delete|insert)\b', sql, re.IGNORECASE): message["results"] = conn.query(sql) # Adding bar charts if there is at least 1 dimension and 1 measure if len(message["results"].columns) == 2: if len(message["results"]) > 1: fig = go.Figure(data=go.Bar(x=message["results"].iloc[:,0], y=message["results"].iloc[:,1])) fig.update_layout(xaxis={'categoryorder': 'total descending'}) st.plotly_chart(fig) elif len(message["results"].columns) <= 1: pass else: y = message["results"].select_dtypes(include=['int','int8', 'int64', 'float64']).columns.tolist() if len(y) > 0: fig = go.Figure(data=go.Bar(x=message["results"].iloc[:,0], y=message["results"][y[0]])) fig.update_layout(xaxis={'categoryorder': 'total descending'}) st.plotly_chart(fig) else: pass else: # Handle the case where the query contains DML message["results"] = "Query contains DML operations and is not allowed." st.dataframe(message["results"]) st.session_state.messages.append(message) def data_ingestor(): st.title('Big Supply Co. - Finance Analysis') st.title('Data Ingestion Tool') st.header('Upload your dataset for processing') uploaded_file = st.file_uploader("Choose a file", type=['CSV','PARQUET']) if uploaded_file is not None: if type == 'CSV': dataframe = pd.read_csv(uploaded_file, encoding = 'utf-8') else: dataframe = pd.read_parquet(uploaded_file) st.write(dataframe) st.header('Upload the transformations you want to apply') ## TODO: Add at least 5 transformations that you consider will be beneficial for cleaning the data in order to be consumed by a machine learning model. 
uploaded_transformation_file = st.file_uploader("Choose a JSON file", type=['JSON']) if uploaded_transformation_file is not None: dataframe_transformations = pd.read_json(uploaded_transformation_file) st.write(dataframe_transformations) # { # "Expires": {"astype":"date"}, # "Card Number": {"astype":"str"}, # "Card Number": {"len":12}, # "Has Chip": {"map":{"YES":1, "NO":0}}, # "Card on Dark Web": {"map":{"YES":1, "NO":0}}, # "Acct Open Date": {"datediff":"Today"}, # "CARD INDEX": {"rename":"Card Index"} # } compare_copy = dataframe.copy() if st.button('Apply Transformations'): with st.spinner('Applying Transformations...'): for column in dataframe.columns: if column in dataframe_transformations.columns: dtype_rule = dataframe_transformations.loc['astype', column] map_rule = dataframe_transformations.loc['map', column] rename_rule = dataframe_transformations.loc['rename', column] datediff_rule = dataframe_transformations.loc['datediff', column] validate_rule = dataframe_transformations.loc['len', column] if not pd.isna(dtype_rule): # Convert the column to the specified data type if the rule is not NaN dataframe[column] = dataframe[column].astype(dtype_rule) if not pd.isna(map_rule): # Map values in the column based on the provided mapping if the rule is not NaN dataframe[column] = dataframe[column].map(map_rule) if not pd.isna(rename_rule): dataframe.rename(columns={column: rename_rule}, inplace=True) if not pd.isna(datediff_rule): current_date = datetime.today() dataframe[column] = pd.to_datetime(dataframe[column]) dataframe['Days Since Opening Acct'] = (current_date - dataframe[column]).dt.days # if not pd.isna(validate_rule): # dataframe = dataframe[dataframe[column].astype(str).str.len() == validate_rule] time.sleep(1) if dataframe.equals(compare_copy): st.info("Transformations Not Applicable.") else: st.success("Transformations Applied!") st.write(dataframe) st.header('Data export to SQL Database') # conn = st.experimental_connection("snowpark") session = st.experimental_connection("snowpark").session option = st.selectbox( "Select Table", ("Create table and insert data", "Insert into already existing table"), index=None, placeholder="Choose existing table or create new", ) if option == "Create table and insert data": tablename_input = st.text_input('Enter Table Name') if st.button('Update to SQL Database'): # Create a new table with the provided name snowparkDf = session.write_pandas(dataframe, tablename_input.upper(), database = "AIRBYTE_DATABASE", schema = "FINANCE", auto_create_table = True, overwrite = True) st.write(f"Table '{tablename_input}' was created and data was inserted!") elif option == "Insert into already existing table": existing_tables = pd.DataFrame(session.sql('SHOW TABLES IN AIRBYTE_DATABASE.FINANCE;').collect())['name'].to_list() selected_table = st.selectbox("Select Existing Table", existing_tables) if st.button('Update to SQL Database'): # Insert 'dataframe' data into the selected existing table existing_df = session.table("AIRBYTE_DATABASE.FINANCE." 
+ selected_table).to_pandas() existing_df = pd.concat([existing_df, dataframe]) snowparkDf = session.write_pandas(existing_df, selected_table, database = "AIRBYTE_DATABASE", schema = "FINANCE", auto_create_table = True, overwrite = True) st.success(f"Table {selected_table} was updated!") dataframe = existing_df.copy() else: pass st.header('Data export to CSV') filename_input = st.text_input('Enter File Name') @st.cache_resource def convert_df(df): # IMPORTANT: Cache the conversion to prevent computation on every rerun return df.to_csv().encode('utf-8') csv = convert_df(dataframe) if ('csv' or 'CSV') not in filename_input: st.download_button( label="Download Dataframe as CSV", data=csv, file_name=filename_input+'.csv', mime='text/csv', ) else: st.download_button( label="Download Dataframe as CSV", data=csv, file_name=filename_input, mime='text/csv', ) st.header('Describe sample dataset the simple way') def describeDF(df): st.write("Here's some stats about the loaded data:") numeric_types = ['int64', 'float64'] numeric_columns = df.select_dtypes(include=numeric_types).columns.tolist() # Get categorical columns categorical_types = ['object'] categorical_columns = df.select_dtypes(include=categorical_types).columns.tolist() st.write("Relational schema:") columns = df.columns.tolist() st.write(columns) col1, col2, = st.columns(2) with col1: st.write('Numeric columns:\t', numeric_columns) with col2: st.write('Categorical columns:\t', categorical_columns) # Calculte statistics for our dataset st.dataframe(df.describe(include='all'), use_container_width=True) if st.button('Analyze Data Sample'): with st.spinner('Analyzing dataset...'): time.sleep(1) describeDF(dataframe) st.header('Describe sample dataset with OpenAI API') if st.button('Analyze Data Sample with LLMs'): with st.spinner('Analyzing dataset...'): df_prompt = f"Give basic analytics on this dataframe: {dataframe}. This could include counts, sums, and averages. As well as overall sentences on any trends or conclusions that can be made after viewing the data. There should be multiple facts you give." # completion = openai.Completion.create(model="text-davinci-003", prompt = df_prompt, n = 10, max_tokens = 400, stop = None, temperature = 0.1) # text_list = [choice.text for choice in completion.choices] # st.write('\n'.join(text_list)) completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[ {"role": "system", "content": "You are a helpful assistant, skilled in data analysis and describing dataframes."}, {"role": "user", "content": df_prompt}]) st.success(completion.choices[0].message["content"]) def data_science(): st.write("""The task: build a predictive model to determine the likelihood (by assigning a risk score) of a new transaction being fraudulent or not""") df = pd.read_parquet('../Project 2/Data_Files/credit_card_transaction_data_de.parquet') #, encoding='ISO-8859-1') # Perform exploratory data analysis to identify insights and patterns that can help you build the model. # Understand the dataset and its features to assess data quality and prepare it as needed. 
df["Amount"] = df["Amount"].str.replace("$","").astype(float) df = df[df['Merchant State'] != 'Italy'] df['Date'] = pd.to_datetime(df[['Year', 'Month', 'Day']]) # Combine the columns into a new 'date' column df["Hour"] = df["Time"].str[0:2] df["Minute"] = df["Time"].str[3:5] df = df.drop(['Time'],axis=1) days = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'} df['Day of Week'] = df['Date'].dt.dayofweek.map(days) df["Is Fraud?"] = df["Is Fraud?"].apply(lambda x: 1 if x == 'Yes' else 0) fraud_data = df[df['Is Fraud?'] == 1] # # Filter data to limit the x-axis range # filtered_data = fraud_data[(fraud_data['Amount'] >= -200) & (fraud_data['Amount'] <= 2000)] # Create a histogram using Plotly st.subheader('Distribution of Fraudulent Transaction Amounts') fig = px.histogram(fraud_data, x='Amount', nbins=80) fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) # Customize the layout fig.update_layout( xaxis_title='Amount', yaxis_title='Number of Transactions', ) # Display the Plotly figure in Streamlit st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by State') # Get the top 30 cities top_cities = fraud_data['Merchant State'].value_counts().head(30) # Create a bar chart using Plotly fig = px.bar(top_cities, x=top_cities.values, y=top_cities.index, orientation='h') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) # Customize the layout fig.update_layout( xaxis_title='Number of Transactions', yaxis_title='Merchant State', ) st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by Top 30 Cities') top_cities = fraud_data['Merchant City'].value_counts().head(30) fig = px.bar(top_cities, x=top_cities.values, y=top_cities.index, orientation='h') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Number of Transactions', yaxis_title='Merchant City', ) st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by Year') fig = px.bar(fraud_data['Year'].value_counts().reset_index().sort_values(by='Year'), x='Year', y='count') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Year', yaxis_title='Number of Transactions', xaxis_type='category', ) st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by Month') fig = px.bar(fraud_data['Month'].value_counts().reset_index().sort_values(by='Month'), y='count', x='Month') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Month', yaxis_title='Number of Transactions', xaxis_type='category', ) st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by Day of the Month') fig = px.bar(fraud_data['Day'].value_counts().reset_index().sort_values(by='Day'), y='count', x='Day') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Day of the Month', yaxis_title='Number of Transactions', xaxis_type='category', ) st.plotly_chart(fig) st.subheader('Number of Fraudulent Transactions by Day of the Week') fig = px.bar(fraud_data['Day of Week'].value_counts().reset_index().sort_values(by='Day of Week'), y='count', x='Day of Week') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Day of the Week', yaxis_title='Number of Transactions', xaxis_type='category', ) st.plotly_chart(fig) 
st.subheader('Number of Fraudulent Transactions by Hour') fig = px.bar(fraud_data['Hour'].value_counts().reset_index().sort_values(by='Hour'), y='count', x='Hour') fig.update_traces(marker_color='navy', marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Hour', yaxis_title='Number of Transactions', xaxis_type='category', ) st.plotly_chart(fig) st.subheader('Distribution of Fraudulent Transactions by Use Chip') fig = px.bar(fraud_data['Use Chip'].value_counts().reset_index().sort_values(by='Use Chip'), y='count', x='Use Chip', color='Use Chip') fig.update_traces(marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Use Chip', yaxis_title='Number of Transactions', xaxis_type='category', showlegend=False ) st.plotly_chart(fig) @st.cache_data # making this a function so it can be cached def randomF(): # Conduct feature engineering to select relevant features for the model. # Define a strategy to experiment with possible solutions. columns_to_select = ['Year', 'Hour', 'Day of Week', 'Amount', 'Use Chip', 'Merchant Name', 'MCC', 'Is Fraud?'] df = df[columns_to_select] def clean(df): df['Hour'] = df['Hour'].astype('float') scaler = StandardScaler() df['Amount'] = scaler.fit_transform(df[['Amount']]) cat_col = ['Use Chip', 'Day of Week'] for col in cat_col: if col in df.columns: be = ce.BinaryEncoder(drop_invariant=False) enc_df = pd.DataFrame(be.fit_transform(df[col]), dtype='int8') df = pd.concat([df, enc_df], axis=1) df.drop([col], axis=1, inplace=True) for col in df.columns: df[col] = df[col].astype(float) return df preprocessing_pipeline = Pipeline([ ('cleaning', FunctionTransformer(clean, validate=False)), ], verbose=True) df_transformed = preprocessing_pipeline.fit_transform(df) # Define metrics to determine the best model. # Split the dataset into features (X) and target variable (y) X = df_transformed.drop(columns=['Is Fraud?']) y = df_transformed['Is Fraud?'] # Calculate the desired number of fraud cases based on the desired proportion desired_proportion = 0.2 total_samples = 40000 fraud_samples = int(total_samples * desired_proportion) # Create RandomUnderSampler with the desired sampling strategy rus = RandomUnderSampler(sampling_strategy={0: total_samples - fraud_samples, 1: fraud_samples}, random_state=1613) # Apply random undersampling to the original dataset X_resampled, y_resampled = rus.fit_resample(X, y) # Split the resampled data into train and test sets X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size=0.3, random_state=1613) # Build a predictive model capable of predicting the probability that a transaction is fraudulent. 
rf_classifier = RandomForestClassifier(n_estimators=100, random_state=42) rf_classifier.fit(X_train, y_train) y_pred_rf = rf_classifier.predict(X_test) st.write("**Random Forest Classifier Results:**") st.text(classification_report(y_test, y_pred_rf)) conf_matrix = confusion_matrix(y_test, y_pred_rf) conf_matrix_display = np.array([[f"TN: {conf_matrix[0, 0]}", f"FP: {conf_matrix[0, 1]}"], [f"FN: {conf_matrix[1, 0]}", f"TP: {conf_matrix[1, 1]}"]]) st.table(conf_matrix_display) # Hyperparameters Tuning # Define the hyperparameters param_grid = { 'n_estimators': [50, 100, 200], 'max_depth': [None, 10, 20, 30], 'max_features': ['sqrt', 'log2'], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [1, 2, 4], 'bootstrap': [True, False] } # Create a RandomForestClassifier model rf = RandomForestClassifier(random_state=42) # GridSearchCV grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=3, n_jobs=-1, verbose=0, scoring='f1_macro') grid_search.fit(X_train, y_train) # Get the best hyperparameters best_params = grid_search.best_params_ st.write("**Best hyperparameters:**", best_params) # Use the best estimator for predictions or further work best_rf = grid_search.best_estimator_ y_pred_best_rf = best_rf.predict(X_test) st.write("**Random Forest Classifier Results with Best Hyperparameters:**") st.text(classification_report(y_test, y_pred_best_rf)) conf_matrix = confusion_matrix(y_test, y_pred_best_rf) conf_matrix_display = np.array([[f"TN: {conf_matrix[0, 0]}", f"FP: {conf_matrix[0, 1]}"], [f"FN: {conf_matrix[1, 0]}", f"TP: {conf_matrix[1, 1]}"]]) st.table(conf_matrix_display) # Present the best model for predicting fraudulent transactions and key insights from the analysis. # Extract feature importances from the best random forest model feature_importance = best_rf.feature_importances_ features = X_train.columns # Sort the feature importances and their corresponding feature names sorted_idx = feature_importance.argsort() # Plot horizontal bar chart st.subheader('Feature Importances using Random Forest') # Plot horizontal bar chart data = pd.DataFrame({'Features': features[sorted_idx], 'Feature Importance': feature_importance[sorted_idx]}) data = data.sort_values(by='Feature Importance') fig = px.bar(data, x='Feature Importance', y='Features', orientation='h', color='Features') fig.update_traces(marker_line_color='black', marker_line_width=1) fig.update_layout( xaxis_title='Importance', yaxis_title='Features', showlegend=False ) st.plotly_chart(fig) randomF() page_names_to_funcs_retail = { "—": intro, "Visualizations": visualizations, "Chatbot": chatbot, "Explanation": explanation, } page_names_to_funcs_finance = { "Data Ingestion Tool": data_ingestor, "ML Fraud Detection": data_science, } st.sidebar.header("Toggle Between Projects") project_selector = st.sidebar.radio("Select a Project", ("Retail", "Finance")) if project_selector == "Retail": demo_name = st.sidebar.selectbox("Choose a page", page_names_to_funcs_retail.keys()) page_names_to_funcs_retail[demo_name]() else: demo_name = st.sidebar.selectbox("Choose a page", page_names_to_funcs_finance.keys()) page_names_to_funcs_finance[demo_name]()
[ "Give basic analytics on this dataframe: PLACEHOLDER. This could include counts, sums, and averages. As well as overall sentences on any trends or conclusions that can be made after viewing the data. There should be multiple facts you give.", "content", "You are a helpful assistant, skilled in data analysis and describing dataframes." ]
2024-01-10
outday29/wildered
wildered~context~autocomplete.py
import inspect from pathlib import Path from typing import Any, Dict, List, Literal import pyperclip import guidance from wildered.context.utils import temporary_workspace from wildered.utils import read_file, write_file from wildered.logger import logger from .tasks import ( TaskGroup, ) # Use OPENAI_API_KEY env guidance.llm = guidance.llms.OpenAI("text-davinci-003") def task_executor( task_list: List[TaskGroup], clipboard: bool = False, auto_integrate: bool = False, ) -> None: for group in task_list: final_prompt = format_task_prompt(group=group, clipboard=clipboard) if auto_integrate: final_prompt = augment_guidance_prompt(prompt=final_prompt) response = get_llm_response(final_prompt) logger.debug(f"LLM response: {response=}") group.integrate(response=response) def format_task_prompt(group: TaskGroup, clipboard: bool) -> str: prompt = group.format_prompt() if clipboard: print("Copied prompt to clipboard") pyperclip.copy(prompt) with temporary_workspace() as f: write_file(f, prompt) print(f"Prompt wrote into {f}") _ = input("Press enter to continue/exit. ") return read_file(f) def augment_guidance_prompt(prompt: str) -> str: logger.debug(f"Before guidance: {prompt=}") additions = inspect.cleandoc(""" Your answer should only consist of code. All explanation should be done with comments instead of raw text. ```python {{gen 'code'}} ``` """) prompt = prompt + additions logger.debug(f"After guidance: {prompt=}") return prompt def get_llm_response(prompt: str) -> str: guidance_program = guidance(prompt) raw_response = guidance_program() cleaned = raw_response['code'].strip("```") return cleaned
[ "promptb4283f4c-0ebb-4d3d-8e71-c2247518d835PLACEHOLDERPLACEHOLDER" ]
2024-01-10
zip13/llama_index_gui
build.py
from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from env import ini_env
import sys


def construct_index(directory_path):
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.3, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTVectorStoreIndex.from_documents(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    # index.save_to_disk('index.json')
    index.storage_context.persist(persist_dir='./storage')
    return index


ini_env()
construct_index("docs")
[]
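build.py above only builds and persists the index under ./storage. A minimal sketch of reading it back and asking a question, using the same legacy llama_index calls that the chat scripts later in this repo rely on (the question text is just an example):

from llama_index import StorageContext, load_index_from_storage
from env import ini_env

ini_env()  # sets OPENAI_API_KEY and related environment variables

storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

query_engine = index.as_query_engine(similarity_top_k=3, response_mode="simple_summarize")
response = query_engine.query("What are these documents about?")
print(response.response)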
2024-01-10
zip13/llama_index_gui
build_gui.py
from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import sys
import os
import logging
import gradio as gr
import json
from env import ini_env


# Build the vector store index
def construct_index(folder_path, temperature, max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit, folder_output_path):
    # Configure the model
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=temperature, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    # Read the documents in the given directory
    documents = SimpleDirectoryReader(folder_path).load_data()
    index = GPTVectorStoreIndex.from_documents(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index.storage_context.persist(persist_dir=folder_output_path)

    # Read back the persisted result
    datastr = read_storage_data(folder_output_path)
    return "向量库建立成功:\n" + datastr


# Read the persisted vector store
def read_storage_data(folder_output_path):
    # Open the persisted docstore file
    with open(folder_output_path + '/docstore.json', 'r', encoding='utf-8') as file:
        data = json.load(file)
    # 'data' is now a Python dict containing the contents of the JSON file;
    # it can be printed for inspection
    # Format the dict for display
    formatted_data = json.dumps(data, indent=4, ensure_ascii=False)
    # Return the formatted data
    return formatted_data


def BuildDig():
    # Set up a dialog window
    folder_path = gr.inputs.Textbox(label="请输入文档目录", default="./docs")
    temperature_slider = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.7, label="温度")
    max_input_size = gr.inputs.Slider(minimum=512, maximum=8192, default=4096, step=512, label="最大输入长度")
    num_outputs = gr.inputs.Slider(minimum=64, maximum=1024, default=512, step=64, label="输出长度")
    max_chunk_overlap = gr.inputs.Slider(minimum=10, maximum=50, default=20, step=5, label="最大分块重叠单词数")
    chunk_size_limit = gr.inputs.Slider(minimum=200, maximum=1000, default=600, step=100, label="分块大小限制")
    folder_output_path = gr.inputs.Textbox(label="请选择文档目录", default="./storage")
    demo = gr.Interface(
        construct_index,
        [folder_path, temperature_slider, max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit, folder_output_path],
        ["text"],
        # Disable the flagging (save data) button
        allow_flagging="never",
    )
    return demo


# Load environment variables
ini_env()
# Launch the service
BuildDig().launch(share=True, server_port=17860, server_name="127.0.0.1")
[]
2024-01-10
zip13/llama_index_gui
chat_test_add_context.py
import os import time import sysconfig from modules import chat_options from modules.chat_options import cmd_opts from modules.chat_ui import create_ui,load_index from env import ini_env # patch PATH for cpm_kernels libcudart lookup import sys import os import json ini_env() # 导入必要的库和模块 from llama_index import ServiceContext, LLMPredictor, PromptHelper, StorageContext, load_index_from_storage from langchain import OpenAI from modules.chat_options import cmd_opts from modules.context import Context from llama_index.data_structs.node import NodeWithScore from llama_index.response.schema import Response from llama_index.utils import truncate_text from llama_index import download_loader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage from pathlib import Path import os # 初始化LLM预测器(这里使用gpt-3.5-turbo模型) llm_predictor = LLMPredictor(llm=OpenAI(temperature=cmd_opts.temperature, model_name=cmd_opts.model_name)) # 构建服务上下文 service_context = ServiceContext.from_defaults( llm_predictor=llm_predictor, prompt_helper=PromptHelper(max_input_size=cmd_opts.max_input_size, max_chunk_overlap=cmd_opts.max_chunk_overlap, num_output=cmd_opts.num_output), chunk_size_limit=cmd_opts.chunk_size_limit ) # 构建存储上下文 storage_context = StorageContext.from_defaults(persist_dir=cmd_opts.persist_dir) # 加载索引 index = load_index_from_storage(storage_context, service_context=service_context) query_engine = index.as_query_engine( similarity_top_k=3, response_mode="simple_summarize" ) def add_turn(turns, new_turn): turns.append(new_turn) if len(turns) > 5: del turns[0] # 定义打印来源节点的函数 def pprint_source_node( source_node, source_length: int = 350, wrap_width: int = 70 ) -> str: source_text_fmt = truncate_text(source_node.node.get_text().strip(), source_length) return "".join([ f'(相似度{source_node.score}) ', "\nnode id:", source_node.doc_id, "\n", source_text_fmt]) def pprint_answer(response): # 初始化参考文档列表 refDoc = [] # 遍历来源节点,获取参考文档 for node in response.source_nodes: if node.similarity is not None: refDoc.append(pprint_source_node(node)) # 根据是否需要显示引用,生成最终的回应 res = "Agent0: "+"".join([ response.response, "\n引用:\n", "\n".join(refDoc)]) print(res) turns=[] while True: text_input = input("User: ") turns.append({"role":"user","content":text_input}) turns_str = json.dumps(turns, ensure_ascii=False) print(turns_str) response = query_engine.query(turns_str) pprint_answer(response) add_turn(turns,{"role":"assistant","content":response.response}) # 只有这样迭代才能连续提问理解上下文
[]
2024-01-10
zip13/llama_index_gui
chat_test.py
import os
import time
import sysconfig

from modules import chat_options
from modules.chat_options import cmd_opts
from env import ini_env

ini_env()

# 导入必要的库和模块
from llama_index import ServiceContext, LLMPredictor, PromptHelper, StorageContext, load_index_from_storage
from langchain import OpenAI
from langchain.agents import Tool, initialize_agent
from llama_index.langchain_helpers.memory_wrapper import GPTIndexChatMemory

# 初始化LLM预测器(这里使用gpt-3.5-turbo模型)
llm_predictor = LLMPredictor(llm=OpenAI(temperature=cmd_opts.temperature, model_name=cmd_opts.model_name))

# 构建服务上下文
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    prompt_helper=PromptHelper(max_input_size=cmd_opts.max_input_size,
                               max_chunk_overlap=cmd_opts.max_chunk_overlap,
                               num_output=cmd_opts.num_output),
    chunk_size_limit=cmd_opts.chunk_size_limit
)

# 构建存储上下文
storage_context = StorageContext.from_defaults(persist_dir=cmd_opts.persist_dir)

# 加载索引
index = load_index_from_storage(storage_context, service_context=service_context)

memory = GPTIndexChatMemory(
    index=index,
    memory_key="chat_history",
    query_kwargs={"response_mode": "simple_summarize"},
    # return_source returns source nodes instead of querying index
    return_source=True,
    # return_messages returns context in message format
    return_messages=True
)

tools = [
    Tool(
        name = "GPT Index",
        func=lambda q: str(index.as_query_engine(
            similarity_top_k=3, response_mode='tree_summarize',verbose=True).query(q)),
        description="useful for when you want to answer questions about 丰迈. The input to this tool should be a complete chinese sentence.",
        return_direct=True
    ),
]

llm=OpenAI(temperature=cmd_opts.temperature, model_name=cmd_opts.model_name)
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory,verbose=True)

while True:
    text_input = input("User: ")
    response = agent_chain.run(input=text_input)
    print(f'Agent: {response}')
[]
2024-01-10
zip13/llama_index_gui
mergeStorage.py
from llama_index import SimpleDirectoryReader, GPTListIndex,ServiceContext,StorageContext, GPTVectorStoreIndex, LLMPredictor, PromptHelper,load_index_from_storage
from langchain import OpenAI
import sys
from env import ini_env

def load_index(persist_dir):
    # LLM Predictor (gpt-3.5-turbo)
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.3, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor,
        prompt_helper=prompt_helper,
        chunk_size_limit=chunk_size_limit
    )
    # rebuild storage context
    storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
    # load index
    index = load_index_from_storage(storage_context,service_context=service_context)
    return index

ini_env()

#加载已有index向量库
index5g = load_index('./storage/5g')
#加载新增文档
documents = SimpleDirectoryReader('./docs/bai').load_data()
#添加文档到已有index
for doc in documents:
    index5g.insert(doc)
#保存到新的位置
index5g.storage_context.persist(persist_dir='./storage/all')
[]
2024-01-10
zip13/llama_index_gui
modules~chat_ui.py
# 导入必要的库和模块 from llama_index import ServiceContext, LLMPredictor, PromptHelper, StorageContext, load_index_from_storage from langchain import OpenAI from modules.chat_options import cmd_opts from modules.context import Context from llama_index.data_structs.node import NodeWithScore from llama_index.response.schema import Response from llama_index.utils import truncate_text import gradio as gr import os # 定义CSS和Javascript路径 css = "style.css" script_path = "scripts" # 保存原始的gradio模板响应 _gradio_template_response_orig = gr.routes.templates.TemplateResponse # 初始化index变量 index = None # 定义加载索引的函数 def load_index(): global index # 初始化LLM预测器(这里使用gpt-3.5-turbo模型) llm_predictor = LLMPredictor(llm=OpenAI(temperature=cmd_opts.temperature, model_name=cmd_opts.model_name)) # 构建服务上下文 service_context = ServiceContext.from_defaults( llm_predictor=llm_predictor, prompt_helper=PromptHelper(max_input_size=cmd_opts.max_input_size, max_chunk_overlap=cmd_opts.max_chunk_overlap, num_output=cmd_opts.num_output), chunk_size_limit=cmd_opts.chunk_size_limit ) # 构建存储上下文 storage_context = StorageContext.from_defaults(persist_dir=cmd_opts.persist_dir) # 加载索引 index = load_index_from_storage(storage_context, service_context=service_context) # 定义聊天函数 def chat(ctx, message, model_type, refFlag): global index # 检查索引是否已加载 if not index: raise "index not loaded" # 限制对话轮次 ctx.limit_round() # 构建查询引擎 query_engine = index.as_query_engine( similarity_top_k=cmd_opts.similarity_top_k, response_mode=model_type ) # 发出查询并获取回应 response = query_engine.query(message) # 打印回应 print(response) # 初始化参考文档列表 refDoc = [] # 遍历来源节点,获取参考文档 for node in response.source_nodes: if node.similarity is not None: refDoc.append(pprint_source_node(node)) # 根据是否需要显示引用,生成最终的回应 if(refFlag): res = "".join([ response.response, "\n引用:\n", "\n".join(refDoc)]) else: res = response.response # 更新对话历史 ctx.append(message, res) ctx.refresh_last() # 返回对话历史 return ctx.rh # 定义打印来源节点的函数 def pprint_source_node( source_node, source_length: int = 350, wrap_width: int = 70 ) -> str: source_text_fmt = truncate_text(source_node.node.get_text().strip(), source_length) return "".join([ f'(相似度{source_node.score}) ', "\nnode id:", source_node.doc_id, "\n", source_text_fmt]) # 定义创建用户界面的函数 def create_ui(): reload_javascript(); with gr.Blocks(analytics_enabled=False) as chat_interface: _ctx = Context() state = gr.State(_ctx) with gr.Row(): with gr.Column(scale=3): input=gr.inputs.Textbox(lines=7, label="请输入") model_type = gr.inputs.Radio( choices=["tree_summarize", "compact", "simple_summarize", "refine", "generation"], label="选择模型", default="simple_summarize", ) refFlag=gr.inputs.Checkbox(default=True, label="显示引用", optional=False) submit = gr.Button("发送", elem_id="c_generate") with gr.Column(scale=7): chatbot = gr.Chatbot(elem_id="c_chatbot", show_label=False).style(height=500) savebutton = gr.Button("保存", elem_id="c_save") # 设置对话窗的点击事件 submit.click(chat, inputs=[ state, input, model_type, refFlag ], outputs=[ chatbot, ]) return chat_interface # 定义重新加载Javascript的函数 def reload_javascript(): scripts_list = [os.path.join(script_path, i) for i in os.listdir(script_path) if i.endswith(".js")] javascript = "" for path in scripts_list: with open(path, "r", encoding="utf8") as js_file: javascript += f"\n<script>{js_file.read()}</script>" # 修改gradio的模板响应,添加Javascript def template_response(*args, **kwargs): res = _gradio_template_response_orig(*args, **kwargs) res.body = res.body.replace( b'</head>', f'{javascript}</head>'.encode("utf8")) res.init_headers() return res 
gr.routes.templates.TemplateResponse = template_response
[]
2024-01-10
Ajasra/ai_assist_back
conversation~conv_helper.py
import os import openai import tiktoken from dotenv import load_dotenv from cocroach_utils.database_utils import save_error from cocroach_utils.db_conv import get_conv_by_id load_dotenv() def get_conv_id(conv_id, user_id, doc_id): """ Get the conversation conv_id and return a new one if it does not exist :param conv_id: :return: """ cur_conv = None if conv_id is None or conv_id == -1 or conv_id == 0: pass else: conv = get_conv_by_id(conv_id) if conv is None: save_error("Conversation not found") cur_conv = -1 else: cur_conv = conv return cur_conv def num_tokens_from_message(message, model="gpt-3.5-turbo"): try: encoding = tiktoken.encoding_for_model(model) except KeyError: print("Warning: model not found. Using cl100k_base encoding.") encoding = tiktoken.get_encoding("cl100k_base") if model == "gpt-3.5-turbo": print("Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.") return num_tokens_from_message(message, model="gpt-3.5-turbo-0301") elif model == "gpt-4": print("Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.") return num_tokens_from_message(message, model="gpt-4-0314") elif model == "gpt-3.5-turbo-0301": tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n tokens_per_name = -1 # if there's a name, the role is omitted elif model == "gpt-4-0314": tokens_per_message = 3 tokens_per_name = 1 else: raise NotImplementedError( f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") num_tokens = len(encoding.encode(message)) num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> return num_tokens def format_response(response_input): """ Format the response :param response_input: :return: """ data = [] # check if there are follow up questions regardless of uppercase or lowercase if "FOLLOW UP QUESTIONS:" in response_input: data = response_input.split("FOLLOW UP QUESTIONS:") elif "FOLLOWUP QUESTIONS:" in response_input: data = response_input.split("FOLLOWUP QUESTIONS:") elif "Follow up questions:" in response_input: data = response_input.split("Follow up questions:") elif "Followup questions:" in response_input: data = response_input.split("Followup questions:") elif "follow up questions:" in response_input: data = response_input.split("follow up questions:") elif "followup questions:" in response_input: data = response_input.split("followup questions:") elif "Followup" in response_input: data = response_input.split("Followup:") elif "FOLLOWUP" in response_input: data = response_input.split("FOLLOWUP:") elif "followup" in response_input: data = response_input.split("followup:") elif "follow-up" in response_input: data = response_input.split("follow-up:") if len(data) > 1: # answer = data[0].strip().replace("ANSWER:", "") # answer = data[0].strip().replace("answer:", "") answer = data[0].strip().replace("Answer:", "") follow_up_questions = data[1].strip().split("\n") if len(follow_up_questions) == 1: follow_up_questions = data[1].strip().split("?") return { "answer": answer, "follow_up_questions": follow_up_questions } else: return { "answer": response_input.replace("ANSWER:", "").strip(), "follow_up_questions": [], "source": "" } def moderation(text): print("Moderation text: ", text) result = openai.Moderation.create(input=text, api_key=os.getenv("OPENAI_API_KEY")) print("Moderation result: ", result) result = result["results"][0] 
return result.flagged == True
[]
2024-01-10
Ajasra/ai_assist_back
conversation~ll_conv.py
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores import ChromaVectorStore import os from dotenv import load_dotenv from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.chains import RetrievalQAWithSourcesChain from llama_index import GPTVectorStoreIndex, StorageContext, load_index_from_storage from llama_index import LLMPredictor, GPTVectorStoreIndex, PromptHelper, ServiceContext, SimpleDirectoryReader from llama_index.indices.composability import ComposableGraph from llama_index.indices.keyword_table import GPTSimpleKeywordTableIndex load_dotenv() def create_index(filename): loader = SimpleDirectoryReader("./data/"+filename, recursive=True, exclude_hidden=True) documents = loader.load_data() print("Loaded {} documents".format(len(documents))) print("First document: {}".format(documents[0])) index = None llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") llm_predictor = LLMPredictor(llm) max_input_size = 4096 num_output = 1024 max_chunk_overlap = 64 prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) print("Creating index") index = GPTVectorStoreIndex.from_documents( documents, service_context=service_context ) print("Index created") print("Index conv_id: {}".format(index.index_id)) print(index.docstore) if not os.path.exists("./persist/"+filename): os.makedirs("./persist/"+filename) index.storage_context.persist(persist_dir="./persist/"+filename) print("./persist/"+filename) def load_multiple_indexes(): # Define the directories directories = ['./persist/1', './persist/2'] # Create a dictionary of StorageContext objects storage_contexts = {} for directory in directories: storage_contexts[directory] = StorageContext.from_defaults(persist_dir=directory) # Load the indexes from each directory ind = [] vector_indices = {} for directory, storage_context in storage_contexts.items(): index = load_index_from_storage(storage_context) ind.append(index) vector_indices[directory] = index index_summaries = ["This document is about how humans have evolved to become the dominant species on Earth, " "and how our current way of life is causing a rapid rate of extinction of other species. It " "proposes that humans should be treated as a collective organism, and examines the past, " "present, and future of our species in order to diagnose and treat the ailments caused by our " "current way of life.","This document is about the philosophical debate on the idea of truth, " "the importance of understanding how identity is built up through a " "network of interactions, and the prevalence of individualism in " "American culture."] # Create the composed graph graph = ComposableGraph.from_indices( GPTSimpleKeywordTableIndex, [ind[0], ind[1]], index_summaries=index_summaries, max_keywords_per_chunk=100 ) # Create the query engine query_engine = graph.as_query_engine() # query_engine = ind[1].as_query_engine() query = "What is cancer culture?" _DEFAULT_TEMPLATE = """Given the context information answer the following question If you don't know the answer, just say you dont know Don't try to make up an answer. 
========= Always answer in the format: ANSWER: <your answer> FOLLOW UP QUESTIONS: <list of 3 suggested questions related to context and conversation for better understanding> SOURCE: <do not make up source, give the page or the chapter from the document> ========= question: {}""".format(query) response = query_engine.query(query) print(response) print(response.get_formatted_sources())
[ "Given the context information answer the following question\n If you don't know the answer, just say you dont know Don't try to make up an answer.\n =========\n Always answer in the format:\n ANSWER: <your answer>\n FOLLOW UP QUESTIONS: <list of 3 suggested questions related to context and conversation for better understanding>\n SOURCE: <do not make up source, give the page or the chapter from the document>\n =========\n question: What is cancer culture?" ]
2024-01-10
Explorergt92/Automotive-AI
api~openai_functions~gpt_chat.py
""" This module provides functions for working with OpenAI's API. """ import os import json import ast from rich.console import Console from openai import OpenAI, APIConnectionError, RateLimitError, APIStatusError from config import OPENAI_API_KEY # Instantiate OpenAI client client = OpenAI(api_key=OPENAI_API_KEY) console = Console() def chat_gpt(prompt): """ Generates a response using OpenAI's API. Args: prompt (str): The prompt to generate a response for. Returns: str: The generated response. """ with console.status("[bold green]Generating...", spinner="dots"): try: completion = client.chat.completions.create( model="gpt-4-1106-preview", messages=[ { "role": "system", "content": "You are an AI assistant.", }, { "role": "user", "content": f"{prompt}", }, ], max_tokens=200, n=1, stop=None, temperature=0.5, frequency_penalty=0, presence_penalty=0, ) # Extract the text part of the response response_text = completion.choices[0].message.content.strip() except APIConnectionError as e: console.print("[bold red]The server could not be reached") console.print(e.__cause__) response_text = "Error: The server could not be reached." except RateLimitError as e: console.print(f"[bold red]A 429 status code.{e}") response_text = "Error: Rate limit exceeded. Try again later." except APIStatusError as e: console.print(f"[bold red]Error code was received{e}") console.print(e.status_code) console.print(e.response) response_text = f"API error occurred status code {e.status_code}" return response_text def chat_gpt_custom(processed_data): """ Extracts VIN number from processed data using OpenAI's API. Args: processed_data (str): The processed data containing the VIN response. Returns: str: The extracted VIN number or the generated response. """ if "VIN response:" in processed_data: vin = processed_data.split("VIN response: ")[1].split("\n")[0].strip() decoded_data = processed_data.split("Decoded VIN: ")[1].strip() vehicle_data = ast.literal_eval(decoded_data) if vehicle_data: response = ( f"The VIN is {vin}. This is a {vehicle_data['Model Year']} " f"{vehicle_data['Make']} {vehicle_data['Model']} with a " f"{vehicle_data['Displacement (L)']} engine. Trim level is " f"{vehicle_data['Trim'] if vehicle_data['Trim'] else 'none'}." ) else: response = "couldn't retrieve information for the provided VIN." else: with console.status("[bold green]Processing", spinner="dots"): try: completion = client.chat.completions.create( model="gpt-4-1106-preview", messages=[ { "role": "system", "content": "You are an AI assistant.", }, { "role": "user", "content": f"{processed_data}", }, ], max_tokens=200, n=1, stop=None, temperature=0.5, frequency_penalty=0, presence_penalty=0, ) response = completion.choices[0].message.content.strip() except APIConnectionError as e: console.print("[bold red]The server could not be reached") console.print(e.__cause__) response = "Error: The server could not be reached." except RateLimitError as e: console.print(f"[bold red]429 status code was received.{e}") response = "Error: Rate limit exceeded." except APIStatusError as e: console.print("[bold red]non-200-range status code received") console.print(e.status_code) console.print(e.response) response = f"Error: An API error occurred {e.status_code}." return response def chat_gpt_conversation(prompt, conversation_history): """ This function generates a response for the given prompt using GPT model. :param prompt: The input prompt for the GPT model. :type prompt: str :param conversation_history: The history of the conversation so far. 
:type conversation_history: list """ with console.status("[bold green]Generating...", spinner="dots"): try: response = client.chat.completions.create( model="gpt-4-1106-preview", messages=conversation_history + [{"role": "user", "content": f"{prompt}"}], max_tokens=200, n=1, stop=None, temperature=0.5, frequency_penalty=0, presence_penalty=0, ) response_text = response.choices[0].message.content.strip() except APIConnectionError as e: console.print("[bold red]The server could not be reached") console.print(e.__cause__) response_text = "Error: The server could not be reached." except RateLimitError as e: console.print(f"[bold red]A 429 status code was received.{e}") response_text = "Error: Rate limit exceeded." except APIStatusError as e: console.print(f"[bold red]non-200-range status code received{e}") console.print(e.status_code) console.print(e.response) response_text = ( f"Error: API error occurred with status code {e.status_code}." ) return response_text def load_conversation_history(file_path="conversation_history.json"): """ This function loads conversation history from a JSON file. :param file_path: Defaults to "conversation_history.json". :return: A list of conversation messages. """ with console.status("[bold green]Loading...", spinner="dots"): try: if os.path.exists(file_path): with open(file_path, "r", encoding="utf-8") as f: conversation_history = json.load(f) else: conversation_history = [ { "role": "system", "content": "You are an AI assistant.", } ] except IOError as io_error: console.print(f"[bold red]Error loading history: {io_error}") conversation_history = [ { "role": "system", "content": "You are an AI assistant." } ] return conversation_history def save_conversation_history( conversation_history, file_path="conversation_history.json" ): """ Save the conversation history to a JSON file. Args: conversation_history (list): representing the conversation history. file_path (str, optional): JSON file where the conversation history. """ try: with open(file_path, "w", encoding="utf-8") as f: json.dump(conversation_history, f) except IOError as io_error: print(f"An error occurred saving conversation history: {io_error}") def format_conversation_history_for_summary(conversation_history): """ Format the conversation history for summary display. Args: conversation_history (str): The conversation history as a string. Returns: str: The formatted conversation history. """ with console.status("[bold green]Formatting...", spinner="dots"): formatted_history = "" for message in conversation_history: role = message["role"].capitalize() content = message["content"] formatted_history += f"{role}: {content}\n" return formatted_history def summarize_conversation_history_direct(conversation_history): """ This function summarizes the conversation history provided as input. :param conversation_history: A list of conversation messages. 
:return: None """ with console.status("[bold green]Summarizing..", spinner="dots"): try: formatted_history = format_conversation_history_for_summary( conversation_history ) summary_prompt = ( "Please summarize the following conversation history and " "retain all important information:\n\n" f"{formatted_history}\nSummary:" ) messages = conversation_history + [ {"role": "user", "content": summary_prompt} ] response = client.chat.completions.create( model="gpt-4-1106-preview", messages=messages, max_tokens=300, n=1, stop=None, temperature=0.5, top_p=0.5, frequency_penalty=0, presence_penalty=0, ) summary_text = response.choices[0].message.content.strip() summarized_history = [ {"role": "system", "content": "You are an AI assistant"} ] summarized_history.append( { "role": "assistant", "content": summary_text } ) except APIConnectionError as e: console.print("[bold red]The server could not be reached") console.print(e.__cause__) summarized_history = [ { "role": "assistant", "content": "Error: The server could not be reached.", } ] except RateLimitError as e: console.print(f"[bold red]A 429 status code was received.{e}") summarized_history = [ { "role": "assistant", "content": "Error: Rate limit exceeded. Try again later.", } ] except APIStatusError as e: console.print("[bold red]non-200-range status code received") console.print(e.status_code) console.print(e.response) summarized_history = [ { "role": "assistant", "content": f"Error: API error {e.status_code}.", } ] return summarized_history
[ "Error: The server could not be reached.", "Please summarize the following conversation history and retain all important information:\n\nPLACEHOLDER\nSummary:", "You are an AI assistant", "You are an AI assistant.", "PLACEHOLDER", "Error: Rate limit exceeded. Try again later.", "content" ]
2024-01-10
ru-martynov/deploy-app-site
Chatbot.py
import openai
import streamlit as st
import os

# with st.sidebar:
#     openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
#     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
#     "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
#     "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("💬 CustDev")

os.environ['OPENAI_API_KEY'] = st.secrets['OPENAI_API_KEY']

"""Добро пожаловать в нашу CustDev сессию! Мы рады видеть вас здесь, так как ваше мнение и опыт важны для нас. Цель этого чата - познакомить вас с нашим основным продуктом и поговорить о ваших потребностях и проблемах в области HR. Мы хотим лучше понять, как мы можем помочь вам решать эти проблемы. Начнем эту увлекательную journey вместе!"""

if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": """Я — чат-бот, специально созданный для того, чтобы поговорить о вашей работе и потребностях для последущего своего развития!
 Как вас зовут?"""}]

for msg in st.session_state.messages:
    if msg["content"] != "Я хочу, чтобы ты выступил в роли эксперта в области CustDev у Hr-ов по сбору обратной связи, с большим опытом и знаниями в сфере интервьюирования. Ты обладаешь глубоким пониманием того, как создавать эффективные стратегии для выявления истинного мнения. Кратко слушая человека и задавая наводящие вопросы, не пиши своё мнение и пиши кратко. Начни общение с того где работает человек, а после с вопроса по частоте проведения обратной связи, а после узнать о проблемах в подходе сбора обратной связи и продолжай задовать вопросы на разные темы сбора обратной связи:":
        st.chat_message(msg["role"]).write(msg["content"])

st.session_state.messages.append({"role": "user", "content": "Я хочу, чтобы ты выступил в роли эксперта в области CustDev у Hr-ов по сбору обратной связи, с большим опытом и знаниями в сфере интервьюирования. Ты обладаешь глубоким пониманием того, как создавать эффективные стратегии для выявления истинного мнения. Кратко слушая человека и задавая наводящие вопросы, не пиши своё мнение и пиши кратко. Начни общение с того где работает человек, а после с вопроса по частоте проведения обратной связи, а после узнать о проблемах в подходе сбора обратной связи и продолжай задовать вопросы на разные темы сбора обратной связи:"})

if prompt := st.chat_input():
    # if not openai_api_key:
    #     st.info("Please add your OpenAI API key to continue.")
    #     st.stop()

    openai.api_key = os.getenv('OPENAI_API_KEY')
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
    msg = response.choices[0].message
    st.session_state.messages.append(msg)
    st.chat_message("assistant").write(msg.content)
[ "Я хочу, чтобы ты выступил в роли эксперта в области CustDev у Hr-ов по сбору обратной связи, с большим опытом и знаниями в сфере интервьюирования. Ты обладаешь глубоким пониманием того, как создавать эффективные стратегии для выявления истинного мнения. Кратко слушая человека и задавая наводящие вопросы, не пиши своё мнение и пиши кратко. Начни общение с того где работает человек, а после с вопроса по частоте проведения обратной связи, а после узнать о проблемах в подходе сбора обратной связи и продолжай задовать вопросы на разные темы сбора обратной связи:", "Я — чат-бот, специально созданный для того, чтобы поговорить о вашей работе и потребностях для последущего своего развития! \n Как вас зовут?" ]
2024-01-10
Mkrolick/Flashcarder
Old%20Content~chatmain.py
import os
import openai
import dotenv
import prompts

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

start_sequence = "\nAI:"
restart_sequence = "\nHuman: "

response = openai.ChatCompletion.create(
    model="gpt-4",
    messages= [{"role": "system", "content": "You produce flashcard from a specific book. You produce highly detailed flash cards with a term name and a definition. You produce around 20 flashcards per book chapter."},
               {"role": "user", "content": prompts.book_summary_prompt}],
)

# save out put to a text file
with open('output.txt', 'w') as f:
    f.write(response["choices"][0]["message"]["content"])
[ "You produce flashcard from a specific book. You produce highly detailed flash cards with a term name and a definition. You produce around 20 flashcards per book chapter." ]
2024-01-10
Mkrolick/Flashcarder
Old%20Content~book_extraction~flash_card_reducer.py
import pandas as pd
import openai
import dotenv
import os

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

df = pd.read_csv("C:/Users/malco/OneDrive/Documents/GitHub/Auto-GPT/GPT-Tools/book_extraction/flash_decks/like_switch_aggr.csv")

#for value in data frame column card_definition has \n filter into a new data frame
df = df[df["card_definition"].str.contains("\n")]

reduced_df = pd.DataFrame(columns=["card_name", "card_definition"])

for index, row in df.iterrows():
    #try:
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages= [{"role": "system", "content": "You take in information and condense it into one high quality flashcard. You produce highly a detailed flashcard with a term name and a definition in the format: Term: <Card Name> \n\n Definition: <Card Definition> "},
                   {"role": "user", "content": f"Please produce a flashcard on {row['card_name']} from the following content: \n {row['card_definition']}"}],
    )
    text = response["choices"][0]["message"]["content"]
    print(text)
    term, definition = text.split("\n\n")
    term = term.replace("Term: ", "")
    definition = definition.replace("Definition: ", "")
    reduced_df = pd.concat([reduced_df, pd.DataFrame({"card_name": [term], "card_definition": [definition]})], ignore_index=True)

reduced_df["card_definition"] = reduced_df["card_definition"].apply(lambda x: x.strip())

# save reduced_df to a csv file
reduced_df.to_csv("C:/Users/malco/OneDrive/Documents/GitHub/Auto-GPT/GPT-Tools/book_extraction/flash_decks/like_switch_reduced.csv")

#except Exception as e:
#    continue
[ "You take in information and condense it into one high quality flashcard. You produce highly a detailed flashcard with a term name and a definition in the format: Term: <Card Name> \n\n Definition: <Card Definition> ", "Please produce a flashcard on PLACEHOLDER from the following content: \n PLACEHOLDER" ]
2024-01-10
Mkrolick/Flashcarder
Old%20Content~getmodels.py
import os
import openai
import dotenv
import prompts

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# print out all available engines
engines = openai.Engine.list()
print(engines)
[]
2024-01-10
Mkrolick/Flashcarder
Old%20Content~genchapters.py
import os
import openai
import dotenv
import prompts

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

book_name = input("Enter the name of the book: ")

response = openai.Completion.create(
    engine="text-davinci-003",
    prompt=f"List out all of the chapters of the book: {book_name} in a python list format. In the format [ChapterOneName, ChapterTwoName, ...]",
    temperature=0.7,
    max_tokens=709,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0
)

# save out put to a text file
with open('output.txt', 'w') as f:
    f.write(response['choices'][0]['text'])
[ "List out all of the chapters of the book: PLACEHOLDER in a python list format. In the format [ChapterOneName, ChapterTwoName, ...]" ]
2024-01-10
Mkrolick/Flashcarder
Old%20Content~book_extraction~flashchunk_creator.py
import os
import openai
import dotenv

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

#print("Books in pdf folder:")
#for file in os.listdir("pdfs"):
#    print(file)
#
#print("-----------------------------------")

print("Folders in current directory")
for file in os.listdir():
    if os.path.isdir(file) and file != "pdfs":
        print(file)

folder_name = input("Enter the name of the folder you want to extract text from: ")

#get the list of all files in the directory page_chunks
files = os.listdir(f"{folder_name}/page_chunks")

start_sequence = "\nAI:"
restart_sequence = "\nHuman: "

if not os.path.exists(f"{folder_name}/flash_chunks"):
    os.makedirs(f"{folder_name}/flash_chunks")

if not os.path.exists(f"{folder_name}/file_exceptions"):
    os.makedirs(f"{folder_name}/file_exceptions")

for index, file in enumerate(files[2:]):
    try:
        file_content = open(f"{folder_name}/page_chunks/{file}", "r", encoding="utf-8").read()
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages= [{"role": "system", "content": "You produce flashcards from a two-page section from a book. You produce highly detailed flash cards with a term name and a definition in the format: Term: <Card Name> \n Definition: <Card Definition> \n ... Term: <Card Name> \n Defnition: <Card Definition>"},
                       {"role": "user", "content": "please produce some flashcards from the provided content: \n" + file_content}],
        )
        with open(f"{folder_name}/flash_chunks/flash_chunks_{index}.txt", "w", encoding="utf-8") as f:
            f.write(response["choices"][0]["message"]["content"])
    except Exception as e:
        # write file exepction to a file in file_exceptions folder
        with open(f"{folder_name}/file_exceptions/{file}.txt", "w", encoding="utf-8") as f:
            f.write(str(e))
[ "please produce some flashcards from the provided content: \nPLACEHOLDER", "You produce flashcards from a two-page section from a book. You produce highly detailed flash cards with a term name and a definition in the format: Term: <Card Name> \n Definition: <Card Definition> \n ... Term: <Card Name> \n Defnition: <Card Definition>" ]
2024-01-10
karpathy/deep-vector-quantization
dvq~vqvae.py
""" Defines the full (PyTorch Lightning module) VQVAE, which incorporates an encoder, decoder and a quantize layer in the middle for the discrete bottleneck. """ import os import math from argparse import ArgumentParser import torch from torch import nn, einsum import torch.nn.functional as F import pytorch_lightning as pl from pytorch_lightning.callbacks import ModelCheckpoint from data.cifar10 import CIFAR10Data from model.deepmind_enc_dec import DeepMindEncoder, DeepMindDecoder from model.openai_enc_dec import OpenAIEncoder, OpenAIDecoder from model.openai_enc_dec import Conv2d as PatchedConv2d from model.quantize import VQVAEQuantize, GumbelQuantize from model.loss import Normal, LogitLaplace # ----------------------------------------------------------------------------- class VQVAE(pl.LightningModule): def __init__(self, args, input_channels=3): super().__init__() self.args = args # encoder/decoder module pair Encoder, Decoder = { 'deepmind': (DeepMindEncoder, DeepMindDecoder), 'openai': (OpenAIEncoder, OpenAIDecoder), }[args.enc_dec_flavor] self.encoder = Encoder(input_channels=input_channels, n_hid=args.n_hid) self.decoder = Decoder(n_init=args.embedding_dim, n_hid=args.n_hid, output_channels=input_channels) # the quantizer module sandwiched between them, +contributes a KL(posterior || prior) loss to ELBO QuantizerModule = { 'vqvae': VQVAEQuantize, 'gumbel': GumbelQuantize, }[args.vq_flavor] self.quantizer = QuantizerModule(self.encoder.output_channels, args.num_embeddings, args.embedding_dim) # the data reconstruction loss in the ELBO ReconLoss = { 'l2': Normal, 'logit_laplace': LogitLaplace, # todo: add vqgan }[args.loss_flavor] self.recon_loss = ReconLoss def forward(self, x): z = self.encoder(x) z_q, latent_loss, ind = self.quantizer(z) x_hat = self.decoder(z_q) return x_hat, latent_loss, ind def training_step(self, batch, batch_idx): x, y = batch # hate that i have to do this here in the model x = self.recon_loss.inmap(x) x_hat, latent_loss, ind = self.forward(x) recon_loss = self.recon_loss.nll(x, x_hat) loss = recon_loss + latent_loss return loss def validation_step(self, batch, batch_idx): x, y = batch # hate that i have to do this here in the model x = self.recon_loss.inmap(x) x_hat, latent_loss, ind = self.forward(x) recon_loss = self.recon_loss.nll(x, x_hat) self.log('val_recon_loss', recon_loss, prog_bar=True) # debugging: cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally encodings = F.one_hot(ind, self.quantizer.n_embed).float().reshape(-1, self.quantizer.n_embed) avg_probs = encodings.mean(0) perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() cluster_use = torch.sum(avg_probs > 0) self.log('val_perplexity', perplexity, prog_bar=True) self.log('val_cluster_use', cluster_use, prog_bar=True) def configure_optimizers(self): # separate out all parameters to those that will and won't experience regularizing weight decay decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, torch.nn.Conv2d, torch.nn.ConvTranspose2d, PatchedConv2d) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.BatchNorm2d, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # validate that we considered every parameter param_dict = {pn: p for pn, p in self.named_parameters()} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 1e-4}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] optimizer = torch.optim.AdamW(optim_groups, lr=3e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4) self.optimizer = optimizer return optimizer @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) # model type parser.add_argument("--vq_flavor", type=str, default='vqvae', choices=['vqvae', 'gumbel']) parser.add_argument("--enc_dec_flavor", type=str, default='deepmind', choices=['deepmind', 'openai']) parser.add_argument("--loss_flavor", type=str, default='l2', choices=['l2', 'logit_laplace']) # model size parser.add_argument("--num_embeddings", type=int, default=512, help="vocabulary size; number of possible discrete states") parser.add_argument("--embedding_dim", type=int, default=64, help="size of the vector of the embedding of each discrete token") parser.add_argument("--n_hid", type=int, default=64, help="number of channels controlling the size of the model") return parser # ----------------------------------------------------------------------------- def cos_anneal(e0, e1, t0, t1, e): """ ramp from (e0, t0) -> (e1, t1) through a cosine schedule based on e \in [e0, e1] """ alpha = max(0, min(1, (e - e0) / (e1 - e0))) # what fraction of the way through are we alpha = 1.0 - math.cos(alpha * math.pi/2) # warp through cosine t = alpha * t1 + (1 - alpha) * t0 # interpolate accordingly return t """ These ramps/decays follow DALL-E Appendix A.2 Training https://arxiv.org/abs/2102.12092 """ class DecayTemperature(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): # The relaxation 
temperature τ is annealed from 1 to 1/16 over the first 150,000 updates. t = cos_anneal(0, 150000, 1.0, 1.0/16, trainer.global_step) pl_module.quantizer.temperature = t class RampBeta(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): # The KL weight β is increased from 0 to 6.6 over the first 5000 updates # "We divide the overall loss by 256 × 256 × 3, so that the weight of the KL term # becomes β/192, where β is the KL weight." # TODO: OpenAI uses 6.6/192 but kinda tricky to do the conversion here... about 5e-4 works for this repo so far... :\ t = cos_anneal(0, 5000, 0.0, 5e-4, trainer.global_step) pl_module.quantizer.kld_scale = t class DecayLR(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): # The step size is annealed from 1e10−4 to 1.25e10−6 over 1,200,000 updates. I use 3e-4 t = cos_anneal(0, 1200000, 3e-4, 1.25e-6, trainer.global_step) for g in pl_module.optimizer.param_groups: g['lr'] = t def cli_main(): pl.seed_everything(1337) # ------------------------------------------------------------------------- # arguments... parser = ArgumentParser() # training related parser = pl.Trainer.add_argparse_args(parser) # model related parser = VQVAE.add_model_specific_args(parser) # dataloader related parser.add_argument("--data_dir", type=str, default='/apcv/users/akarpathy/cifar10') parser.add_argument("--batch_size", type=int, default=128) parser.add_argument("--num_workers", type=int, default=8) # done! args = parser.parse_args() # ------------------------------------------------------------------------- data = CIFAR10Data(args) model = VQVAE(args) # annealing schedules for lots of constants callbacks = [] callbacks.append(ModelCheckpoint(monitor='val_recon_loss', mode='min')) callbacks.append(DecayLR()) if args.vq_flavor == 'gumbel': callbacks.extend([DecayTemperature(), RampBeta()]) trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks, max_steps=3000000) trainer.fit(model, data) if __name__ == "__main__": cli_main()
[]
2024-01-10
karpathy/deep-vector-quantization
dvq~model~loss.py
""" VQVAE losses, used for the reconstruction term in the ELBO """ import math import torch # ----------------------------------------------------------------------------- class LogitLaplace: """ the Logit Laplace distribution log likelihood from OpenAI's DALL-E paper """ logit_laplace_eps = 0.1 @classmethod def inmap(cls, x): # map [0,1] range to [eps, 1-eps] return (1 - 2 * cls.logit_laplace_eps) * x + cls.logit_laplace_eps @classmethod def unmap(cls, x): # inverse map, from [eps, 1-eps] to [0,1], with clamping return torch.clamp((x - cls.logit_laplace_eps) / (1 - 2 * cls.logit_laplace_eps), 0, 1) @classmethod def nll(cls, x, mu_logb): raise NotImplementedError # coming right up class Normal: """ simple normal distribution with fixed variance, as used by DeepMind in their VQVAE note that DeepMind's reconstruction loss (I think incorrectly?) misses a factor of 2, which I have added to the normalizer of the reconstruction loss in nll(), we'll report number that is half of what we expect in their jupyter notebook """ data_variance = 0.06327039811675479 # cifar-10 data variance, from deepmind sonnet code @classmethod def inmap(cls, x): return x - 0.5 # map [0,1] range to [-0.5, 0.5] @classmethod def unmap(cls, x): return torch.clamp(x + 0.5, 0, 1) @classmethod def nll(cls, x, mu): return ((x - mu)**2).mean() / (2 * cls.data_variance) #+ math.log(math.sqrt(2 * math.pi * cls.data_variance))
[]
2024-01-10
SalimHachemaoui/API-boughani
Algorithme~scraper.py
import os
import requests
from bs4 import BeautifulSoup
import openai

# Initialisation du client OpenAI avec la clé API depuis une variable d'environnement
# (la clé codée en dur a été retirée ; elle rendait aussi `client` inutilisable,
# car une chaîne n'expose pas `chat.completions.create`)
client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


def scrape_website(url):
    response = requests.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        page_content = soup.get_text()
        return page_content
    else:
        return None


def generate_summary(text_content):
    messages = [{"role": "user", "content": f"Résumez le texte suivant:\n{text_content}"}]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-16k",
        messages=messages,
        temperature=0,
    )
    choices = response.choices
    if choices:
        summary = choices[0].message.content
        return summary
    else:
        return None
[ "Résumez le texte suivant:\nPLACEHOLDER" ]
2024-01-10
lvyufeng/Cybertron
tests~test_comparison~test_gpt.py
import unittest

import mindspore
import torch
from cybertron.models import GPTModel, GPTConfig
from cybertron.utils import convert_state_dict
from mindspore import Tensor
from transformers import OpenAIGPTModel, OpenAIGPTConfig


class TestGPTComparison(unittest.TestCase):
    def test_gpt_comparison(self):
        model = GPTModel(GPTConfig())
        model.set_train(False)
        pt_model = OpenAIGPTModel(OpenAIGPTConfig())
        pt_model.eval()

        ms_dict = convert_state_dict(pt_model, 'gpt')
        mindspore.load_param_into_net(model, ms_dict)

        input_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + [0] * 500
        ms_input_ids = Tensor(input_ids, mindspore.int32).reshape(1, -1)
        (outputs, ) = model(ms_input_ids)

        pt_input_ids = torch.IntTensor(input_ids).reshape(1, -1)
        (outputs_pt, ) = pt_model(input_ids=pt_input_ids)

        assert (outputs.asnumpy() - outputs_pt.detach().numpy()).mean() < 1e-5
[]
2024-01-10
GRKdev/StreamLit-Api
chat_bot.py
import os import streamlit as st import requests import openai from utils.sidebar_info import display_sidebar_info, display_main_info from utils.generate_token import TokenManager from utils.key_check import run_key_check, get_openai_key from utils.lakera_guard import LakeraGuard from utils.chatbot_utils import ( handle_chat_message, handle_gpt_ft_message, ask_fine_tuned_api, ) token_manager = TokenManager() lakera_guard_api_key = st.secrets.get("LAKERA_API", os.getenv("LAKERA_API")) def chat_bot(): session_state = st.session_state DOMINIO = st.secrets.get("DOMINIO", os.getenv("DOMINIO")) token = token_manager.get_token() api_key = get_openai_key(session_state) display_main_info() display_sidebar_info() if api_key: openai.api_key = api_key if run_key_check(session_state): st.session_state.chat_history = st.session_state.get("chat_history", []) if not st.session_state.chat_history: st.session_state.chat_history.append( {"role": "assistant", "content": "¡Empezemos a chatear!"} ) for message in st.session_state.chat_history: with st.chat_message(message["role"]): st.markdown(message["content"]) lakera_guard = LakeraGuard(lakera_guard_api_key) user_input = st.chat_input("Ingresa tu pregunta:") if user_input: user_input = user_input.strip() ## Lakera Guard for prompt injection if lakera_guard.check_prompt_injection(user_input): st.session_state.chat_history.append( {"role": "user", "content": user_input} ) with st.chat_message("user"): st.markdown(user_input) error_message = "Mensaje no permitido por motivos de seguridad.🚫" st.session_state.chat_history.append( {"role": "assistant", "content": error_message} ) with st.chat_message("assistant"): st.error(error_message, icon="⚠️") return else: categories, flagged = lakera_guard.check_moderation(user_input) if flagged: combined_error_message = lakera_guard.get_error_messages(categories) st.session_state.chat_history.append( {"role": "user", "content": user_input} ) with st.chat_message("user"): st.markdown(user_input) error_message = f"Alerta de moderación: {combined_error_message}.🔞" st.session_state.chat_history.append( {"role": "assistant", "content": error_message} ) with st.chat_message("assistant"): st.error(error_message, icon="⚠️") return ## End of Lakera Guard ## else: st.session_state.chat_history.append( {"role": "user", "content": user_input} ) with st.chat_message("user"): st.markdown(user_input) if ( len(user_input) == 12 or len(user_input) == 13 ) and user_input.isdigit(): api_response_url = f"/api/art?bar={user_input}" else: api_response_url = ask_fine_tuned_api(user_input) if "api/" in api_response_url: full_url = DOMINIO + api_response_url headers = {"Authorization": f"Bearer {token}"} try: response = requests.get(full_url, headers=headers) response.raise_for_status() except requests.exceptions.RequestException as e: if isinstance( e, requests.exceptions.HTTPError ) and e.response.status_code in [400, 404, 500]: response = e.response else: st.warning( "Error de conexión API con endpoint", icon="🔧" ) return else: response = None with st.chat_message("assistant"): message_placeholder = st.empty() if response and response.status_code == 200: data = response.json() handle_chat_message( api_response_url, data, message_placeholder, user_input ) else: handle_gpt_ft_message( user_input, message_placeholder, api_response_url, response, )
[ "¡Empezemos a chatear!" ]
2024-01-10
GRKdev/StreamLit-Api
utils~key_check.py
import streamlit as st
import openai


def run_key_check(session_state):
    message_placeholder = st.empty()
    input_value = st.text_input('🔑 OpenAI API Key o Password', type='password', key="unique_input_key", placeholder="Escribe aquí:")

    if len(input_value) <= 10 and len(input_value) > 0:
        stored_password = st.secrets.get("PASSWORD")
        stored_openai_key = st.secrets.get("OPENAI_API_KEY")
        if input_value == stored_password:
            set_openai_key(session_state, stored_openai_key)
            return True
        else:
            message_placeholder.warning('Password incorrecto', icon="🔒")
    elif len(input_value) > 10:
        try:
            openai.api_key = input_value
            openai.Completion.create(engine="text-davinci-003", prompt="test", max_tokens=5)
            set_openai_key(session_state, input_value)
            return True
        except openai.error.AuthenticationError:
            message_placeholder.warning('Por favor, introduce una clave válida de OpenAI!', icon="⚠️")

    return False


def get_openai_key(session_state):
    if 'api_key' in session_state:
        return session_state['api_key']
    else:
        return None


def set_openai_key(session_state, api_key):
    session_state['api_key'] = api_key
[ "test" ]
2024-01-10
ChicagoHAI/llm_radiology
utils_finetune.py
import dataclasses import logging import math import os import io import sys import time import json from typing import Optional, Sequence, Union import openai import tqdm from openai import openai_object import copy StrOrOpenAIObject = Union[str, openai_object.OpenAIObject] openai_org = os.getenv("OPENAI_ORG") if openai_org is not None: openai.organization = openai_org logging.warning(f"Switching to organization: {openai_org} for OAI API key.") @dataclasses.dataclass class OpenAIDecodingArguments(object): max_tokens: int = 1800 temperature: float = 0.2 top_p: float = 1.0 n: int = 1 stream: bool = False stop: Optional[Sequence[str]] = None presence_penalty: float = 0.0 frequency_penalty: float = 0.0 suffix: Optional[str] = None logprobs: Optional[int] = None echo: bool = False def openai_completion( prompts: Union[str, Sequence[str], Sequence[dict[str, str]], dict[str, str]], decoding_args: OpenAIDecodingArguments, model_name="text-davinci-003", sleep_time=2, batch_size=1, max_instances=sys.maxsize, max_batches=sys.maxsize, return_text=False, **decoding_kwargs, ): """Decode with OpenAI API. Args: prompts: A string or a list of strings to complete. If it is a chat model the strings should be formatted as explained here: https://github.com/openai/openai-python/blob/main/chatml.md. If it is a chat model it can also be a dictionary (or list thereof) as explained here: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb decoding_args: Decoding arguments. model_name: Model name. Can be either in the format of "org/model" or just "model". sleep_time: Time to sleep once the rate-limit is hit. batch_size: Number of prompts to send in a single request. Only for non chat model. max_instances: Maximum number of prompts to decode. max_batches: Maximum number of batches to decode. This argument will be deprecated in the future. return_text: If True, return text instead of full completion object (which contains things like logprob). decoding_kwargs: Additional decoding arguments. Pass in `best_of` and `logit_bias` if you need them. Returns: A completion or a list of completions. Depending on return_text, return_openai_object, and decoding_args.n, the completion type can be one of - a string (if return_text is True) - an openai_object.OpenAIObject object (if return_text is False) - a list of objects of the above types (if decoding_args.n > 1) """ is_single_prompt = isinstance(prompts, (str, dict)) if is_single_prompt: prompts = [prompts] if max_batches < sys.maxsize: logging.warning( "`max_batches` will be deprecated in the future, please use `max_instances` instead." "Setting `max_instances` to `max_batches * batch_size` for now." 
) max_instances = max_batches * batch_size prompts = prompts[:max_instances] num_prompts = len(prompts) prompt_batches = [ prompts[batch_id * batch_size : (batch_id + 1) * batch_size] for batch_id in range(int(math.ceil(num_prompts / batch_size))) ] completions = [] for batch_id, prompt_batch in tqdm.tqdm( enumerate(prompt_batches), desc="prompt_batches", total=len(prompt_batches), ): batch_decoding_args = copy.deepcopy(decoding_args) # cloning the decoding_args while True: try: shared_kwargs = dict( model=model_name, **batch_decoding_args.__dict__, **decoding_kwargs, ) completion_batch = openai.Completion.create(prompt=prompt_batch, **shared_kwargs) choices = completion_batch.choices for choice in choices: choice["total_tokens"] = completion_batch.usage.total_tokens completions.extend(choices) break except openai.error.OpenAIError as e: logging.warning(f"OpenAIError: {e}.") if "Please reduce your prompt" in str(e): batch_decoding_args.max_tokens = int(batch_decoding_args.max_tokens * 0.8) logging.warning(f"Reducing target length to {batch_decoding_args.max_tokens}, Retrying...") else: logging.warning("Hit request rate limit; retrying...") time.sleep(sleep_time) # Annoying rate limit on requests. if return_text: completions = [completion.text for completion in completions] if decoding_args.n > 1: # make completions a nested list, where each entry is a consecutive decoding_args.n of original entries. completions = [completions[i : i + decoding_args.n] for i in range(0, len(completions), decoding_args.n)] if is_single_prompt: # Return non-tuple if only 1 input and 1 generation. (completions,) = completions return completions def _make_w_io_base(f, mode: str): if not isinstance(f, io.IOBase): f_dirname = os.path.dirname(f) if f_dirname != "": os.makedirs(f_dirname, exist_ok=True) f = open(f, mode=mode) return f def _make_r_io_base(f, mode: str): if not isinstance(f, io.IOBase): f = open(f, mode=mode) return f def jdump(obj, f, mode="w", indent=4, default=str): """Dump a str or dictionary to a file in json format. Args: obj: An object to be written. f: A string path to the location on disk. mode: Mode for opening the file. indent: Indent for storing json dictionaries. default: A function to handle non-serializable entries; defaults to `str`. """ f = _make_w_io_base(f, mode) if isinstance(obj, (dict, list)): json.dump(obj, f, indent=indent, default=default) elif isinstance(obj, str): f.write(obj) else: raise ValueError(f"Unexpected type: {type(obj)}") f.close() def jload(f, mode="r"): """Load a .json file into a dictionary.""" f = _make_r_io_base(f, mode) jdict = json.load(f) f.close() return jdict
[ "False", "1" ]
2024-01-10
zakandrewking/brainshare
backend~backend~test~mock.py
import openai

# don't use the real API!
openai.api_key = "FAKE"
openai.api_key_path = None

from unittest.mock import AsyncMock


def mock_openai_embedding_async():
    mock = AsyncMock()
    mock.return_value = {
        "data": [{"embedding": [1] * 1536}],
    }
    openai.Embedding.acreate = mock
[]
2024-01-10
zakandrewking/brainshare
backend~backend~test~test_file.py
# from os.path import join, dirname, realpath
# import os

# import pytest
# from sqlalchemy.ext.asyncio import AsyncSession
# from sqlalchemy import select
# from uuid import uuid4
# import openai

# from backend.ai_experiments2 import chat_with_tools
# from backend import file
# from backend import models
# from backend import schemas

# directory = dirname(realpath(__file__))
# vin_paper_path = join(directory, "data", "s41586-022-05157-3.pdf")


# @pytest.fixture
# async def user_id(session):
#     user_id = str(uuid4())
#     session.add(models.Users(id=user_id))
#     await session.commit()
#     return user_id


# async def test_user(session: AsyncSession, user_id: str):
#     (await session.execute(select(models.Users.id).where(models.Users.id == user_id))).scalar_one()


# @pytest.fixture
# async def pdf_file_id(session: AsyncSession, user_id: str) -> int | None:
#     with open(vin_paper_path, "rb") as f:
#         pdf_data = f.read()
#     file_id = await file.process_synced_file(
#         session, pdf_data, user_id, mime_type="application/pdf"
#     )
#     return file_id


# async def test_pdf_file(session: AsyncSession, pdf_file_id: int | None):
#     text_content = (
#         (await session.execute(select(models.FileData).where(models.FileData.id == pdf_file_id)))
#         .scalar_one()
#         .text_content
#     )
#     assert text_content
#     assert "A microbial supply chain for production" in text_content


# async def test_chat(session: AsyncSession, user_id: str, pdf_file_id: int | None):
#     # TODO drop, only for testing
#     openai.api_key = os.environ.get("OPENAI_API_KEY")

#     response, tokens = await chat_with_tools(
#         "Respond with the ID of a paper I have uploaded", session, user_id
#     )
#     print(response)
#     print(f"\n\n{tokens} tokens")
[]
2024-01-10
zakandrewking/brainshare
backend~backend~ai_experiments2.py
from typing import Final from langchain.agents import AgentExecutor, AgentType, initialize_agent from langchain.agents.format_scratchpad import format_to_openai_function_messages from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser from langchain.agents import tool from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession from langchain.tools.render import format_tool_to_openai_function from backend import models # # Do this so we can see exactly what's going on under the hood # from langchain.globals import set_debug # set_debug(True) async def chat_with_tools(query: str, session: AsyncSession, user_id: str) -> tuple[str, int]: input_formatter = { "query": lambda x: x["query"], "agent_scratchpad": lambda x: format_to_openai_function_messages(x["intermediate_steps"]), } prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant designed to output JSON."), ("user", "{query}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ] ) @tool async def find_user_files() -> str: """Finds files owned by the user.""" res = list( ( await session.execute( select(models.FileData).where(models.FileData.user_id == user_id) ) ).scalars() ) return "\n".join(f"file with ID {f.id}" for f in res) tools = [find_user_files] llm = ChatOpenAI( model_name="gpt-4-1106-preview", ).bind( functions=[format_tool_to_openai_function(t) for t in tools], ) agent = input_formatter | prompt | llm | OpenAIFunctionsAgentOutputParser() agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) # good example for openai multi tool, for when you want to run multiple # tools at once # https://github.com/langchain-ai/langchain/issues/8325 # let's stay serial and use the OPENAI_FUNCTIONS (which i think is the same # as the above?) res = await agent_executor.ainvoke({"query": query, "agent_scratchpad": []}) return str(res["output"]), 0 # # NOTE: Using JSON mode with LCEL: # # you need to instruct the llm to output JSON # sysmsg = SystemMessage(content="You are a helpful assistant designed to output JSON.") # # shortcut for HumanMessagePromptTemplate.from_template("{query}") # prompt = sysmsg + {query}" # # use a newish version of gpt4 # model = ChatOpenAI( # model_name="gpt-4-1106-preview", # # add the options # ).bind(response_format={ "type": "json_object" }) # # run it # chain = prompt | model # res = chain.invoke({"query": query}) # # res.content is a JSON string # return str(res.content)
[ "Finds files owned by the user.", "agent_scratchpad", "You are a helpful assistant designed to output JSON." ]
2024-01-10
Dyke-F/RAG_Medical_Guidelines
compare_batch.py
from dotenv import load_dotenv from PyPDF2 import PdfReader import chromadb import json from fastapi.encoders import jsonable_encoder from pathlib import Path import openai import logging import os import re import hydra from pathlib import Path from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.chat_models import ChatOpenAI from langchain.vectorstores import Chroma from langchain.document_loaders import PyPDFLoader from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate from langchain.chains import RetrievalQA import datetime os.environ["HYDRA_FULL_ERROR"] = "1" logging.getLogger().setLevel(logging.INFO) def load_docs_from_src(directory: Path): # Load each PDF document seperately docs = {} for doc_p in Path(directory).rglob("*.pdf"): doc_str = str(doc_p) try: split = doc_str.rsplit("_") (association, _) = split[-2].rsplit("/")[-1], split[-1].split(".")[0] assert association in {"ASCO", "ESMO"}, "The document naming convention has been violated. The expected format is 'ASSOCIATION_ENTITY.pdf'. For example: 'ASCO_CRC.pdf'." except Exception as e: raise NameError("Invalid document name.") from e l = PyPDFLoader(doc_str) txt = l.load() docs.setdefault(association, []).extend(txt) return docs def get_chunks_per_pdf(doc_dict, chunk_size, overlap): # Store document chunks in a dict, where each key is one identifier for 1 PDF chunks = {} text_splitter = RecursiveCharacterTextSplitter( chunk_size = chunk_size, chunk_overlap = overlap, length_function = len) for key, doc in doc_dict.items(): chunks[key] = text_splitter.split_documents(doc) return chunks def get_vectorstore_per_pdf(chunk_dict, chunk_size, overlap): # Store each PDF in a separated Vectorstore object vs_dict = {} embeddings = OpenAIEmbeddings() for (key, doc_chunks) in chunk_dict.items(): entity = doc_chunks[0].metadata["source"].split("/")[-1].split(".")[0].split("_")[1] valid_entity_names = "mCRC", "PancreaticCancer", "HCC" pattern = re.compile(r'^(%s)' % '|'.join(valid_entity_names)) match = pattern.match(entity) entity = match.group(1) index = Path(f"./chroma_db/{key}/{entity}_{chunk_size}_{overlap}") client_settings = chromadb.config.Settings( chroma_db_impl="duckdb+parquet", persist_directory=str(index), anonymized_telemetry=False ) if index.exists(): try: vectorstore = Chroma(persist_directory=index, embedding_function=embeddings, client_settings=client_settings) logging.info(f"Loading existing chroma database from {index}.") except Exception as e: vectorstore = Chroma.from_documents( doc_chunks, embeddings, persist_directory=str(index), client_settings=client_settings) vectorstore.persist() logging.info(f"Failed loading existing database from {index}.") else: vectorstore = Chroma.from_documents( doc_chunks, embeddings, persist_directory=str(index), client_settings=client_settings) vectorstore.persist() logging.info(f"Index not existing. Creating new database at {index}.") vs_dict[key] = vectorstore return vs_dict def compare(vectorstores, question, model=None): # Compare the input from 2 or more documents llm = ChatOpenAI(temperature=0, model=model) human_message_prompt = HumanMessagePromptTemplate.from_template("{question}") system_message_prompt = SystemMessagePromptTemplate.from_template( """You are an AI medical assistant specializing in oncology. Based on the provided oncology guidelines, provide detailed and truthful information in response to inquiries from a medical doctor. 
Ensure your responses are: - Relevant to the given context. For instance, when asked about chemoradiation, do not include information about chemotherapy alone. - Presented in concise bullet points. - Honest, especially when the answer isn't available in the guidelines. - Include citations and references. - As detailed as possible. Include all details regarding patient and tumor characteristics like R-status (R0/R1), tumor grade and Tumor Stage (TNM) - Include references to clinical trials (by name and not by number), survival data, exact treatments, their outcomes and details on the trial design. Context: {context} Based on the American and European medical oncology guidelines, what does the association say about the topic presented in the context? """ ) chain_res = {} for key, vectorstore in vectorstores.items(): qa_chain = RetrievalQA.from_chain_type( llm=llm, retriever=vectorstore.as_retriever(search_kwargs={"k": 25}), # 25 return_source_documents=True, chain_type_kwargs={ "prompt": ChatPromptTemplate.from_messages([ system_message_prompt, human_message_prompt, ])}, ) result = qa_chain({"query": question}) print(result) chain_res[key] = result def format_dict(data_dict): output = [] for key, value in data_dict.items(): output.append(f"{key}:\n{value['result']}\n") return "\n".join(output) response_str = format_dict(chain_res) input_prompt = """You are a dedicated AI assistant providing detailed responses to a medical doctor. Your answers are based on the provided documents and are strictly truthful. If the information is not available, state it clearly. Refrain from including irrelevant or out-of-context details. You have been provided with specific cancer-related information extracted from both the ESMO and ASCO guidelines. Your task is to conduct a line-by-line comparison to identify and extract the similarities and differences between the two sets of guidelines. The main objective is to pinpoint discrepancies in the recommendations. It is important to consider all available details, for example including resection status (R0 vs R1), tumor stage etc. to allow for a correct comparison. Also, provide all the details from clinical trials, like the trial name, survival data, and the overall conclusion from the trial The provided input follows this structure: ESMO: - ... ASCO: - ... Your structured output should be in the format: Comparison of {topic of the question} between ASCO and ESMO: Similarities: - {topic}: ... - {topic}: ... Differences: - {topic}: ... - {topic}: ... Every subpoint in similarities and differences should be structured based on a useful {topic} as given in the data. For example: If recommendations can be seperated into adjuvant / locally advanced / metastatic disease, use these as topic and compare what the different institutions recommend. For example: If different treatment options are given like surgery, radiation, chemotherapy, seperate your structured output by these. Ensure all relevant details are given in your answer: This includes for instance: Names of clinical trials, the trial design, their outcomes and conclusions. Specific patient and treatment characteristics that are compared (tumor stage, R0/R1, treatment details (timing, duration, substances)) Finally, summarize your comparison. 
""" completion = openai.ChatCompletion.create( model=model, messages=[ {"role": "system", "content": input_prompt}, {"role": "user", "content": response_str} ] ) return completion, chain_res def save_complete(user_question, vectorstores, model_name, chunk_size, overlap): completion, chain_res = compare(vectorstores, user_question, model=model_name) ai_message = [jsonable_encoder(completion["choices"][0]["message"]["content"])] hu_message = [jsonable_encoder(user_question)] source_docs = [jsonable_encoder(v["source_documents"] for v in chain_res.values())] with open(f"{model_name}_outputs.json", "a") as f: json.dump({"Human Message": hu_message, "AI Response": ai_message, "source documents": source_docs, "# timestamp": datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S'), "# chunk_size": chunk_size, "# overlap": overlap, }, f, indent=4 ) print(completion["choices"][0]["message"]["content"]) @hydra.main(version_base="1.3", config_path="conf", config_name="path_to_your_data.yaml") def main(cfg): load_dotenv() openai.api_key = os.getenv("OPENAI_API_KEY") documents_dir = Path(cfg.documents_dir) if not documents_dir.exists(): raise NotADirectoryError(f"Directory at {cfg.documents_dir} does not exist.") docs_dict = load_docs_from_src(documents_dir) chunks_dict = get_chunks_per_pdf(docs_dict, chunk_size=cfg.chunk_size, overlap=cfg.overlap) vs_dict = get_vectorstore_per_pdf(chunks_dict, chunk_size=cfg.chunk_size, overlap=cfg.overlap) counter = 0 for user_input in cfg.topics: save_complete(user_input, vs_dict, cfg.model_name, chunk_size=cfg.chunk_size, overlap=cfg.overlap) counter += 1 logging.info(f"Completed {counter} out of {len(cfg.topics)} comparisons.") if __name__ == '__main__': main()
[ "You are an AI medical assistant specializing in oncology. Based on the provided oncology guidelines, provide detailed and truthful information in response to inquiries from a medical doctor. Ensure your responses are:\n - Relevant to the given context.\n For instance, when asked about chemoradiation, do not include information about chemotherapy alone.\n - Presented in concise bullet points.\n - Honest, especially when the answer isn't available in the guidelines. \n - Include citations and references.\n - As detailed as possible. Include all details regarding patient and tumor characteristics like R-status (R0/R1), tumor grade and Tumor Stage (TNM)\n - Include references to clinical trials (by name and not by number), survival data, exact treatments, their outcomes and details on the trial design. \n\n Context:\n {context}\n\n Based on the American and European medical oncology guidelines, what does the association say about the topic presented in the context?\n ", "You are a dedicated AI assistant providing detailed responses to a medical doctor. Your answers are based on the provided documents and are strictly truthful. If the information is not available, state it clearly. Refrain from including irrelevant or out-of-context details.\n You have been provided with specific cancer-related information extracted from both the ESMO and ASCO guidelines. Your task is to conduct a line-by-line comparison to identify and extract the similarities and differences between the two sets of guidelines.\n The main objective is to pinpoint discrepancies in the recommendations.\n\n It is important to consider all available details, for example including resection status (R0 vs R1), tumor stage etc. to allow for a correct comparison. \n Also, provide all the details from clinical trials, like the trial name, survival data, and the overall conclusion from the trial\n \n The provided input follows this structure:\n ESMO:\n - ...\n ASCO:\n - ...\n\n Your structured output should be in the format:\n Comparison of {topic of the question} between ASCO and ESMO:\n Similarities:\n - {topic}: ...\n - {topic}: ...\n Differences:\n - {topic}: ...\n - {topic}: ...\n\n Every subpoint in similarities and differences should be structured based on a useful {topic} as given in the data.\n For example: If recommendations can be seperated into adjuvant / locally advanced / metastatic disease, use these as topic and compare what the different institutions recommend. \n For example: If different treatment options are given like surgery, radiation, chemotherapy, seperate your structured output by these.\n \n \n Ensure all relevant details are given in your answer: This includes for instance:\n Names of clinical trials, the trial design, their outcomes and conclusions. \n Specific patient and treatment characteristics that are compared (tumor stage, R0/R1, treatment details (timing, duration, substances)) \n\n Finally, summarize your comparison.\n ", "[PLACEHOLDER, PLACEHOLDER]", "{question}" ]
2024-01-10
Dyke-F/RAG_Medical_Guidelines
compare.py
from dotenv import load_dotenv from PyPDF2 import PdfReader import chromadb import json from fastapi.encoders import jsonable_encoder from pathlib import Path import openai import logging import os import re import hydra from pathlib import Path from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.chat_models import ChatOpenAI from langchain.vectorstores import Chroma from langchain.document_loaders import PyPDFLoader from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate from langchain.chains import RetrievalQA import datetime os.environ["HYDRA_FULL_ERROR"] = "1" logging.getLogger().setLevel(logging.INFO) def load_docs_from_src(directory: Path): # Load each PDF document seperately docs = {} for doc_p in Path(directory).rglob("*.pdf"): doc_str = str(doc_p) try: split = doc_str.rsplit("_") (association, _) = split[-2].rsplit("/")[-1], split[-1].split(".")[0] assert association in {"ASCO", "ESMO"}, "The document naming convention has been violated. The expected format is 'ASSOCIATION_ENTITY.pdf'. For example: 'ASCO_CRC.pdf'." except Exception as e: raise NameError("Invalid document name.") from e l = PyPDFLoader(doc_str) txt = l.load() docs.setdefault(association, []).extend(txt) return docs def get_chunks_per_pdf(doc_dict, chunk_size, overlap): # Store document chunks in a dict, where each key is one identifier for 1 PDF chunks = {} text_splitter = RecursiveCharacterTextSplitter( chunk_size = chunk_size, chunk_overlap = overlap, length_function = len) for key, doc in doc_dict.items(): chunks[key] = text_splitter.split_documents(doc) return chunks def get_vectorstore_per_pdf(chunk_dict, chunk_size, overlap): # Store each PDF in a separated Vectorstore object vs_dict = {} embeddings = OpenAIEmbeddings() for (key, doc_chunks) in chunk_dict.items(): entity = doc_chunks[0].metadata["source"].split("/")[-1].split(".")[0].split("_")[1] valid_entity_names = "mCRC", "PancreaticCancer", "HCC" pattern = re.compile(r'^(%s)' % '|'.join(valid_entity_names)) match = pattern.match(entity) entity = match.group(1) index = Path(f"./chroma_db/{key}/{entity}_{chunk_size}_{overlap}") client_settings = chromadb.config.Settings( chroma_db_impl="duckdb+parquet", persist_directory=str(index), anonymized_telemetry=False ) if index.exists(): try: vectorstore = Chroma(persist_directory=index, embedding_function=embeddings, client_settings=client_settings) logging.info(f"Loading existing chroma database from {index}.") except Exception as e: vectorstore = Chroma.from_documents( doc_chunks, embeddings, persist_directory=str(index), client_settings=client_settings) vectorstore.persist() logging.info(f"Failed loading existing database from {index}.") else: vectorstore = Chroma.from_documents( doc_chunks, embeddings, persist_directory=str(index), client_settings=client_settings) vectorstore.persist() logging.info(f"Index not existing. Creating new database at {index}.") vs_dict[key] = vectorstore return vs_dict def compare(vectorstores, question, model=None): # Compare the input from 2 or more documents llm = ChatOpenAI(temperature=0, model=model) human_message_prompt = HumanMessagePromptTemplate.from_template("{question}") system_message_prompt = SystemMessagePromptTemplate.from_template( """You are an AI medical assistant specializing in oncology. Based on the provided oncology guidelines, provide detailed and truthful information in response to inquiries from a medical doctor. 
Ensure your responses are: - Relevant to the given context. For instance, when asked about chemoradiation, do not include information about chemotherapy alone. - Presented in concise bullet points. - Honest, especially when the answer isn't available in the guidelines. - Include citations and references. - As detailed as possible. Include all details regarding patient and tumor characteristics like R-status (R0/R1), tumor grade and Tumor Stage (TNM) - Include references to clinical trials (by name and not by number), survival data, exact treatments, their outcomes and details on the trial design. Context: {context} Based on the American and European medical oncology guidelines, what does the association say about the topic presented in the context? """ ) chain_res = {} for key, vectorstore in vectorstores.items(): qa_chain = RetrievalQA.from_chain_type( llm=llm, retriever=vectorstore.as_retriever(search_kwargs={"k": 25}), return_source_documents=True, chain_type_kwargs={ "prompt": ChatPromptTemplate.from_messages([ system_message_prompt, human_message_prompt, ])}, ) result = qa_chain({"query": question}) chain_res[key] = result def format_dict(data_dict): output = [] for key, value in data_dict.items(): output.append(f"{key}:\n{value['result']}\n") return "\n".join(output) response_str = format_dict(chain_res) input_prompt = """You are a dedicated AI assistant providing detailed responses to a medical doctor. Your answers are based on the provided documents and are strictly truthful. If the information is not available, state it clearly. Refrain from including irrelevant or out-of-context details. You have been provided with specific cancer-related information extracted from both the ESMO and ASCO guidelines. Your task is to conduct a topic-by-topic comparison to identify and extract the similarities and differences between the two sets of guidelines. The main objective is to pinpoint discrepancies in the recommendations. It is important to consider all available details, for example including resection status (R0 vs R1), tumor stage etc. to allow for a correct comparison. Also, provide all the details from clinical trials, like the trial name, survival data, and the overall conclusion from the trial The provided input follows this structure: ESMO: - ... ASCO: - ... Your structured output should be in the format: Comparison of {topic of the question} between ASCO and ESMO: Similarities: - {topic}: ... - {topic}: ... Differences: - {topic}: ... - {topic}: ... Every subpoint in similarities and differences should be structured based on a useful {topic} as given in the data. For example: If recommendations can be seperated into adjuvant / locally advanced / metastatic disease, use these as topic and compare what the different institutions recommend. For example: If different treatment options are given like surgery, radiation, chemotherapy, seperate your structured output by these. Ensure all relevant details are given in your answer: This includes for instance: Names of clinical trials, the trial design, their outcomes and conclusions. Specific patient and treatment characteristics that are compared (tumor stage, R0/R1, treatment details (timing, duration, substances)) Finally, summarize your comparison. 
""" completion = openai.ChatCompletion.create( model=model, messages=[ {"role": "system", "content": input_prompt}, {"role": "user", "content": response_str} ] ) return completion, chain_res def save_complete(user_question, vectorstores, model_name, chunk_size, overlap): completion, chain_res = compare(vectorstores, user_question, model=model_name) ai_message = [jsonable_encoder(completion["choices"][0]["message"]["content"])] hu_message = [jsonable_encoder(user_question)] source_docs = [jsonable_encoder(v["source_documents"] for v in chain_res.values())] with open(f"{model_name}_outputs.json", "a") as f: json.dump({"Human Message": hu_message, "AI Response": ai_message, "source documents": source_docs, "# timestamp": datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S'), "# chunk_size": chunk_size, "# overlap": overlap, }, f, indent=4 ) print(completion["choices"][0]["message"]["content"]) @hydra.main(version_base="1.3", config_path="conf", config_name="mCRC_config.yaml") def main(cfg): load_dotenv() openai.api_key = os.getenv("OPENAI_API_KEY") documents_dir = Path(cfg.documents_dir) if not documents_dir.exists(): raise NotADirectoryError(f"Directory at {cfg.documents_dir} does not exist.") docs_dict = load_docs_from_src(documents_dir) chunks_dict = get_chunks_per_pdf(docs_dict, chunk_size=cfg.chunk_size, overlap=cfg.overlap) vs_dict = get_vectorstore_per_pdf(chunks_dict, chunk_size=cfg.chunk_size, overlap=cfg.overlap) # save_complete(cfg.user_question, vs_dict, cfg.model_name) save_complete("MSI", vs_dict, cfg.model_name, chunk_size=cfg.chunk_size, overlap=cfg.overlap) if __name__ == '__main__': main()
[ "You are an AI medical assistant specializing in oncology. Based on the provided oncology guidelines, provide detailed and truthful information in response to inquiries from a medical doctor. Ensure your responses are:\n - Relevant to the given context.\n For instance, when asked about chemoradiation, do not include information about chemotherapy alone.\n - Presented in concise bullet points.\n - Honest, especially when the answer isn't available in the guidelines. \n - Include citations and references.\n - As detailed as possible. Include all details regarding patient and tumor characteristics like R-status (R0/R1), tumor grade and Tumor Stage (TNM)\n - Include references to clinical trials (by name and not by number), survival data, exact treatments, their outcomes and details on the trial design. \n\n Context:\n {context}\n\n Based on the American and European medical oncology guidelines, what does the association say about the topic presented in the context?\n ", "[PLACEHOLDER, PLACEHOLDER]", "{question}", "You are a dedicated AI assistant providing detailed responses to a medical doctor. Your answers are based on the provided documents and are strictly truthful. If the information is not available, state it clearly. Refrain from including irrelevant or out-of-context details.\n You have been provided with specific cancer-related information extracted from both the ESMO and ASCO guidelines. Your task is to conduct a topic-by-topic comparison to identify and extract the similarities and differences between the two sets of guidelines.\n The main objective is to pinpoint discrepancies in the recommendations.\n\n It is important to consider all available details, for example including resection status (R0 vs R1), tumor stage etc. to allow for a correct comparison. \n Also, provide all the details from clinical trials, like the trial name, survival data, and the overall conclusion from the trial\n \n The provided input follows this structure:\n ESMO:\n - ...\n ASCO:\n - ...\n\n Your structured output should be in the format:\n Comparison of {topic of the question} between ASCO and ESMO:\n Similarities:\n - {topic}: ...\n - {topic}: ...\n Differences:\n - {topic}: ...\n - {topic}: ...\n\n Every subpoint in similarities and differences should be structured based on a useful {topic} as given in the data.\n For example: If recommendations can be seperated into adjuvant / locally advanced / metastatic disease, use these as topic and compare what the different institutions recommend. \n For example: If different treatment options are given like surgery, radiation, chemotherapy, seperate your structured output by these.\n \n Ensure all relevant details are given in your answer: This includes for instance:\n Names of clinical trials, the trial design, their outcomes and conclusions. \n Specific patient and treatment characteristics that are compared (tumor stage, R0/R1, treatment details (timing, duration, substances)) \n\n Finally, summarize your comparison.\n " ]
2024-01-10
b-shelton/technical_examples
machine_learning~nlp~cord-19.py
# This script uses an aggregation of COVID-19 research papers to create ML-generate topic-modeling metadata associated with each paper. import os import zipfile import tempfile import json import numpy as np import pandas as pd import re from langdetect import detect from time import process_time import multiprocessing as mp import nltk from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords from nltk.stem import PorterStemmer from sklearn.feature_extraction.text import CountVectorizer # SET THIS APPROPRIATELY to analyze either abstracts or full texts focus = 'abstract' # 'abstract' or 'body_text' # If not in a Kaggle notebook, configure environment to download data from Kaggle API (one time activity) # Follow instructions here: https://medium.com/@ankushchoubey/how-to-download-dataset-from-kaggle-7f700d7f9198 #os.system('kaggle datasets download -d allen-institute-for-ai/CORD-19-research-challenge') zippath = 'CORD-19-research-challenge.zip' # Create the temporary directory to store the zip file's content temp_dir = tempfile.TemporaryDirectory() # Extract the zip file's content into the temporary directory with zipfile.ZipFile(zippath, 'r') as zip_ref: zip_ref.extractall(temp_dir.name) # Read the metadata.csv file md = pd.read_csv(temp_dir.name + '/metadata.csv') ############################################################################### # Read all of the text from the research papers ############################################################################### sources = ['biorxiv_medrxiv', 'comm_use_subset', 'noncomm_use_subset', 'custom_license'] papers_source = [] papers_sha = [] papers_text = [] for h in sources: paper_path = '/' + h + '/' + h + '/' for i in range(0, len(os.listdir(temp_dir.name + paper_path))): # read json file sha = os.listdir(temp_dir.name + paper_path)[i] json_path = (temp_dir.name + paper_path + sha) with open(json_path) as f: d = json.load(f) if len(d[focus]) == 0: continue else: # get text paper_text = [] for j in range(0, len(d[focus])): if len(paper_text) == 0: paper_text = d[focus][j]['text'] else: paper_text += d[focus][j]['text'] # append to the rest of the extracted papers papers_source.append(h) papers_sha.append(re.sub('.json', '', sha)) papers_text.append(paper_text) df = pd.DataFrame({'sha': papers_sha, 'source': papers_source, 'text': papers_text}) df = df[df['text'].str.len() > 5] # Only retain research papers in English (for now) df['language'] = df['text'].apply(detect) df.groupby('language')['sha'].count() \ .reset_index().sort_values('sha', ascending = False) df = df[df['language'] == 'en'] ############################################################################### # Pre-Processing ############################################################################### ''' This section will clean the text to prepare if for analysis, including transformation to all lowercase, tokenization, stemming (PortStemmer), and removing stop words. This section uses the multiprocessing package, which takes advantage of all the operating system's cores. I've hard coded the number of cores to 4, but the user can identify how many cores they have available by running `mp.cpu_count()`. Even with the multiprocessing package, it takes a long time to stem every word in the 30k~ papers. 
''' # make every word lowercase papers_lower = [x.lower() for x in df['text'].tolist()] # tokenize every paper, using multiprocessing tokenizer = RegexpTokenizer('[a-zA-Z]\w+\'?\w*') def token_a_paper(paper_lower): return tokenizer.tokenize(paper_lower) t1_start = process_time() pool = mp.Pool(4) token_papers = list(pool.map(token_a_paper, papers_lower)) t1_end = process_time() print('Time to tokenize:', round(t1_end - t1_start, 2), 'seconds') pool.close() # remove stop words (including customs stop words) custom_to_exclude = {'et', 'al', 'al.', 'preprint', 'copyright', 'peer-review', 'author/fund', 'http', 'licens', 'biorxiv', 'fig', 'figure', 'medrxiv', 'i.e.', 'e.g.', 'e.g.,', '\'s', 'doi', 'author', 'funder', 'https', 'license'} stop_words = set(stopwords.words('english')) | custom_to_exclude st_words = [] for i in token_papers: t = [word for word in i if (not word in stop_words)] st_words.append(t) # stem every remaining word, using multiprocessing stemmer = PorterStemmer() def stem_tokens(st_paper): return [stemmer.stem(word) for word in st_paper] t1_start = process_time() pool = mp.Pool(4) stemmed_words = pool.map(stem_tokens, st_words) t1_end = process_time() print('Time to stem:', round((t1_end - t1_start) / 60, 2), 'minutes') pool.close() # count how many words after stop words are removed # and put the tokenized words back into papers counter = 0 stemmed_papers = [] for i in stemmed_words: paper = " ".join(i) stemmed_papers.append(paper) counter += len(i) print('Number of total words:', counter) # show top words in corpus flat_words =[] for i in stemmed_words: flat_words += i fw = pd.DataFrame({'words': flat_words, 'occurences': 1}) gfw = fw.groupby('words')['occurences'].count() \ .reset_index().sort_values('occurences', ascending = False) gfw.head(25) ############################################################################### # Topic Modeling with Latent Dirichlet Allocation (LDA) # and NMF ############################################################################### from sklearn.feature_extraction import text from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer custom_to_exclude = ['et', 'al', 'al.', 'preprint', 'copyright', 'peer-review', 'author/fund', 'http', 'licens', 'biorxiv', 'fig', 'figure', 'medrxiv', 'i.e.', 'e.g.', 'e.g.,', '\'s', 'doi', 'author', 'funder', 'https', 'license'] my_stop_words = text.ENGLISH_STOP_WORDS.union(custom_to_exclude) no_features = 1000 # NMF is able to use tf-idf tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=my_stop_words) tfidf = tfidf_vectorizer.fit_transform(stemmed_papers) tfidf_feature_names = tfidf_vectorizer.get_feature_names() # LDA can only use raw term counts for LDA because it is a probabilistic graphical model tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=my_stop_words) tf = tf_vectorizer.fit_transform(stemmed_papers) tf_feature_names = tf_vectorizer.get_feature_names() from sklearn.decomposition import NMF, LatentDirichletAllocation no_topics = 10 # Run NMF nmf = NMF(n_components = no_topics, random_state = 1, alpha = .1, l1_ratio = .5, init = 'nndsvd').fit(tfidf) # Run LDA lda = LatentDirichletAllocation(n_components = no_topics, max_iter = 5, learning_method = 'online', learning_offset = 50., random_state=0).fit(tf) def display_topics(model, feature_names, no_top_words): for topic_idx, topic in enumerate(model.components_): print('Topic %d:' % (topic_idx)) print(" ".join([feature_names[i] for i in 
topic.argsort()[:-no_top_words - 1:-1]])) no_top_words = 15 #display_topics(nmf, tfidf_feature_names, no_top_words) display_topics(lda, tf_feature_names, no_top_words) # based on the top words from each topic, creating a title for each one (manual) topic_ids = [] for i in range(0, no_topics): topic_ids.append(f'topic{i}') topics = pd.DataFrame({'topic_id': topic_ids, 'topic_name': ['animal virus', 'animal tesing', 'outbreak \nmonitoring', 'symptoms \nanalyses', 'vaccine \ndevelopment', 'patient affects', 'cellular studies', 'genomic studies', 'comparison to \nother outbreaks', 'disease/drug \ninteraction']}) ############################################################################### # Assign a topic to every research paper ############################################################################### # collapse the different topic weights for every word into a single dataframe lda_df = pd.DataFrame({'words': tf_feature_names}) for i in range(0, len(lda.components_)): colname = f'topic{i}' lda_df[colname] = lda.components_[i].tolist() # get the summed weights for every topic, for every research paper t1_start = process_time() topic_amounts = pd.DataFrame() for i in range(0, len(stemmed_words)): topic0_amount = 0 df = pd.DataFrame({'words': stemmed_words[i]}) df_lda = df.merge(lda_df, on = 'words', how = 'inner') amounts = df_lda.drop(['words'], axis = 1).sum(axis = 0).reset_index() amounts['paper'] = i topic_amounts = topic_amounts.append(amounts) t1_end = process_time() round(t1_end - t1_start, 2) idx = topic_amounts.groupby(['paper'])[0] \ .transform(max) == topic_amounts[0] paper_topics = topic_amounts[idx] paper_topics.columns = ['topic_id', 'lda_value', 'paper_loc'] # group paper counts by topic and visualize topic_count = paper_topics.groupby('topic_id')['paper_loc'].count().reset_index() topic_count = topic_count.merge(topics, on = 'topic_id', how = 'inner') import matplotlib.pyplot as plt def topic_viz(): fig = plt.figure() ax = fig.add_axes([0,0,1,1]) paper_count = list(topic_count['paper_loc']) topics = tuple(list(topic_count['topic_name'])) x_pos = np.arange(len(topics)) ax.bar(x_pos, paper_count) plt.xticks(x_pos, topics, rotation = 45) plt.title('Research Paper Categorization by Model Topic') topic_viz() ############################################################################### # Exploratory Analysis ############################################################################### def get_most_freq_words(str, n=None): vect = CountVectorizer().fit(str) bag_of_words = vect.transform(str) sum_words = bag_of_words.sum(axis=0) freq = [(word, sum_words[0, idx]) for word, idx in vect.vocabulary_.items()] freq =sorted(freq, key = lambda x: x[1], reverse=True) return freq[:n] get_most_freq_words([word for word in stemmed_papers for word in word] , 50) df = pd.DataFrame({'abstract': papers_text1, 'token_stemmed': stemmed_papers}) # build a dictionary where for each tweet, each word has its own id. # create a single list of all stemmed words from the papers flat_words =[] for i in stemmed_papers: flat_words += i # Creating dictionary for the word frequency table frequency_table = dict() for wd in flat_words: if wd in frequency_table: frequency_table[wd] += 1 else: frequency_table[wd] = 1 # build the corpus i.e. 
vectors with the number of occurrences of each word per paper
from gensim.corpora import Dictionary
from gensim.models.ldamodel import LdaModel
from gensim.models import CoherenceModel

# gensim expects tokenized documents (lists of tokens), so build the dictionary and
# the bag-of-words corpus from stemmed_words rather than the re-joined strings
tweets_dictionary = Dictionary(stemmed_words)
tweets_corpus = [tweets_dictionary.doc2bow(tokens) for tokens in stemmed_words]

# compute coherence
tweets_coherence = []
for nb_topics in range(1,36):
    lda = LdaModel(tweets_corpus, num_topics = nb_topics, id2word = tweets_dictionary, passes=10)
    cohm = CoherenceModel(model=lda, corpus=tweets_corpus, dictionary=tweets_dictionary, coherence='u_mass')
    coh = cohm.get_coherence()
    tweets_coherence.append(coh)

# visualize coherence
plt.figure(figsize=(10,5))
plt.plot(range(1,36), tweets_coherence)
plt.xlabel("Number of Topics")
plt.ylabel("Coherence Score");

# Close the temporary directory
import shutil
shutil.rmtree(temp_dir.name)
[]
2024-01-10
ginomcfino/ML-playground
CS523~HW5~logx.py
""" Some simple logging functionality, inspired by rllab's logging and adapted from OpenAI Spinning Up Logs to a tab-separated-values file (path/to/output_directory/progress.txt) """ import json import joblib import shutil import numpy as np import torch import os.path as osp, time, atexit, os import warnings color2num = dict( gray=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37, crimson=38 ) def statistics_scalar(x, with_min_and_max=False): """ Get mean/std and optional min/max of scalar x across MPI processes. Args: x: An array containing samples of the scalar to produce statistics for. with_min_and_max (bool): If true, return min and max of x in addition to mean and std. """ x = np.array(x, dtype=np.float32) global_sum, global_n = np.sum(x), len(x) mean = global_sum / global_n global_sum_sq = np.sum((x - mean)**2) std = np.sqrt(global_sum_sq / global_n) # compute global std if with_min_and_max: global_min = np.min(x) if len(x) > 0 else np.inf global_max = np.max(x) if len(x) > 0 else -np.inf return mean, std, global_min, global_max return mean, std def convert_json(obj): """ Convert obj to a version which can be serialized with JSON. """ if is_json_serializable(obj): return obj else: if isinstance(obj, dict): return {convert_json(k): convert_json(v) for k,v in obj.items()} elif isinstance(obj, tuple): return (convert_json(x) for x in obj) elif isinstance(obj, list): return [convert_json(x) for x in obj] elif hasattr(obj,'__name__') and not('lambda' in obj.__name__): return convert_json(obj.__name__) elif hasattr(obj,'__dict__') and obj.__dict__: obj_dict = {convert_json(k): convert_json(v) for k,v in obj.__dict__.items()} return {str(obj): obj_dict} return str(obj) def is_json_serializable(v): try: json.dumps(v) return True except: return False def colorize(string, color, bold=False, highlight=False): """ Colorize a string. This function was originally written by John Schulman. """ attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string) class Logger: """ A general-purpose logger. Makes it easy to save diagnostics, hyperparameter configurations, the state of a training run, and the trained model. """ def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None): """ Initialize a Logger. Args: output_dir (string): A directory for saving results to. If ``None``, defaults to a temp directory of the form ``/tmp/experiments/somerandomnumber``. output_fname (string): Name for the tab-separated-value file containing metrics logged throughout a training run. Defaults to ``progress.txt``. exp_name (string): Experiment name. If you run multiple training runs and give them all the same ``exp_name``, the plotter will know to group them. (Use case: if you run the same hyperparameter configuration with multiple random seeds, you should give them all the same ``exp_name``.) """ self.output_dir = output_dir or "/tmp/experiments/%i"%int(time.time()) if osp.exists(self.output_dir): print("Warning: Log dir %s already exists! 
Storing info there anyway."%self.output_dir) else: os.makedirs(self.output_dir) self.output_file = open(osp.join(self.output_dir, output_fname), 'w') atexit.register(self.output_file.close) print(colorize("Logging data to %s"%self.output_file.name, 'green', bold=True)) self.first_row=True self.log_headers = [] self.log_current_row = {} self.exp_name = exp_name def log(self, msg, color='green'): """Print a colorized message to stdout.""" print(colorize(msg, color, bold=True)) def log_tabular(self, key, val): """ Log a value of some diagnostic. Call this only once for each diagnostic quantity, each iteration. After using ``log_tabular`` to store values for each diagnostic, make sure to call ``dump_tabular`` to write them out to file and stdout (otherwise they will not get saved anywhere). """ if self.first_row: self.log_headers.append(key) else: assert key in self.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key assert key not in self.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key self.log_current_row[key] = val def save_config(self, config): """ Log an experiment configuration. Call this once at the top of your experiment, passing in all important config vars as a dict. This will serialize the config to JSON, while handling anything which can't be serialized in a graceful way (writing as informative a string as possible). Example use: .. code-block:: python logger = EpochLogger(**logger_kwargs) logger.save_config(locals()) """ config_json = convert_json(config) if self.exp_name is not None: config_json['exp_name'] = self.exp_name output = json.dumps(config_json, separators=(',',':\t'), indent=4, sort_keys=True) print(colorize('Saving config:\n', color='cyan', bold=True)) print(output) with open(osp.join(self.output_dir, "config.json"), 'w') as out: out.write(output) def save_state(self, state_dict, itr=None): """ Saves the state of an experiment. To be clear: this is about saving *state*, not logging diagnostics. All diagnostic logging is separate from this function. This function will save whatever is in ``state_dict``---usually just a copy of the environment---and the most recent parameters for the model you previously set up saving for with ``setup_pytorch_saver``. Call with any frequency you prefer. If you only want to maintain a single state and overwrite it at each call with the most recent version, leave ``itr=None``. If you want to keep all of the states you save, provide unique (increasing) values for 'itr'. Args: state_dict (dict): Dictionary containing essential elements to describe the current state of training. itr: An int, or None. Current iteration of training. """ fname = 'vars.pkl' if itr is None else 'vars%d.pkl'%itr try: joblib.dump(state_dict, osp.join(self.output_dir, fname)) except: self.log('Warning: could not pickle state_dict.', color='red') if hasattr(self, 'pytorch_saver_elements'): self._pytorch_simple_save(itr) def setup_pytorch_saver(self, what_to_save): """ Set up easy model saving for a single PyTorch model. Because PyTorch saving and loading is especially painless, this is very minimal; we just need references to whatever we would like to pickle. This is integrated into the logger because the logger knows where the user would like to save information about this training run. Args: what_to_save: Any PyTorch model or serializable object containing PyTorch models. 
""" self.pytorch_saver_elements = what_to_save def _pytorch_simple_save(self, itr=None): """ Saves the PyTorch model (or models). """ assert hasattr(self, 'pytorch_saver_elements'), \ "First have to setup saving with self.setup_pytorch_saver" fpath = 'pyt_save' fpath = osp.join(self.output_dir, fpath) fname = 'model' + ('%d'%itr if itr is not None else '') + '.pt' fname = osp.join(fpath, fname) os.makedirs(fpath, exist_ok=True) with warnings.catch_warnings(): warnings.simplefilter("ignore") # We are using a non-recommended way of saving PyTorch models, # by pickling whole objects (which are dependent on the exact # directory structure at the time of saving) as opposed to # just saving network weights. This works sufficiently well # for the our purposes in this homework, but you may want to do # something different for your personal PyTorch project. # We use a catch_warnings() context to avoid the warnings about # not being able to save the source code. torch.save(self.pytorch_saver_elements, fname) def dump_tabular(self): """ Write all of the diagnostics from the current iteration. Writes both to stdout, and to the output file. """ vals = [] key_lens = [len(key) for key in self.log_headers] max_key_len = max(15,max(key_lens)) keystr = '%'+'%d'%max_key_len fmt = "| " + keystr + "s | %15s |" n_slashes = 22 + max_key_len print("-"*n_slashes) for key in self.log_headers: val = self.log_current_row.get(key, "") valstr = "%8.3g"%val if hasattr(val, "__float__") else val print(fmt%(key, valstr)) vals.append(val) print("-"*n_slashes, flush=True) if self.output_file is not None: if self.first_row: self.output_file.write("\t".join(self.log_headers)+"\n") self.output_file.write("\t".join(map(str,vals))+"\n") self.output_file.flush() self.log_current_row.clear() self.first_row=False class EpochLogger(Logger): """ A variant of Logger tailored for tracking average values over epochs. Typical use case: there is some quantity which is calculated many times throughout an epoch, and at the end of the epoch, you would like to report the average / std / min / max value of that quantity. With an EpochLogger, each time the quantity is calculated, you would use .. code-block:: python epoch_logger.store(NameOfQuantity=quantity_value) to load it into the EpochLogger's state. Then at the end of the epoch, you would use .. code-block:: python epoch_logger.log_tabular(NameOfQuantity, **options) to record the desired values. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.epoch_dict = dict() def store(self, **kwargs): """ Save something into the epoch_logger's current state. Provide an arbitrary number of keyword arguments with numerical values. """ for k,v in kwargs.items(): if not(k in self.epoch_dict.keys()): self.epoch_dict[k] = [] self.epoch_dict[k].append(v) def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False): """ Log a value or possibly the mean/std/min/max values of a diagnostic. Args: key (string): The name of the diagnostic. If you are logging a diagnostic whose state has previously been saved with ``store``, the key here has to match the key you used there. val: A value for the diagnostic. If you have previously saved values for this key via ``store``, do *not* provide a ``val`` here. with_min_and_max (bool): If true, log min and max values of the diagnostic over the epoch. average_only (bool): If true, do not log the standard deviation of the diagnostic over the epoch. 
""" if val is not None: super().log_tabular(key,val) else: v = self.epoch_dict[key] vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape)>0 else v stats = statistics_scalar(vals, with_min_and_max=with_min_and_max) super().log_tabular(key if average_only else 'Average' + key, stats[0]) if not(average_only): super().log_tabular('Std'+key, stats[1]) if with_min_and_max: super().log_tabular('Max'+key, stats[3]) super().log_tabular('Min'+key, stats[2]) self.epoch_dict[key] = [] def get_stats(self, key): """ Lets an algorithm ask the logger for mean/std/min/max of a diagnostic. """ v = self.epoch_dict[key] vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape)>0 else v return statistics_scalar(vals)
[]
2024-01-10
tabee/b3rn_zero_streamlit
app~conversational_retrieval_agent.py
import langchain from langchain.agents.agent_toolkits import ( create_conversational_retrieval_agent, create_retriever_tool) from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.schema.messages import SystemMessage from langchain.vectorstores import FAISS from langchain.cache import SQLiteCache from langchain.callbacks import get_openai_callback SYS_PATH_LOCAL = '/workspaces/b3rn_zero_streamlit' SYS_PATH_STREAMLIT = '/app/b3rn_zero_streamlit/' SYS_PATH = SYS_PATH_STREAMLIT langchain.llm_cache = SQLiteCache(database_path=f"{SYS_PATH}/data/langchain_cache.db") def ask_agent__eak(query, openai_api_key, sys_path, model='gpt-4'): '''Display the answer to a question.''' embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) new_db = FAISS.load_local( f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096', embeddings) retriever = new_db.as_retriever() tool = create_retriever_tool( retriever, "content_of_eak_website", """ This tool is designed for an LLM that interacts with the content of the EAK website to retrieve documents. The EAK acts as a compensation fund for various federal entities. Its main responsibility is overseeing the implementation of the 1st pillar (AHV/IV) and the family compensation fund. The tool offers services related to: - Insurance - Contributions - Employer regulations - Pensions Furthermore, it provides insights into family allowances and facilitates electronic data exchange with the EAK via connect.eak. """ ) tools = [tool] system_message = SystemMessage( content=""" You are an expert for the eak_admin_website and: - Always answer questions citing the source. - The source is the URL you receive as a response from the eak_admin_website tool. - If you don't know an answer, state: "No source available, thus no answer possible". - Never invent URLs. Only use URLs from eak_admin_website. - Always respond in German. """ ) llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=0, n=10, verbose=True) agent_executor = create_conversational_retrieval_agent( llm, tools, verbose=False, system_message=system_message, max_token_limit=3000) # heikel print(f"\nFrage: {query}") with get_openai_callback() as callback: answer = agent_executor({"input": query}) print(f"\nAntwort: {answer['output']}\n\n") print(f"Total Tokens: {callback.total_tokens}") print(f"Prompt Tokens: {callback.prompt_tokens}") print(f"Completion Tokens: {callback.completion_tokens}") print(f"Total Cost (USD): ${callback.total_cost}") return answer def ask_agent__chch(query, openai_api_key, sys_path, model='gpt-4'): '''Display the answer to a question.''' embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) # new_db1 = FAISS.load_local( # f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096', # embeddings) # new_db2 = FAISS.load_local( # f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_512', # embeddings) new_db3 = FAISS.load_local( f'{sys_path}/data/vectorstores/ch_ch_texts_faiss_index_4096', embeddings) # new_db1.merge_from(new_db2) # new_db1.merge_from(new_db3) new_db = new_db3 retriever = new_db.as_retriever() tool = create_retriever_tool( retriever, "content_of_chch_website", """ This tool is designed for an LLM that interacts with the content of the ch.ch website to retrieve documents. The chch acts as a information hub for various federal entities. A service of the Confederation, cantons and communes. 
The tool offers services related to: "Easy answers about life in Switzerland" The ch.ch portal is an information platform provided by the Swiss authorities. In just a few clicks, you will find straightforward answers in five languages to questions that many of you ask the authorities. """ ) tools = [tool] system_message = SystemMessage( content=""" You are an expert on the chch_website and: - Always answer questions by citing the source. - The source is the URL you receive as an answer from the content_of_chch_website tool. - If you do not know an answer, indicate "No source available, therefore no answer possible". - Never make up URLs. Only use URLs from the content_of_chch_website. - Always answer in German. """ ) llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=0, n=10, verbose=True) agent_executor = create_conversational_retrieval_agent( llm, tools, verbose=False, system_message=system_message, max_token_limit=3000) # heikel print(f"\nFrage: {query}") with get_openai_callback() as callback: answer = agent_executor({"input": query}) print(f"\nAntwort: {answer['output']}\n\n") print(f"Total Tokens: {callback.total_tokens}") print(f"Prompt Tokens: {callback.prompt_tokens}") print(f"Completion Tokens: {callback.completion_tokens}") print(f"Total Cost (USD): ${callback.total_cost}") return answer if __name__ == "__main__": QUESTIONS = [ "Wann bezahlt die EAK jeweils die Rente aus?", "Was ist das SECO?", "Wer ist Kassenleiterin oder Kassenleiter der EAK?", ] for question in QUESTIONS: OPENAPI_API_KEY = "YOUR_API_KEY" SYS_PATH = "YOUR_SYSTEM_PATH" ask_agent__eak(question, OPENAPI_API_KEY, SYS_PATH)
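The __main__ block above only exercises ask_agent__eak; a hedged sketch of calling the ch.ch variant defined in the same module is shown below. The API key, system path, and example question are placeholders, not values from the repository.

# Hedged usage sketch for ask_agent__chch; key, path and question are placeholders.
answer = ask_agent__chch(
    "Wie melde ich einen Umzug in der Schweiz?",
    "YOUR_API_KEY",
    "/app/b3rn_zero_streamlit/",
    model="gpt-4",
)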
[ "\n You are an expert for the eak_admin_website and:\n - Always answer questions citing the source.\n - The source is the URL you receive as a response from the eak_admin_website tool.\n - If you don't know an answer, state: \"No source available, thus no answer possible\".\n - Never invent URLs. Only use URLs from eak_admin_website.\n - Always respond in German.\n ", "\n You are an expert on the chch_website and:\n - Always answer questions by citing the source.\n - The source is the URL you receive as an answer from the content_of_chch_website tool.\n - If you do not know an answer, indicate \"No source available, therefore no answer possible\".\n - Never make up URLs. Only use URLs from the content_of_chch_website.\n - Always answer in German.\n " ]
2024-01-10
tabee/b3rn_zero_streamlit
app~question_optimizer_chain.py
''' This chain is used to optimize the question by using the content of the question. ''' import os import langchain from langchain import PromptTemplate, LLMChain from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.cache import SQLiteCache from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) SYS_PATH_LOCAL = '/workspaces/b3rn_zero_streamlit' SYS_PATH_STREAMLIT = '/app/b3rn_zero_streamlit/' SYS_PATH = SYS_PATH_STREAMLIT langchain.llm_cache = SQLiteCache(database_path=f"{SYS_PATH}/data/langchain_cache.db") system_message_prompt = SystemMessagePromptTemplate( prompt=PromptTemplate( template=""" # Your role and task Rephrase the Human questions to align with the standards of a Swiss social insurance expert. The restructured question should elicit the same response as the original but with enhanced clarity and precision. Answer not the question, rephrase it. You response should be in german. # Examples of good questions: Wie hoch ist der aktuelle AHV-Rentenbetrag in der Schweiz? Welche Voraussetzungen müssen erfüllt sein um eine IV-Rente zu erhalten? Welche Leistungen werden durch die Erwerbsersatzordnung (EO) abgedeckt? # Use Chunks Use the "Chunks" content to refine the question. Ensure you filter out irrelevant information and focus only on pertinent details. ## Chunks content: {chunks} """, input_variables=["chunks"], ) ) human_message_prompt = HumanMessagePromptTemplate( prompt=PromptTemplate( template=""" ==================== Frage: {question} ==================== Generiere zwei sehr ähnliche mögliche Fragen. du kannst die Fragen mit einem Komma trennen. die frage welche die ursprünglich Frage am besten präzisiert nennst du als erstes: """, input_variables=["question"], ) ) chat_prompt_template = ChatPromptTemplate.from_messages( [system_message_prompt, human_message_prompt]) def optimize_question(user_input, openai_api_key, sys_path): ''' optimize the question by using the content of the question. ''' embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) new_db1 = FAISS.load_local(f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096', embeddings) new_db2 = FAISS.load_local(f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_512', embeddings) new_db3 = FAISS.load_local(f'{sys_path}/data/vectorstores/ch_ch_texts_faiss_index_4096', embeddings) new_db1.merge_from(new_db2) new_db1.merge_from(new_db3) new_db = new_db1 chat = ChatOpenAI( temperature=0.8, model="gpt-4", openai_api_key=openai_api_key) chain = LLMChain( llm=chat, prompt=chat_prompt_template, verbose=False) docs = new_db.similarity_search(user_input, k=10) thechunk = "" for doc in docs: thechunk += doc.page_content + "\n-------end this content-----------\n\n" return chain.run(chunks=thechunk, question=user_input) if __name__ == "__main__": QUESTION = "was isch ch.ch" optimized_question = optimize_question(QUESTION) print(optimized_question)
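optimize_question takes the OpenAI API key and the system path in addition to the raw question; a hedged usage sketch with placeholder values is shown below (both values are illustrative, not taken from the repository).

# Hedged usage sketch for optimize_question defined above.
OPENAI_API_KEY = "YOUR_API_KEY"          # placeholder, e.g. loaded from the environment
SYS_PATH = "/app/b3rn_zero_streamlit/"   # matches SYS_PATH_STREAMLIT defined above

better_question = optimize_question("was isch ch.ch", OPENAI_API_KEY, SYS_PATH)
print(better_question)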
[ "\n====================\nFrage: {question}\n====================\nGeneriere zwei sehr ähnliche mögliche Fragen. du kannst die Fragen mit einem Komma trennen.\ndie frage welche die ursprünglich Frage am besten präzisiert nennst du als erstes:\n", "question", "[PLACEHOLDER, PLACEHOLDER]", "\n# Your role and task\nRephrase the Human questions to align with the standards of a Swiss social insurance expert. \nThe restructured question should elicit the same response as the original but with enhanced \nclarity and precision. Answer not the question, rephrase it. \nYou response should be in german.\n\n# Examples of good questions:\nWie hoch ist der aktuelle AHV-Rentenbetrag in der Schweiz?\nWelche Voraussetzungen müssen erfüllt sein um eine IV-Rente zu erhalten?\nWelche Leistungen werden durch die Erwerbsersatzordnung (EO) abgedeckt?\n\n# Use Chunks\nUse the \"Chunks\" content to refine the question.\nEnsure you filter out irrelevant information and focus only on pertinent details.\n\n## Chunks content:\n {chunks}\n", "chunks" ]
2024-01-10
robocorp/langchain
libs~community~tests~integration_tests~embeddings~test_qianfan_endpoint.py
"""Test Baidu Qianfan Embedding Endpoint.""" from langchain_community.embeddings.baidu_qianfan_endpoint import ( QianfanEmbeddingsEndpoint, ) def test_embedding_multiple_documents() -> None: documents = ["foo", "bar"] embedding = QianfanEmbeddingsEndpoint() output = embedding.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 384 assert len(output[1]) == 384 def test_embedding_query() -> None: query = "foo" embedding = QianfanEmbeddingsEndpoint() output = embedding.embed_query(query) assert len(output) == 384 def test_model() -> None: documents = ["hi", "qianfan"] embedding = QianfanEmbeddingsEndpoint(model="Embedding-V1") output = embedding.embed_documents(documents) assert len(output) == 2
[]
2024-01-10
robocorp/langchain
libs~langchain~langchain~utils~ernie_functions.py
from typing import Literal, Optional, Type, TypedDict from langchain.pydantic_v1 import BaseModel from langchain.utils.json_schema import dereference_refs class FunctionDescription(TypedDict): """Representation of a callable function to the Ernie API.""" name: str """The name of the function.""" description: str """A description of the function.""" parameters: dict """The parameters of the function.""" class ToolDescription(TypedDict): """Representation of a callable function to the Ernie API.""" type: Literal["function"] function: FunctionDescription def convert_pydantic_to_ernie_function( model: Type[BaseModel], *, name: Optional[str] = None, description: Optional[str] = None, ) -> FunctionDescription: """Converts a Pydantic model to a function description for the Ernie API.""" schema = dereference_refs(model.schema()) schema.pop("definitions", None) return { "name": name or schema["title"], "description": description or schema["description"], "parameters": schema, } def convert_pydantic_to_ernie_tool( model: Type[BaseModel], *, name: Optional[str] = None, description: Optional[str] = None, ) -> ToolDescription: """Converts a Pydantic model to a function description for the Ernie API.""" function = convert_pydantic_to_ernie_function( model, name=name, description=description ) return {"type": "function", "function": function}
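A hedged usage sketch for the converters above; the WeatherQuery model is invented for illustration and is not part of this codebase.

# Hedged usage sketch of convert_pydantic_to_ernie_function / _tool.
from langchain.pydantic_v1 import BaseModel, Field


class WeatherQuery(BaseModel):
    """Look up the current weather for a city."""

    city: str = Field(..., description="Name of the city")
    unit: str = Field("celsius", description="Temperature unit to report")


function_description = convert_pydantic_to_ernie_function(WeatherQuery)
tool_description = convert_pydantic_to_ernie_tool(WeatherQuery, name="get_weather")
# function_description["parameters"] now holds the dereferenced JSON schema of WeatherQuery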
[]
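A short usage sketch for the converters above, using a hypothetical WeatherQuery model; the resulting dict is what would be handed to the Ernie API as a function/tool definition:

from langchain.pydantic_v1 import BaseModel, Field
from langchain.utils.ernie_functions import convert_pydantic_to_ernie_tool


class WeatherQuery(BaseModel):
    """Look up the current weather for a city."""

    city: str = Field(..., description="City name, e.g. Zurich")
    unit: str = Field("celsius", description="Temperature unit")


tool = convert_pydantic_to_ernie_tool(WeatherQuery)
print(tool["type"])              # -> "function"
print(tool["function"]["name"])  # -> "WeatherQuery"
print(tool["function"]["parameters"]["properties"].keys())  # city, unit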
2024-01-10
robocorp/langchain
libs~langchain~tests~unit_tests~chains~test_retrieval.py
"""Test conversation chain and memory.""" from langchain_core.documents import Document from langchain_core.prompts.prompt import PromptTemplate from langchain.chains import create_retrieval_chain from langchain.llms.fake import FakeListLLM from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever def test_create() -> None: answer = "I know the answer!" llm = FakeListLLM(responses=[answer]) retriever = FakeParrotRetriever() question_gen_prompt = PromptTemplate.from_template("hi! {input} {chat_history}") chain = create_retrieval_chain(retriever, question_gen_prompt | llm) expected_output = { "answer": "I know the answer!", "chat_history": "foo", "context": [Document(page_content="What is the answer?")], "input": "What is the answer?", } output = chain.invoke({"input": "What is the answer?", "chat_history": "foo"}) assert output == expected_output
[ "hi! {input} {chat_history}" ]
2024-01-10
robocorp/langchain
libs~langchain~tests~unit_tests~retrievers~parrot_retriever.py
from typing import List

from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class FakeParrotRetriever(BaseRetriever):
    """Test util that parrots the query back as documents."""

    def _get_relevant_documents(  # type: ignore[override]
        self,
        query: str,
    ) -> List[Document]:
        return [Document(page_content=query)]

    async def _aget_relevant_documents(  # type: ignore[override]
        self,
        query: str,
    ) -> List[Document]:
        return [Document(page_content=query)]
[]
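A quick sketch of how the parrot retriever behaves, which is also why the retrieval-chain test above expects the input question to come back as the context document:

retriever = FakeParrotRetriever()
docs = retriever.get_relevant_documents("What is the answer?")
assert docs[0].page_content == "What is the answer?"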
2024-01-10
robocorp/langchain
libs~core~tests~unit_tests~callbacks~tracers~test_base_tracer.py
"""Test Tracer classes.""" from __future__ import annotations from datetime import datetime from typing import List from uuid import uuid4 import pytest from freezegun import freeze_time from langchain_core.callbacks import CallbackManager from langchain_core.messages import HumanMessage from langchain_core.outputs import LLMResult from langchain_core.tracers.base import BaseTracer, TracerException from langchain_core.tracers.schemas import Run SERIALIZED = {"id": ["llm"]} SERIALIZED_CHAT = {"id": ["chat_model"]} class FakeTracer(BaseTracer): """Fake tracer that records LangChain execution.""" def __init__(self) -> None: """Initialize the tracer.""" super().__init__() self.runs: List[Run] = [] def _persist_run(self, run: Run) -> None: """Persist a run.""" self.runs.append(run) def _compare_run_with_error(run: Run, expected_run: Run) -> None: if run.child_runs: assert len(expected_run.child_runs) == len(run.child_runs) for received, expected in zip(run.child_runs, expected_run.child_runs): _compare_run_with_error(received, expected) received_dict = run.dict(exclude={"child_runs"}) received_err = received_dict.pop("error") expected_dict = expected_run.dict(exclude={"child_runs"}) expected_err = expected_dict.pop("error") assert received_dict == expected_dict if expected_err is not None: assert received_err is not None assert expected_err in received_err else: assert received_err is None @freeze_time("2023-01-01") def test_tracer_llm_run() -> None: """Test tracer on an LLM run.""" uuid = uuid4() compare_run = Run( id=uuid, parent_run_id=None, start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED, inputs={"prompts": []}, outputs=LLMResult(generations=[[]]), error=None, run_type="llm", ) tracer = FakeTracer() tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] @freeze_time("2023-01-01") def test_tracer_chat_model_run() -> None: """Test tracer on a Chat Model run.""" tracer = FakeTracer() manager = CallbackManager(handlers=[tracer]) run_managers = manager.on_chat_model_start( serialized=SERIALIZED_CHAT, messages=[[HumanMessage(content="")]] ) compare_run = Run( id=str(run_managers[0].run_id), name="chat_model", start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED_CHAT, inputs=dict(prompts=["Human: "]), outputs=LLMResult(generations=[[]]), error=None, run_type="llm", ) for run_manager in run_managers: run_manager.on_llm_end(response=LLMResult(generations=[[]])) assert tracer.runs == [compare_run] @freeze_time("2023-01-01") def test_tracer_llm_run_errors_no_start() -> None: """Test tracer on an LLM run without a start.""" tracer = FakeTracer() with pytest.raises(TracerException): tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid4()) @freeze_time("2023-01-01") def test_tracer_multiple_llm_runs() -> None: """Test the tracer with multiple runs.""" uuid = uuid4() compare_run = Run( id=uuid, name="llm", start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, 
child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), error=None, run_type="llm", ) tracer = FakeTracer() num_runs = 10 for _ in range(num_runs): tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] * num_runs @freeze_time("2023-01-01") def test_tracer_chain_run() -> None: """Test tracer on a Chain run.""" uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized={"name": "chain"}, inputs={}, outputs={}, error=None, run_type="chain", ) tracer = FakeTracer() tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid) tracer.on_chain_end(outputs={}, run_id=uuid) assert tracer.runs == [compare_run] @freeze_time("2023-01-01") def test_tracer_tool_run() -> None: """Test tracer on a Tool run.""" uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized={"name": "tool"}, inputs={"input": "test"}, outputs={"output": "test"}, error=None, run_type="tool", ) tracer = FakeTracer() tracer.on_tool_start(serialized={"name": "tool"}, input_str="test", run_id=uuid) tracer.on_tool_end("test", run_id=uuid) assert tracer.runs == [compare_run] @freeze_time("2023-01-01") def test_tracer_nested_run() -> None: """Test tracer on a nested run.""" tracer = FakeTracer() chain_uuid = uuid4() tool_uuid = uuid4() llm_uuid1 = uuid4() llm_uuid2 = uuid4() for _ in range(10): tracer.on_chain_start( serialized={"name": "chain"}, inputs={}, run_id=chain_uuid ) tracer.on_tool_start( serialized={"name": "tool"}, input_str="test", run_id=tool_uuid, parent_run_id=chain_uuid, ) tracer.on_llm_start( serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=tool_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_tool_end("test", run_id=tool_uuid) tracer.on_llm_start( serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2) tracer.on_chain_end(outputs={}, run_id=chain_uuid) compare_run = Run( id=str(chain_uuid), error=None, start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=4, serialized={"name": "chain"}, inputs={}, outputs={}, run_type="chain", child_runs=[ Run( id=tool_uuid, parent_run_id=chain_uuid, start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=2, child_execution_order=3, serialized={"name": "tool"}, inputs=dict(input="test"), outputs=dict(output="test"), error=None, run_type="tool", child_runs=[ Run( id=str(llm_uuid1), parent_run_id=str(tool_uuid), error=None, start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=3, child_execution_order=3, 
serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type="llm", ) ], ), Run( id=str(llm_uuid2), parent_run_id=str(chain_uuid), error=None, start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=4, child_execution_order=4, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type="llm", ), ], ) assert tracer.runs[0] == compare_run assert tracer.runs == [compare_run] * 10 @freeze_time("2023-01-01") def test_tracer_llm_run_on_error() -> None: """Test tracer on an LLM run with an error.""" exception = Exception("test") uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=None, error=repr(exception), run_type="llm", ) tracer = FakeTracer() tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert len(tracer.runs) == 1 _compare_run_with_error(tracer.runs[0], compare_run) @freeze_time("2023-01-01") def test_tracer_llm_run_on_error_callback() -> None: """Test tracer on an LLM run with an error and a callback.""" exception = Exception("test") uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=None, error=repr(exception), run_type="llm", ) class FakeTracerWithLlmErrorCallback(FakeTracer): error_run = None def _on_llm_error(self, run: Run) -> None: self.error_run = run tracer = FakeTracerWithLlmErrorCallback() tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert tracer.error_run is not None _compare_run_with_error(tracer.error_run, compare_run) @freeze_time("2023-01-01") def test_tracer_chain_run_on_error() -> None: """Test tracer on a Chain run with an error.""" exception = Exception("test") uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized={"name": "chain"}, inputs={}, outputs=None, error=repr(exception), run_type="chain", ) tracer = FakeTracer() tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid) tracer.on_chain_error(exception, run_id=uuid) _compare_run_with_error(tracer.runs[0], compare_run) @freeze_time("2023-01-01") def test_tracer_tool_run_on_error() -> None: """Test tracer on a Tool run with an error.""" exception = Exception("test") uuid = uuid4() compare_run = Run( id=str(uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=1, serialized={"name": "tool"}, inputs=dict(input="test"), outputs=None, action="{'name': 'tool'}", error=repr(exception), run_type="tool", ) tracer = FakeTracer() tracer.on_tool_start(serialized={"name": 
"tool"}, input_str="test", run_id=uuid) tracer.on_tool_error(exception, run_id=uuid) _compare_run_with_error(tracer.runs[0], compare_run) @freeze_time("2023-01-01") def test_tracer_nested_runs_on_error() -> None: """Test tracer on a nested run with an error.""" exception = Exception("test") tracer = FakeTracer() chain_uuid = uuid4() tool_uuid = uuid4() llm_uuid1 = uuid4() llm_uuid2 = uuid4() llm_uuid3 = uuid4() for _ in range(3): tracer.on_chain_start( serialized={"name": "chain"}, inputs={}, run_id=chain_uuid ) tracer.on_llm_start( serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=chain_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_llm_start( serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2) tracer.on_tool_start( serialized={"name": "tool"}, input_str="test", run_id=tool_uuid, parent_run_id=chain_uuid, ) tracer.on_llm_start( serialized=SERIALIZED, prompts=[], run_id=llm_uuid3, parent_run_id=tool_uuid, ) tracer.on_llm_error(exception, run_id=llm_uuid3) tracer.on_tool_error(exception, run_id=tool_uuid) tracer.on_chain_error(exception, run_id=chain_uuid) compare_run = Run( id=str(chain_uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=1, child_execution_order=5, serialized={"name": "chain"}, error=repr(exception), inputs={}, outputs=None, run_type="chain", child_runs=[ Run( id=str(llm_uuid1), parent_run_id=str(chain_uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=2, child_execution_order=2, serialized=SERIALIZED, error=None, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]], llm_output=None), run_type="llm", ), Run( id=str(llm_uuid2), parent_run_id=str(chain_uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "end", "time": datetime.utcnow()}, ], extra={}, execution_order=3, child_execution_order=3, serialized=SERIALIZED, error=None, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]], llm_output=None), run_type="llm", ), Run( id=str(tool_uuid), parent_run_id=str(chain_uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=4, child_execution_order=5, serialized={"name": "tool"}, error=repr(exception), inputs=dict(input="test"), outputs=None, action="{'name': 'tool'}", child_runs=[ Run( id=str(llm_uuid3), parent_run_id=str(tool_uuid), start_time=datetime.utcnow(), end_time=datetime.utcnow(), events=[ {"name": "start", "time": datetime.utcnow()}, {"name": "error", "time": datetime.utcnow()}, ], extra={}, execution_order=5, child_execution_order=5, serialized=SERIALIZED, error=repr(exception), inputs=dict(prompts=[]), outputs=None, run_type="llm", ) ], run_type="tool", ), ], ) assert len(tracer.runs) == 3 for run in tracer.runs: _compare_run_with_error(run, compare_run)
[]
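The tests above exercise BaseTracer through its callback hooks; a tracer is just a callback handler that receives the finished Run tree via _persist_run. A minimal sketch of that pattern, with some_chain standing in for any Runnable:

from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run


class PrintingTracer(BaseTracer):
    """Toy tracer that 'persists' runs by printing their type and id."""

    def _persist_run(self, run: Run) -> None:
        print(run.run_type, run.id)


# tracer = PrintingTracer()
# some_chain.invoke({"input": "hi"}, config={"callbacks": [tracer]})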
2024-01-10
robocorp/langchain
libs~core~langchain_core~language_models~llms.py
"""Base interface for large language models to expose.""" from __future__ import annotations import asyncio import functools import inspect import json import logging import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, Union, cast, ) import yaml from tenacity import ( RetryCallState, before_sleep_log, retry, retry_base, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain_core.callbacks import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, BaseCallbackManager, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain_core.globals import get_llm_cache from langchain_core.language_models.base import BaseLanguageModel, LanguageModelInput from langchain_core.load import dumpd from langchain_core.messages import AIMessage, BaseMessage, get_buffer_string from langchain_core.outputs import Generation, GenerationChunk, LLMResult, RunInfo from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue from langchain_core.pydantic_v1 import Field, root_validator, validator from langchain_core.runnables import RunnableConfig, ensure_config, get_config_list from langchain_core.runnables.config import run_in_executor logger = logging.getLogger(__name__) def _get_verbosity() -> bool: from langchain_core.globals import get_verbose return get_verbose() @functools.lru_cache def _log_error_once(msg: str) -> None: """Log an error once.""" logger.error(msg) def create_base_retry_decorator( error_types: List[Type[BaseException]], max_retries: int = 1, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: """Create a retry decorator for a given LLM and provided list of error types.""" _logging = before_sleep_log(logger, logging.WARNING) def _before_sleep(retry_state: RetryCallState) -> None: _logging(retry_state) if run_manager: if isinstance(run_manager, AsyncCallbackManagerForLLMRun): coro = run_manager.on_retry(retry_state) try: loop = asyncio.get_event_loop() if loop.is_running(): loop.create_task(coro) else: asyncio.run(coro) except Exception as e: _log_error_once(f"Error in on_retry: {e}") else: run_manager.on_retry(retry_state) return None min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards retry_instance: "retry_base" = retry_if_exception_type(error_types[0]) for error in error_types[1:]: retry_instance = retry_instance | retry_if_exception_type(error) return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=retry_instance, before_sleep=_before_sleep, ) def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} llm_cache = get_llm_cache() for i, prompt in enumerate(prompts): if llm_cache is not None: cache_val = llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, 
missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" llm_cache = get_llm_cache() for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if llm_cache is not None: llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel[str], ABC): """Base LLM abstract interface. It should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose # --- Runnable methods --- @property def OutputType(self) -> Type[str]: """Get the input type for this runnable.""" return str def _convert_input(self, input: LanguageModelInput) -> PromptValue: if isinstance(input, PromptValue): return input elif isinstance(input, str): return StringPromptValue(text=input) elif isinstance(input, list): return ChatPromptValue(messages=input) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." 
) def invoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: config = ensure_config(config) return ( self.generate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), **kwargs, ) .generations[0][0] .text ) async def ainvoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: config = ensure_config(config) llm_result = await self.agenerate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), **kwargs, ) return llm_result.generations[0][0].text def batch( self, inputs: List[LanguageModelInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> List[str]: if not inputs: return [] config = get_config_list(config, len(inputs)) max_concurrency = config[0].get("max_concurrency") if max_concurrency is None: try: llm_result = self.generate_prompt( [self._convert_input(input) for input in inputs], callbacks=[c.get("callbacks") for c in config], tags=[c.get("tags") for c in config], metadata=[c.get("metadata") for c in config], run_name=[c.get("run_name") for c in config], **kwargs, ) return [g[0].text for g in llm_result.generations] except Exception as e: if return_exceptions: return cast(List[str], [e for _ in inputs]) else: raise e else: batches = [ inputs[i : i + max_concurrency] for i in range(0, len(inputs), max_concurrency) ] config = [{**c, "max_concurrency": None} for c in config] # type: ignore[misc] return [ output for i, batch in enumerate(batches) for output in self.batch( batch, config=config[i * max_concurrency : (i + 1) * max_concurrency], return_exceptions=return_exceptions, **kwargs, ) ] async def abatch( self, inputs: List[LanguageModelInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> List[str]: if not inputs: return [] config = get_config_list(config, len(inputs)) max_concurrency = config[0].get("max_concurrency") if max_concurrency is None: try: llm_result = await self.agenerate_prompt( [self._convert_input(input) for input in inputs], callbacks=[c.get("callbacks") for c in config], tags=[c.get("tags") for c in config], metadata=[c.get("metadata") for c in config], run_name=[c.get("run_name") for c in config], **kwargs, ) return [g[0].text for g in llm_result.generations] except Exception as e: if return_exceptions: return cast(List[str], [e for _ in inputs]) else: raise e else: batches = [ inputs[i : i + max_concurrency] for i in range(0, len(inputs), max_concurrency) ] config = [{**c, "max_concurrency": None} for c in config] # type: ignore[misc] return [ output for i, batch in enumerate(batches) for output in await self.abatch( batch, config=config[i * max_concurrency : (i + 1) * max_concurrency], return_exceptions=return_exceptions, **kwargs, ) ] def stream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[str]: if type(self)._stream == BaseLLM._stream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: prompt = 
self._convert_input(input).to_string() config = ensure_config(config) params = self.dict() params["stop"] = stop params = {**params, **kwargs} options = {"stop": stop} callback_manager = CallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options, name=config.get("run_name"), batch_size=1, ) generation: Optional[GenerationChunk] = None try: for chunk in self._stream( prompt, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.text if generation is None: generation = chunk else: generation += chunk assert generation is not None except BaseException as e: run_manager.on_llm_error( e, response=LLMResult( generations=[[generation]] if generation else [] ), ) raise e else: run_manager.on_llm_end(LLMResult(generations=[[generation]])) async def astream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[str]: if type(self)._astream == BaseLLM._astream: # model doesn't implement streaming, so use default implementation yield await self.ainvoke(input, config=config, stop=stop, **kwargs) else: prompt = self._convert_input(input).to_string() config = ensure_config(config) params = self.dict() params["stop"] = stop params = {**params, **kwargs} options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = await callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options, name=config.get("run_name"), batch_size=1, ) generation: Optional[GenerationChunk] = None try: async for chunk in self._astream( prompt, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.text if generation is None: generation = chunk else: generation += chunk assert generation is not None except BaseException as e: await run_manager.on_llm_error( e, response=LLMResult( generations=[[generation]] if generation else [] ), ) raise e else: await run_manager.on_llm_end(LLMResult(generations=[[generation]])) # --- Custom methods --- @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" return await run_in_executor( None, self._generate, prompts, stop, run_manager.get_sync() if run_manager else None, **kwargs, ) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: raise NotImplementedError() def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: raise NotImplementedError() def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return 
self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate( prompt_strings, stop=stop, callbacks=callbacks, **kwargs ) def _generate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[CallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try: output = ( self._generate( prompts, stop=stop, # TODO: support multiple run managers run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else self._generate(prompts, stop=stop) ) except BaseException as e: for run_manager in run_managers: run_manager.on_llm_error(e, response=LLMResult(generations=[])) raise e flattened_outputs = output.flatten() for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, *, tags: Optional[Union[List[str], List[List[str]]]] = None, metadata: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, run_name: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) # Create callback managers if ( isinstance(callbacks, list) and callbacks and ( isinstance(callbacks[0], (list, BaseCallbackManager)) or callbacks[0] is None ) ): # We've received a list of callbacks args to apply to each input assert len(callbacks) == len(prompts) assert tags is None or ( isinstance(tags, list) and len(tags) == len(prompts) ) assert metadata is None or ( isinstance(metadata, list) and len(metadata) == len(prompts) ) assert run_name is None or ( isinstance(run_name, list) and len(run_name) == len(prompts) ) callbacks = cast(List[Callbacks], callbacks) tags_list = cast(List[Optional[List[str]]], tags or ([None] * len(prompts))) metadata_list = cast( List[Optional[Dict[str, Any]]], metadata or ([{}] * len(prompts)) ) run_name_list = run_name or cast( List[Optional[str]], ([None] * len(prompts)) ) callback_managers = [ CallbackManager.configure( callback, self.callbacks, self.verbose, tag, self.tags, meta, self.metadata, ) for callback, tag, meta in zip(callbacks, tags_list, metadata_list) ] else: # We've received a single callbacks arg to apply to all inputs callback_managers = [ CallbackManager.configure( cast(Callbacks, callbacks), self.callbacks, self.verbose, cast(List[str], tags), self.tags, cast(Dict[str, Any], metadata), self.metadata, ) ] * len(prompts) run_name_list = [cast(Optional[str], run_name)] * len(prompts) params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if get_llm_cache() is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but 
no cache found at `langchain.cache`." ) run_managers = [ callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options, name=run_name, batch_size=len(prompts), )[0] for callback_manager, prompt, run_name in zip( callback_managers, prompts, run_name_list ) ] output = self._generate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = [ callback_managers[idx].on_llm_start( dumpd(self), [prompts[idx]], invocation_params=params, options=options, name=run_name_list[idx], batch_size=len(missing_prompts), )[0] for idx in missing_prompt_idxs ] new_results = self._generate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def _agenerate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[AsyncCallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try: output = ( await self._agenerate( prompts, stop=stop, run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except BaseException as e: await asyncio.gather( *[ run_manager.on_llm_error(e, response=LLMResult(generations=[])) for run_manager in run_managers ] ) raise e flattened_outputs = output.flatten() await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, *, tags: Optional[Union[List[str], List[List[str]]]] = None, metadata: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, run_name: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # Create callback managers if isinstance(callbacks, list) and ( isinstance(callbacks[0], (list, BaseCallbackManager)) or callbacks[0] is None ): # We've received a list of callbacks args to apply to each input assert len(callbacks) == len(prompts) assert tags is None or ( isinstance(tags, list) and len(tags) == len(prompts) ) assert metadata is None or ( isinstance(metadata, list) and len(metadata) == len(prompts) ) assert run_name is None or ( isinstance(run_name, list) and len(run_name) == len(prompts) ) callbacks = cast(List[Callbacks], callbacks) tags_list = cast(List[Optional[List[str]]], tags or ([None] * len(prompts))) metadata_list = cast( List[Optional[Dict[str, Any]]], metadata or ([{}] * len(prompts)) ) run_name_list = run_name or cast( List[Optional[str]], ([None] * len(prompts)) ) callback_managers = [ AsyncCallbackManager.configure( callback, self.callbacks, self.verbose, tag, self.tags, meta, self.metadata, ) for callback, tag, meta in zip(callbacks, tags_list, metadata_list) ] else: # We've received a single callbacks arg to apply to all inputs callback_managers = [ AsyncCallbackManager.configure( cast(Callbacks, 
callbacks), self.callbacks, self.verbose, cast(List[str], tags), self.tags, cast(Dict[str, Any], metadata), self.metadata, ) ] * len(prompts) run_name_list = [cast(Optional[str], run_name)] * len(prompts) params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if get_llm_cache() is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) run_managers = await asyncio.gather( *[ callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options, name=run_name, batch_size=len(prompts), ) for callback_manager, prompt, run_name in zip( callback_managers, prompts, run_name_list ) ] ) run_managers = [r[0] for r in run_managers] output = await self._agenerate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = await asyncio.gather( *[ callback_managers[idx].on_llm_start( dumpd(self), [prompts[idx]], invocation_params=params, options=options, name=run_name_list[idx], batch_size=len(missing_prompts), ) for idx in missing_prompt_idxs ] ) run_managers = [r[0] for r in run_managers] new_results = await self._agenerate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." 
) return ( self.generate( [prompt], stop=stop, callbacks=callbacks, tags=tags, metadata=metadata, **kwargs, ) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate( [prompt], stop=stop, callbacks=callbacks, tags=tags, metadata=metadata, **kwargs, ) return result.generations[0][0].text def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop, **kwargs) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop, **kwargs) return AIMessage(content=content) async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop, **kwargs) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop, **kwargs) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """Base LLM abstract class. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. 
""" @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" return await run_in_executor( None, self._call, prompt, stop, run_manager.get_sync() if run_manager else None, **kwargs, ) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else self._call(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else await self._acall(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "[]", "{}" ]
2024-01-10
robocorp/langchain
libs~community~langchain_community~utilities~sql_database.py
"""SQLAlchemy wrapper around a database.""" from __future__ import annotations import warnings from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Union import sqlalchemy from langchain_core.utils import get_from_env from sqlalchemy import MetaData, Table, create_engine, inspect, select, text from sqlalchemy.engine import Engine from sqlalchemy.exc import ProgrammingError, SQLAlchemyError from sqlalchemy.schema import CreateTable from sqlalchemy.types import NullType def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str: return ( f'Name: {index["name"]}, Unique: {index["unique"]},' f' Columns: {str(index["column_names"])}' ) def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str: """ Truncate a string to a certain number of words, based on the max string length. """ if not isinstance(content, str) or length <= 0: return content if len(content) <= length: return content return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix class SQLDatabase: """SQLAlchemy wrapper around a database.""" def __init__( self, engine: Engine, schema: Optional[str] = None, metadata: Optional[MetaData] = None, ignore_tables: Optional[List[str]] = None, include_tables: Optional[List[str]] = None, sample_rows_in_table_info: int = 3, indexes_in_table_info: bool = False, custom_table_info: Optional[dict] = None, view_support: bool = False, max_string_length: int = 300, ): """Create engine from database URI.""" self._engine = engine self._schema = schema if include_tables and ignore_tables: raise ValueError("Cannot specify both include_tables and ignore_tables") self._inspector = inspect(self._engine) # including view support by adding the views as well as tables to the all # tables list if view_support is True self._all_tables = set( self._inspector.get_table_names(schema=schema) + (self._inspector.get_view_names(schema=schema) if view_support else []) ) self._include_tables = set(include_tables) if include_tables else set() if self._include_tables: missing_tables = self._include_tables - self._all_tables if missing_tables: raise ValueError( f"include_tables {missing_tables} not found in database" ) self._ignore_tables = set(ignore_tables) if ignore_tables else set() if self._ignore_tables: missing_tables = self._ignore_tables - self._all_tables if missing_tables: raise ValueError( f"ignore_tables {missing_tables} not found in database" ) usable_tables = self.get_usable_table_names() self._usable_tables = set(usable_tables) if usable_tables else self._all_tables if not isinstance(sample_rows_in_table_info, int): raise TypeError("sample_rows_in_table_info must be an integer") self._sample_rows_in_table_info = sample_rows_in_table_info self._indexes_in_table_info = indexes_in_table_info self._custom_table_info = custom_table_info if self._custom_table_info: if not isinstance(self._custom_table_info, dict): raise TypeError( "table_info must be a dictionary with table names as keys and the " "desired table info as values" ) # only keep the tables that are also present in the database intersection = set(self._custom_table_info).intersection(self._all_tables) self._custom_table_info = dict( (table, self._custom_table_info[table]) for table in self._custom_table_info if table in intersection ) self._max_string_length = max_string_length self._metadata = metadata or MetaData() # including view support if view_support = true self._metadata.reflect( views=view_support, bind=self._engine, only=list(self._usable_tables), schema=self._schema, ) 
@classmethod def from_uri( cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any ) -> SQLDatabase: """Construct a SQLAlchemy engine from URI.""" _engine_args = engine_args or {} return cls(create_engine(database_uri, **_engine_args), **kwargs) @classmethod def from_databricks( cls, catalog: str, schema: str, host: Optional[str] = None, api_token: Optional[str] = None, warehouse_id: Optional[str] = None, cluster_id: Optional[str] = None, engine_args: Optional[dict] = None, **kwargs: Any, ) -> SQLDatabase: """ Class method to create an SQLDatabase instance from a Databricks connection. This method requires the 'databricks-sql-connector' package. If not installed, it can be added using `pip install databricks-sql-connector`. Args: catalog (str): The catalog name in the Databricks database. schema (str): The schema name in the catalog. host (Optional[str]): The Databricks workspace hostname, excluding 'https://' part. If not provided, it attempts to fetch from the environment variable 'DATABRICKS_HOST'. If still unavailable and if running in a Databricks notebook, it defaults to the current workspace hostname. Defaults to None. api_token (Optional[str]): The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. If not provided, it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable and running in a Databricks notebook, a temporary token for the current user is generated. Defaults to None. warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If provided, the method configures the connection to use this warehouse. Cannot be used with 'cluster_id'. Defaults to None. cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If provided, the method configures the connection to use this cluster. Cannot be used with 'warehouse_id'. If running in a Databricks notebook and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the cluster the notebook is attached to. Defaults to None. engine_args (Optional[dict]): The arguments to be used when connecting Databricks. Defaults to None. **kwargs (Any): Additional keyword arguments for the `from_uri` method. Returns: SQLDatabase: An instance of SQLDatabase configured with the provided Databricks connection details. Raises: ValueError: If 'databricks-sql-connector' is not found, or if both 'warehouse_id' and 'cluster_id' are provided, or if neither 'warehouse_id' nor 'cluster_id' are provided and it's not executing inside a Databricks notebook. """ try: from databricks import sql # noqa: F401 except ImportError: raise ValueError( "databricks-sql-connector package not found, please install with" " `pip install databricks-sql-connector`" ) context = None try: from dbruntime.databricks_repl_context import get_context context = get_context() except ImportError: pass default_host = context.browserHostName if context else None if host is None: host = get_from_env("host", "DATABRICKS_HOST", default_host) default_api_token = context.apiToken if context else None if api_token is None: api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token) if warehouse_id is None and cluster_id is None: if context: cluster_id = context.clusterId else: raise ValueError( "Need to provide either 'warehouse_id' or 'cluster_id'." 
) if warehouse_id and cluster_id: raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.") if warehouse_id: http_path = f"/sql/1.0/warehouses/{warehouse_id}" else: http_path = f"/sql/protocolv1/o/0/{cluster_id}" uri = ( f"databricks://token:{api_token}@{host}?" f"http_path={http_path}&catalog={catalog}&schema={schema}" ) return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs) @classmethod def from_cnosdb( cls, url: str = "127.0.0.1:8902", user: str = "root", password: str = "", tenant: str = "cnosdb", database: str = "public", ) -> SQLDatabase: """ Class method to create an SQLDatabase instance from a CnosDB connection. This method requires the 'cnos-connector' package. If not installed, it can be added using `pip install cnos-connector`. Args: url (str): The HTTP connection host name and port number of the CnosDB service, excluding "http://" or "https://", with a default value of "127.0.0.1:8902". user (str): The username used to connect to the CnosDB service, with a default value of "root". password (str): The password of the user connecting to the CnosDB service, with a default value of "". tenant (str): The name of the tenant used to connect to the CnosDB service, with a default value of "cnosdb". database (str): The name of the database in the CnosDB tenant. Returns: SQLDatabase: An instance of SQLDatabase configured with the provided CnosDB connection details. """ try: from cnosdb_connector import make_cnosdb_langchain_uri uri = make_cnosdb_langchain_uri(url, user, password, tenant, database) return cls.from_uri(database_uri=uri) except ImportError: raise ValueError( "cnos-connector package not found, please install with" " `pip install cnos-connector`" ) @property def dialect(self) -> str: """Return string representation of dialect to use.""" return self._engine.dialect.name def get_usable_table_names(self) -> Iterable[str]: """Get names of tables available.""" if self._include_tables: return sorted(self._include_tables) return sorted(self._all_tables - self._ignore_tables) def get_table_names(self) -> Iterable[str]: """Get names of tables available.""" warnings.warn( "This method is deprecated - please use `get_usable_table_names`." ) return self.get_usable_table_names() @property def table_info(self) -> str: """Information about all tables in the database.""" return self.get_table_info() def get_table_info(self, table_names: Optional[List[str]] = None) -> str: """Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. 
""" all_table_names = self.get_usable_table_names() if table_names is not None: missing_tables = set(table_names).difference(all_table_names) if missing_tables: raise ValueError(f"table_names {missing_tables} not found in database") all_table_names = table_names meta_tables = [ tbl for tbl in self._metadata.sorted_tables if tbl.name in set(all_table_names) and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_")) ] tables = [] for table in meta_tables: if self._custom_table_info and table.name in self._custom_table_info: tables.append(self._custom_table_info[table.name]) continue # Ignore JSON datatyped columns for k, v in table.columns.items(): if type(v.type) is NullType: table._columns.remove(v) # add create table command create_table = str(CreateTable(table).compile(self._engine)) table_info = f"{create_table.rstrip()}" has_extra_info = ( self._indexes_in_table_info or self._sample_rows_in_table_info ) if has_extra_info: table_info += "\n\n/*" if self._indexes_in_table_info: table_info += f"\n{self._get_table_indexes(table)}\n" if self._sample_rows_in_table_info: table_info += f"\n{self._get_sample_rows(table)}\n" if has_extra_info: table_info += "*/" tables.append(table_info) tables.sort() final_str = "\n\n".join(tables) return final_str def _get_table_indexes(self, table: Table) -> str: indexes = self._inspector.get_indexes(table.name) indexes_formatted = "\n".join(map(_format_index, indexes)) return f"Table Indexes:\n{indexes_formatted}" def _get_sample_rows(self, table: Table) -> str: # build the select command command = select(table).limit(self._sample_rows_in_table_info) # save the columns in string format columns_str = "\t".join([col.name for col in table.columns]) try: # get the sample rows with self._engine.connect() as connection: sample_rows_result = connection.execute(command) # type: ignore # shorten values in the sample rows sample_rows = list( map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result) ) # save the sample rows in string format sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows]) # in some dialects when there are no rows in the table a # 'ProgrammingError' is returned except ProgrammingError: sample_rows_str = "" return ( f"{self._sample_rows_in_table_info} rows from {table.name} table:\n" f"{columns_str}\n" f"{sample_rows_str}" ) def _execute( self, command: str, fetch: Union[Literal["all"], Literal["one"]] = "all", ) -> Sequence[Dict[str, Any]]: """ Executes SQL command through underlying engine. If the statement returns no rows, an empty list is returned. """ with self._engine.begin() as connection: if self._schema is not None: if self.dialect == "snowflake": connection.exec_driver_sql( "ALTER SESSION SET search_path = %s", (self._schema,) ) elif self.dialect == "bigquery": connection.exec_driver_sql("SET @@dataset_id=?", (self._schema,)) elif self.dialect == "mssql": pass elif self.dialect == "trino": connection.exec_driver_sql("USE ?", (self._schema,)) elif self.dialect == "duckdb": # Unclear which parameterized argument syntax duckdb supports. # The docs for the duckdb client say they support multiple, # but `duckdb_engine` seemed to struggle with all of them: # https://github.com/Mause/duckdb_engine/issues/796 connection.exec_driver_sql(f"SET search_path TO {self._schema}") elif self.dialect == "oracle": connection.exec_driver_sql( f"ALTER SESSION SET CURRENT_SCHEMA = {self._schema}" ) elif self.dialect == "sqlany": # If anybody using Sybase SQL anywhere database then it should not # go to else condition. 
It should be same as mssql. pass else: # postgresql and other compatible dialects connection.exec_driver_sql("SET search_path TO %s", (self._schema,)) cursor = connection.execute(text(command)) if cursor.returns_rows: if fetch == "all": result = [x._asdict() for x in cursor.fetchall()] elif fetch == "one": first_result = cursor.fetchone() result = [] if first_result is None else [first_result._asdict()] else: raise ValueError("Fetch parameter must be either 'one' or 'all'") return result return [] def run( self, command: str, fetch: Union[Literal["all"], Literal["one"]] = "all", ) -> str: """Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. """ result = self._execute(command, fetch) # Convert columns values to string to avoid issues with sqlalchemy # truncating text res = [ tuple(truncate_word(c, length=self._max_string_length) for c in r.values()) for r in result ] if not res: return "" else: return str(res) def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str: """Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. """ try: return self.get_table_info(table_names) except ValueError as e: """Format the error message""" return f"Error: {e}" def run_no_throw( self, command: str, fetch: Union[Literal["all"], Literal["one"]] = "all", ) -> str: """Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. If the statement throws an error, the error message is returned. """ try: return self.run(command, fetch) except SQLAlchemyError as e: """Format the error message""" return f"Error: {e}"
[]
2024-01-10
robocorp/langchain
libs~core~tests~unit_tests~runnables~test_runnable.py
import sys from functools import partial from operator import itemgetter from typing import ( Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Sequence, Union, cast, ) from uuid import UUID import pytest from freezegun import freeze_time from pytest_mock import MockerFixture from syrupy import SnapshotAssertion from typing_extensions import TypedDict from langchain_core.callbacks.manager import ( Callbacks, atrace_as_chain_group, trace_as_chain_group, ) from langchain_core.documents import Document from langchain_core.load import dumpd, dumps from langchain_core.messages import ( AIMessage, AIMessageChunk, HumanMessage, SystemMessage, ) from langchain_core.output_parsers import ( BaseOutputParser, CommaSeparatedListOutputParser, StrOutputParser, ) from langchain_core.prompt_values import ChatPromptValue, StringPromptValue from langchain_core.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, PromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.pydantic_v1 import BaseModel from langchain_core.retrievers import BaseRetriever from langchain_core.runnables import ( AddableDict, ConfigurableField, ConfigurableFieldMultiOption, ConfigurableFieldSingleOption, RouterRunnable, Runnable, RunnableBinding, RunnableBranch, RunnableConfig, RunnableGenerator, RunnableLambda, RunnableParallel, RunnablePassthrough, RunnablePick, RunnableSequence, RunnableWithFallbacks, add, ) from langchain_core.tools import BaseTool, tool from langchain_core.tracers import ( BaseTracer, ConsoleCallbackHandler, Run, RunLog, RunLogPatch, ) from langchain_core.tracers.context import collect_runs from tests.unit_tests.fake.chat_model import FakeListChatModel from tests.unit_tests.fake.llm import FakeListLLM, FakeStreamingListLLM class FakeTracer(BaseTracer): """Fake tracer that records LangChain execution. 
It replaces run ids with deterministic UUIDs for snapshotting.""" def __init__(self) -> None: """Initialize the tracer.""" super().__init__() self.runs: List[Run] = [] self.uuids_map: Dict[UUID, UUID] = {} self.uuids_generator = ( UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000) ) def _replace_uuid(self, uuid: UUID) -> UUID: if uuid not in self.uuids_map: self.uuids_map[uuid] = next(self.uuids_generator) return self.uuids_map[uuid] def _copy_run(self, run: Run) -> Run: return run.copy( update={ "id": self._replace_uuid(run.id), "parent_run_id": self.uuids_map[run.parent_run_id] if run.parent_run_id else None, "child_runs": [self._copy_run(child) for child in run.child_runs], "execution_order": None, "child_execution_order": None, } ) def _persist_run(self, run: Run) -> None: """Persist a run.""" self.runs.append(self._copy_run(run)) class FakeRunnable(Runnable[str, int]): def invoke( self, input: str, config: Optional[RunnableConfig] = None, ) -> int: return len(input) class FakeRetriever(BaseRetriever): def _get_relevant_documents( self, query: str, *, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: return [Document(page_content="foo"), Document(page_content="bar")] async def _aget_relevant_documents( self, query: str, *, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: return [Document(page_content="foo"), Document(page_content="bar")] def test_schemas(snapshot: SnapshotAssertion) -> None: fake = FakeRunnable() # str -> int assert fake.input_schema.schema() == { "title": "FakeRunnableInput", "type": "string", } assert fake.output_schema.schema() == { "title": "FakeRunnableOutput", "type": "integer", } assert fake.config_schema(include=["tags", "metadata", "run_name"]).schema() == { "title": "FakeRunnableConfig", "type": "object", "properties": { "metadata": {"title": "Metadata", "type": "object"}, "run_name": {"title": "Run Name", "type": "string"}, "tags": {"items": {"type": "string"}, "title": "Tags", "type": "array"}, }, } fake_bound = FakeRunnable().bind(a="b") # str -> int assert fake_bound.input_schema.schema() == { "title": "FakeRunnableInput", "type": "string", } assert fake_bound.output_schema.schema() == { "title": "FakeRunnableOutput", "type": "integer", } fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,)) # str -> int assert fake_w_fallbacks.input_schema.schema() == { "title": "FakeRunnableInput", "type": "string", } assert fake_w_fallbacks.output_schema.schema() == { "title": "FakeRunnableOutput", "type": "integer", } def typed_lambda_impl(x: str) -> int: return len(x) typed_lambda = RunnableLambda(typed_lambda_impl) # str -> int assert typed_lambda.input_schema.schema() == { "title": "typed_lambda_impl_input", "type": "string", } assert typed_lambda.output_schema.schema() == { "title": "typed_lambda_impl_output", "type": "integer", } async def typed_async_lambda_impl(x: str) -> int: return len(x) typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl) # str -> int assert typed_async_lambda.input_schema.schema() == { "title": "typed_async_lambda_impl_input", "type": "string", } assert typed_async_lambda.output_schema.schema() == { "title": "typed_async_lambda_impl_output", "type": "integer", } fake_ret = FakeRetriever() # str -> List[Document] assert fake_ret.input_schema.schema() == { "title": "FakeRetrieverInput", "type": "string", } assert 
fake_ret.output_schema.schema() == { "title": "FakeRetrieverOutput", "type": "array", "items": {"$ref": "#/definitions/Document"}, "definitions": { "Document": { "title": "Document", "description": "Class for storing a piece of text and associated metadata.", # noqa: E501 "type": "object", "properties": { "page_content": {"title": "Page Content", "type": "string"}, "metadata": {"title": "Metadata", "type": "object"}, "type": { "title": "Type", "enum": ["Document"], "default": "Document", "type": "string", }, }, "required": ["page_content"], } }, } fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]] assert fake_llm.input_schema.schema() == snapshot assert fake_llm.output_schema.schema() == { "title": "FakeListLLMOutput", "type": "string", } fake_chat = FakeListChatModel(responses=["a"]) # str -> List[List[str]] assert fake_chat.input_schema.schema() == snapshot assert fake_chat.output_schema.schema() == snapshot chat_prompt = ChatPromptTemplate.from_messages( [ MessagesPlaceholder(variable_name="history"), ("human", "Hello, how are you?"), ] ) assert chat_prompt.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": { "history": { "title": "History", "type": "array", "items": { "anyOf": [ {"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": "#/definitions/ToolMessage"}, ] }, } }, "definitions": { "AIMessage": { "title": "AIMessage", "description": "A Message from an AI.", "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "ai", "enum": ["ai"], "type": "string", }, "example": { "title": "Example", "default": False, "type": "boolean", }, }, "required": ["content"], }, "HumanMessage": { "title": "HumanMessage", "description": "A Message from a human.", "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "human", "enum": ["human"], "type": "string", }, "example": { "title": "Example", "default": False, "type": "boolean", }, }, "required": ["content"], }, "ChatMessage": { "title": "ChatMessage", "description": "A Message that can be assigned an arbitrary speaker (i.e. 
role).", # noqa "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "chat", "enum": ["chat"], "type": "string", }, "role": {"title": "Role", "type": "string"}, }, "required": ["content", "role"], }, "SystemMessage": { "title": "SystemMessage", "description": "A Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "system", "enum": ["system"], "type": "string", }, }, "required": ["content"], }, "FunctionMessage": { "title": "FunctionMessage", "description": "A Message for passing the result of executing a function back to a model.", # noqa "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "function", "enum": ["function"], "type": "string", }, "name": {"title": "Name", "type": "string"}, }, "required": ["content", "name"], }, "ToolMessage": { "title": "ToolMessage", "description": "A Message for passing the result of executing a tool back to a model.", # noqa "type": "object", "properties": { "content": { "title": "Content", "anyOf": [ {"type": "string"}, { "type": "array", "items": { "anyOf": [{"type": "string"}, {"type": "object"}] }, }, ], }, "additional_kwargs": { "title": "Additional Kwargs", "type": "object", }, "type": { "title": "Type", "default": "tool", "enum": ["tool"], "type": "string", }, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, }, "required": ["content", "tool_call_id"], }, }, } assert chat_prompt.output_schema.schema() == snapshot prompt = PromptTemplate.from_template("Hello, {name}!") assert prompt.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}}, } assert prompt.output_schema.schema() == snapshot prompt_mapper = PromptTemplate.from_template("Hello, {name}!").map() assert prompt_mapper.input_schema.schema() == { "definitions": { "PromptInput": { "properties": {"name": {"title": "Name", "type": "string"}}, "title": "PromptInput", "type": "object", } }, "items": {"$ref": "#/definitions/PromptInput"}, "type": "array", "title": "RunnableEach<PromptTemplate>Input", } assert prompt_mapper.output_schema.schema() == snapshot list_parser = CommaSeparatedListOutputParser() assert list_parser.input_schema.schema() == snapshot assert list_parser.output_schema.schema() == { "title": "CommaSeparatedListOutputParserOutput", "type": "array", "items": {"type": "string"}, } seq = prompt | fake_llm | list_parser assert seq.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}}, } assert seq.output_schema.schema() == { "type": "array", "items": {"type": "string"}, "title": "CommaSeparatedListOutputParserOutput", } router: Runnable = RouterRunnable({}) assert 
router.input_schema.schema() == { "title": "RouterRunnableInput", "$ref": "#/definitions/RouterInput", "definitions": { "RouterInput": { "title": "RouterInput", "type": "object", "properties": { "key": {"title": "Key", "type": "string"}, "input": {"title": "Input"}, }, "required": ["key", "input"], } }, } assert router.output_schema.schema() == {"title": "RouterRunnableOutput"} seq_w_map: Runnable = ( prompt | fake_llm | { "original": RunnablePassthrough(input_type=str), "as_list": list_parser, "length": typed_lambda_impl, } ) assert seq_w_map.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}}, } assert seq_w_map.output_schema.schema() == { "title": "RunnableParallel<original,as_list,length>Output", "type": "object", "properties": { "original": {"title": "Original", "type": "string"}, "length": {"title": "Length", "type": "integer"}, "as_list": { "title": "As List", "type": "array", "items": {"type": "string"}, }, }, } def test_passthrough_assign_schema() -> None: retriever = FakeRetriever() # str -> List[Document] prompt = PromptTemplate.from_template("{context} {question}") fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]] seq_w_assign: Runnable = ( RunnablePassthrough.assign(context=itemgetter("question") | retriever) | prompt | fake_llm ) assert seq_w_assign.input_schema.schema() == { "properties": {"question": {"title": "Question", "type": "string"}}, "title": "RunnableSequenceInput", "type": "object", } assert seq_w_assign.output_schema.schema() == { "title": "FakeListLLMOutput", "type": "string", } invalid_seq_w_assign: Runnable = ( RunnablePassthrough.assign(context=itemgetter("question") | retriever) | fake_llm ) # fallback to RunnableAssign.input_schema if next runnable doesn't have # expected dict input_schema assert invalid_seq_w_assign.input_schema.schema() == { "properties": {"question": {"title": "Question"}}, "title": "RunnableParallel<context>Input", "type": "object", } @pytest.mark.skipif( sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." 
) def test_lambda_schemas() -> None: first_lambda = lambda x: x["hello"] # noqa: E731 assert RunnableLambda(first_lambda).input_schema.schema() == { "title": "RunnableLambdaInput", "type": "object", "properties": {"hello": {"title": "Hello"}}, } second_lambda = lambda x, y: (x["hello"], x["bye"], y["bah"]) # noqa: E731 assert ( RunnableLambda(second_lambda).input_schema.schema() # type: ignore[arg-type] == { "title": "RunnableLambdaInput", "type": "object", "properties": {"hello": {"title": "Hello"}, "bye": {"title": "Bye"}}, } ) def get_value(input): # type: ignore[no-untyped-def] return input["variable_name"] assert RunnableLambda(get_value).input_schema.schema() == { "title": "get_value_input", "type": "object", "properties": {"variable_name": {"title": "Variable Name"}}, } async def aget_value(input): # type: ignore[no-untyped-def] return (input["variable_name"], input.get("another")) assert RunnableLambda(aget_value).input_schema.schema() == { "title": "aget_value_input", "type": "object", "properties": { "another": {"title": "Another"}, "variable_name": {"title": "Variable Name"}, }, } async def aget_values(input): # type: ignore[no-untyped-def] return { "hello": input["variable_name"], "bye": input["variable_name"], "byebye": input["yo"], } assert RunnableLambda(aget_values).input_schema.schema() == { "title": "aget_values_input", "type": "object", "properties": { "variable_name": {"title": "Variable Name"}, "yo": {"title": "Yo"}, }, } class InputType(TypedDict): variable_name: str yo: int class OutputType(TypedDict): hello: str bye: str byebye: int async def aget_values_typed(input: InputType) -> OutputType: return { "hello": input["variable_name"], "bye": input["variable_name"], "byebye": input["yo"], } assert ( RunnableLambda(aget_values_typed).input_schema.schema() # type: ignore[arg-type] == { "title": "aget_values_typed_input", "$ref": "#/definitions/InputType", "definitions": { "InputType": { "properties": { "variable_name": { "title": "Variable " "Name", "type": "string", }, "yo": {"title": "Yo", "type": "integer"}, }, "required": ["variable_name", "yo"], "title": "InputType", "type": "object", } }, } ) assert RunnableLambda(aget_values_typed).output_schema.schema() == { # type: ignore[arg-type] "title": "aget_values_typed_output", "$ref": "#/definitions/OutputType", "definitions": { "OutputType": { "properties": { "bye": {"title": "Bye", "type": "string"}, "byebye": {"title": "Byebye", "type": "integer"}, "hello": {"title": "Hello", "type": "string"}, }, "required": ["hello", "bye", "byebye"], "title": "OutputType", "type": "object", } }, } def test_with_types_with_type_generics() -> None: """Verify that with_types works if we use things like List[int]""" def foo(x: int) -> None: """Add one to the input.""" raise NotImplementedError() # Try specifying some RunnableLambda(foo).with_types( output_type=List[int], # type: ignore[arg-type] input_type=List[int], # type: ignore[arg-type] ) RunnableLambda(foo).with_types( output_type=Sequence[int], # type: ignore[arg-type] input_type=Sequence[int], # type: ignore[arg-type] ) def test_schema_complex_seq() -> None: prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?") prompt2 = ChatPromptTemplate.from_template( "what country is the city {city} in? 
respond in {language}" ) model = FakeListChatModel(responses=[""]) chain1: Runnable = RunnableSequence( prompt1, model, StrOutputParser(), name="city_chain" ) assert chain1.name == "city_chain" chain2: Runnable = ( {"city": chain1, "language": itemgetter("language")} | prompt2 | model | StrOutputParser() ) assert chain2.input_schema.schema() == { "title": "RunnableParallel<city,language>Input", "type": "object", "properties": { "person": {"title": "Person", "type": "string"}, "language": {"title": "Language"}, }, } assert chain2.output_schema.schema() == { "title": "StrOutputParserOutput", "type": "string", } assert chain2.with_types(input_type=str).input_schema.schema() == { "title": "RunnableSequenceInput", "type": "string", } assert chain2.with_types(input_type=int).output_schema.schema() == { "title": "StrOutputParserOutput", "type": "string", } class InputType(BaseModel): person: str assert chain2.with_types(input_type=InputType).input_schema.schema() == { "title": "InputType", "type": "object", "properties": {"person": {"title": "Person", "type": "string"}}, "required": ["person"], } def test_configurable_fields() -> None: fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]] assert fake_llm.invoke("...") == "a" fake_llm_configurable = fake_llm.configurable_fields( responses=ConfigurableField( id="llm_responses", name="LLM Responses", description="A list of fake responses for this LLM", ) ) assert fake_llm_configurable.invoke("...") == "a" assert fake_llm_configurable.config_schema().schema() == { "title": "RunnableConfigurableFieldsConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": { "Configurable": { "title": "Configurable", "type": "object", "properties": { "llm_responses": { "title": "LLM Responses", "description": "A list of fake responses for this LLM", "default": ["a"], "type": "array", "items": {"type": "string"}, } }, } }, } fake_llm_configured = fake_llm_configurable.with_config( configurable={"llm_responses": ["b"]} ) assert fake_llm_configured.invoke("...") == "b" prompt = PromptTemplate.from_template("Hello, {name}!") assert prompt.invoke({"name": "John"}) == StringPromptValue(text="Hello, John!") prompt_configurable = prompt.configurable_fields( template=ConfigurableField( id="prompt_template", name="Prompt Template", description="The prompt template for this chain", ) ) assert prompt_configurable.invoke({"name": "John"}) == StringPromptValue( text="Hello, John!" ) assert prompt_configurable.config_schema().schema() == { "title": "RunnableConfigurableFieldsConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": { "Configurable": { "title": "Configurable", "type": "object", "properties": { "prompt_template": { "title": "Prompt Template", "description": "The prompt template for this chain", "default": "Hello, {name}!", "type": "string", } }, } }, } prompt_configured = prompt_configurable.with_config( configurable={"prompt_template": "Hello, {name}! {name}!"} ) assert prompt_configured.invoke({"name": "John"}) == StringPromptValue( text="Hello, John! John!" 
) assert prompt_configurable.with_config( configurable={"prompt_template": "Hello {name} in {lang}"} ).input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": { "lang": {"title": "Lang", "type": "string"}, "name": {"title": "Name", "type": "string"}, }, } chain_configurable = prompt_configurable | fake_llm_configurable | StrOutputParser() assert chain_configurable.invoke({"name": "John"}) == "a" assert chain_configurable.config_schema().schema() == { "title": "RunnableSequenceConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": { "Configurable": { "title": "Configurable", "type": "object", "properties": { "llm_responses": { "title": "LLM Responses", "description": "A list of fake responses for this LLM", "default": ["a"], "type": "array", "items": {"type": "string"}, }, "prompt_template": { "title": "Prompt Template", "description": "The prompt template for this chain", "default": "Hello, {name}!", "type": "string", }, }, } }, } assert ( chain_configurable.with_config( configurable={ "prompt_template": "A very good morning to you, {name} {lang}!", "llm_responses": ["c"], } ).invoke({"name": "John", "lang": "en"}) == "c" ) assert chain_configurable.with_config( configurable={ "prompt_template": "A very good morning to you, {name} {lang}!", "llm_responses": ["c"], } ).input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": { "lang": {"title": "Lang", "type": "string"}, "name": {"title": "Name", "type": "string"}, }, } chain_with_map_configurable: Runnable = prompt_configurable | { "llm1": fake_llm_configurable | StrOutputParser(), "llm2": fake_llm_configurable | StrOutputParser(), "llm3": fake_llm.configurable_fields( responses=ConfigurableField("other_responses") ) | StrOutputParser(), } assert chain_with_map_configurable.invoke({"name": "John"}) == { "llm1": "a", "llm2": "a", "llm3": "a", } assert chain_with_map_configurable.config_schema().schema() == { "title": "RunnableSequenceConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": { "Configurable": { "title": "Configurable", "type": "object", "properties": { "llm_responses": { "title": "LLM Responses", "description": "A list of fake responses for this LLM", "default": ["a"], "type": "array", "items": {"type": "string"}, }, "other_responses": { "title": "Other Responses", "default": ["a"], "type": "array", "items": {"type": "string"}, }, "prompt_template": { "title": "Prompt Template", "description": "The prompt template for this chain", "default": "Hello, {name}!", "type": "string", }, }, } }, } assert chain_with_map_configurable.with_config( configurable={ "prompt_template": "A very good morning to you, {name}!", "llm_responses": ["c"], "other_responses": ["d"], } ).invoke({"name": "John"}) == {"llm1": "c", "llm2": "c", "llm3": "d"} def test_configurable_alts_factory() -> None: fake_llm = FakeListLLM(responses=["a"]).configurable_alternatives( ConfigurableField(id="llm", name="LLM"), chat=partial(FakeListLLM, responses=["b"]), ) assert fake_llm.invoke("...") == "a" assert fake_llm.with_config(configurable={"llm": "chat"}).invoke("...") == "b" def test_configurable_fields_prefix_keys() -> None: fake_chat = FakeListChatModel(responses=["b"]).configurable_fields( responses=ConfigurableFieldMultiOption( id="responses", name="Chat Responses", options={ "hello": "A good morning to you!", "bye": "See you later!", "helpful": "How can I help you?", }, default=["hello", 
"bye"], ), # (sleep is a configurable field in FakeListChatModel) sleep=ConfigurableField( id="chat_sleep", is_shared=True, ), ) fake_llm = ( FakeListLLM(responses=["a"]) .configurable_fields( responses=ConfigurableField( id="responses", name="LLM Responses", description="A list of fake responses for this LLM", ) ) .configurable_alternatives( ConfigurableField(id="llm", name="LLM"), chat=fake_chat | StrOutputParser(), prefix_keys=True, ) ) prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields( template=ConfigurableFieldSingleOption( id="prompt_template", name="Prompt Template", description="The prompt template for this chain", options={ "hello": "Hello, {name}!", "good_morning": "A very good morning to you, {name}!", }, default="hello", ) ) chain = prompt | fake_llm assert chain.config_schema().schema() == { "title": "RunnableSequenceConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": { "LLM": { "title": "LLM", "description": "An enumeration.", "enum": ["chat", "default"], "type": "string", }, "Chat_Responses": { "title": "Chat Responses", "description": "An enumeration.", "enum": ["hello", "bye", "helpful"], "type": "string", }, "Prompt_Template": { "title": "Prompt Template", "description": "An enumeration.", "enum": ["hello", "good_morning"], "type": "string", }, "Configurable": { "title": "Configurable", "type": "object", "properties": { "prompt_template": { "title": "Prompt Template", "description": "The prompt template for this chain", "default": "hello", "allOf": [{"$ref": "#/definitions/Prompt_Template"}], }, "llm": { "title": "LLM", "default": "default", "allOf": [{"$ref": "#/definitions/LLM"}], }, # not prefixed because marked as shared "chat_sleep": { "title": "Chat Sleep", "type": "number", }, # prefixed for "chat" option "llm==chat/responses": { "title": "Chat Responses", "default": ["hello", "bye"], "type": "array", "items": {"$ref": "#/definitions/Chat_Responses"}, }, # prefixed for "default" option "llm==default/responses": { "title": "LLM Responses", "description": "A list of fake responses for this LLM", "default": ["a"], "type": "array", "items": {"type": "string"}, }, }, }, }, } def test_configurable_fields_example() -> None: fake_chat = FakeListChatModel(responses=["b"]).configurable_fields( responses=ConfigurableFieldMultiOption( id="chat_responses", name="Chat Responses", options={ "hello": "A good morning to you!", "bye": "See you later!", "helpful": "How can I help you?", }, default=["hello", "bye"], ) ) fake_llm = ( FakeListLLM(responses=["a"]) .configurable_fields( responses=ConfigurableField( id="llm_responses", name="LLM Responses", description="A list of fake responses for this LLM", ) ) .configurable_alternatives( ConfigurableField(id="llm", name="LLM"), chat=fake_chat | StrOutputParser(), ) ) prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields( template=ConfigurableFieldSingleOption( id="prompt_template", name="Prompt Template", description="The prompt template for this chain", options={ "hello": "Hello, {name}!", "good_morning": "A very good morning to you, {name}!", }, default="hello", ) ) # deduplication of configurable fields chain_configurable = prompt | fake_llm | (lambda x: {"name": x}) | prompt | fake_llm assert chain_configurable.invoke({"name": "John"}) == "a" assert chain_configurable.config_schema().schema() == { "title": "RunnableSequenceConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, 
"definitions": { "LLM": { "title": "LLM", "description": "An enumeration.", "enum": ["chat", "default"], "type": "string", }, "Chat_Responses": { "description": "An enumeration.", "enum": ["hello", "bye", "helpful"], "title": "Chat Responses", "type": "string", }, "Prompt_Template": { "description": "An enumeration.", "enum": ["hello", "good_morning"], "title": "Prompt Template", "type": "string", }, "Configurable": { "title": "Configurable", "type": "object", "properties": { "chat_responses": { "default": ["hello", "bye"], "items": {"$ref": "#/definitions/Chat_Responses"}, "title": "Chat Responses", "type": "array", }, "llm": { "title": "LLM", "default": "default", "allOf": [{"$ref": "#/definitions/LLM"}], }, "llm_responses": { "title": "LLM Responses", "description": "A list of fake responses for this LLM", "default": ["a"], "type": "array", "items": {"type": "string"}, }, "prompt_template": { "title": "Prompt Template", "description": "The prompt template for this chain", "default": "hello", "allOf": [{"$ref": "#/definitions/Prompt_Template"}], }, }, }, }, } with pytest.raises(ValueError): chain_configurable.with_config(configurable={"llm123": "chat"}) assert ( chain_configurable.with_config(configurable={"llm": "chat"}).invoke( {"name": "John"} ) == "A good morning to you!" ) assert ( chain_configurable.with_config( configurable={"llm": "chat", "chat_responses": ["helpful"]} ).invoke({"name": "John"}) == "How can I help you?" ) async def test_passthrough_tap_async(mocker: MockerFixture) -> None: fake = FakeRunnable() mock = mocker.Mock() seq: Runnable = fake | RunnablePassthrough(mock) assert await seq.ainvoke("hello") == 5 assert mock.call_args_list == [mocker.call(5)] mock.reset_mock() assert [ part async for part in seq.astream("hello", dict(metadata={"key": "value"})) ] == [5] assert mock.call_args_list == [mocker.call(5)] mock.reset_mock() assert seq.invoke("hello") == 5 assert mock.call_args_list == [mocker.call(5)] mock.reset_mock() assert [part for part in seq.stream("hello", dict(metadata={"key": "value"}))] == [ 5 ] assert mock.call_args_list == [mocker.call(5)] mock.reset_mock() async def test_with_config(mocker: MockerFixture) -> None: fake = FakeRunnable() spy = mocker.spy(fake, "invoke") assert fake.with_config(tags=["a-tag"]).invoke("hello") == 5 assert spy.call_args_list == [ mocker.call("hello", dict(tags=["a-tag"])), ] spy.reset_mock() fake_1: Runnable = RunnablePassthrough() fake_2: Runnable = RunnablePassthrough() spy_seq_step = mocker.spy(fake_1.__class__, "invoke") sequence = fake_1.with_config(tags=["a-tag"]) | fake_2.with_config( tags=["b-tag"], max_concurrency=5 ) assert sequence.invoke("hello") == "hello" assert len(spy_seq_step.call_args_list) == 2 for i, call in enumerate(spy_seq_step.call_args_list): assert call.args[1] == "hello" if i == 0: assert call.args[2].get("tags") == ["a-tag"] assert call.args[2].get("max_concurrency") is None else: assert call.args[2].get("tags") == ["b-tag"] assert call.args[2].get("max_concurrency") == 5 mocker.stop(spy_seq_step) assert [ *fake.with_config(tags=["a-tag"]).stream( "hello", dict(metadata={"key": "value"}) ) ] == [5] assert spy.call_args_list == [ mocker.call("hello", dict(tags=["a-tag"], metadata={"key": "value"})), ] spy.reset_mock() assert fake.with_config(recursion_limit=5).batch( ["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})] ) == [5, 7] assert len(spy.call_args_list) == 2 for i, call in enumerate( sorted(spy.call_args_list, key=lambda x: 0 if x.args[0] == "hello" else 1) ): 
assert call.args[0] == ("hello" if i == 0 else "wooorld") if i == 0: assert call.args[1].get("recursion_limit") == 5 assert call.args[1].get("tags") == ["a-tag"] assert call.args[1].get("metadata") == {} else: assert call.args[1].get("recursion_limit") == 5 assert call.args[1].get("tags") == [] assert call.args[1].get("metadata") == {"key": "value"} spy.reset_mock() assert fake.with_config(metadata={"a": "b"}).batch( ["hello", "wooorld"], dict(tags=["a-tag"]) ) == [5, 7] assert len(spy.call_args_list) == 2 for i, call in enumerate(spy.call_args_list): assert call.args[0] == ("hello" if i == 0 else "wooorld") assert call.args[1].get("tags") == ["a-tag"] assert call.args[1].get("metadata") == {"a": "b"} spy.reset_mock() handler = ConsoleCallbackHandler() assert ( await fake.with_config(metadata={"a": "b"}).ainvoke( "hello", config={"callbacks": [handler]} ) == 5 ) assert spy.call_args_list == [ mocker.call("hello", dict(callbacks=[handler], metadata={"a": "b"})), ] spy.reset_mock() assert [ part async for part in fake.with_config(metadata={"a": "b"}).astream("hello") ] == [5] assert spy.call_args_list == [ mocker.call("hello", dict(metadata={"a": "b"})), ] spy.reset_mock() assert await fake.with_config(recursion_limit=5, tags=["c"]).abatch( ["hello", "wooorld"], dict(metadata={"key": "value"}) ) == [ 5, 7, ] assert spy.call_args_list == [ mocker.call( "hello", dict( metadata={"key": "value"}, tags=["c"], callbacks=None, recursion_limit=5, ), ), mocker.call( "wooorld", dict( metadata={"key": "value"}, tags=["c"], callbacks=None, recursion_limit=5, ), ), ] async def test_default_method_implementations(mocker: MockerFixture) -> None: fake = FakeRunnable() spy = mocker.spy(fake, "invoke") assert fake.invoke("hello", dict(tags=["a-tag"])) == 5 assert spy.call_args_list == [ mocker.call("hello", dict(tags=["a-tag"])), ] spy.reset_mock() assert [*fake.stream("hello", dict(metadata={"key": "value"}))] == [5] assert spy.call_args_list == [ mocker.call("hello", dict(metadata={"key": "value"})), ] spy.reset_mock() assert fake.batch( ["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})] ) == [5, 7] assert len(spy.call_args_list) == 2 for i, call in enumerate(spy.call_args_list): assert call.args[0] == ("hello" if i == 0 else "wooorld") if i == 0: assert call.args[1].get("tags") == ["a-tag"] assert call.args[1].get("metadata") == {} else: assert call.args[1].get("tags") == [] assert call.args[1].get("metadata") == {"key": "value"} spy.reset_mock() assert fake.batch(["hello", "wooorld"], dict(tags=["a-tag"])) == [5, 7] assert len(spy.call_args_list) == 2 for i, call in enumerate(spy.call_args_list): assert call.args[0] == ("hello" if i == 0 else "wooorld") assert call.args[1].get("tags") == ["a-tag"] assert call.args[1].get("metadata") == {} spy.reset_mock() assert await fake.ainvoke("hello", config={"callbacks": []}) == 5 assert spy.call_args_list == [ mocker.call("hello", dict(callbacks=[])), ] spy.reset_mock() assert [part async for part in fake.astream("hello")] == [5] assert spy.call_args_list == [ mocker.call("hello", None), ] spy.reset_mock() assert await fake.abatch(["hello", "wooorld"], dict(metadata={"key": "value"})) == [ 5, 7, ] assert spy.call_args_list == [ mocker.call( "hello", dict( metadata={"key": "value"}, tags=[], callbacks=None, recursion_limit=25, ), ), mocker.call( "wooorld", dict( metadata={"key": "value"}, tags=[], callbacks=None, recursion_limit=25, ), ), ] async def test_prompt() -> None: prompt = ChatPromptTemplate.from_messages( messages=[ 
SystemMessage(content="You are a nice assistant."), HumanMessagePromptTemplate.from_template("{question}"), ] ) expected = ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert prompt.invoke({"question": "What is your name?"}) == expected assert prompt.batch( [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ] ) == [ expected, ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your favorite color?"), ] ), ] assert [*prompt.stream({"question": "What is your name?"})] == [expected] assert await prompt.ainvoke({"question": "What is your name?"}) == expected assert await prompt.abatch( [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ] ) == [ expected, ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your favorite color?"), ] ), ] assert [ part async for part in prompt.astream({"question": "What is your name?"}) ] == [expected] stream_log = [ part async for part in prompt.astream_log({"question": "What is your name?"}) ] assert len(stream_log[0].ops) == 1 assert stream_log[0].ops[0]["op"] == "replace" assert stream_log[0].ops[0]["path"] == "" assert stream_log[0].ops[0]["value"]["logs"] == {} assert stream_log[0].ops[0]["value"]["final_output"] is None assert stream_log[0].ops[0]["value"]["streamed_output"] == [] assert isinstance(stream_log[0].ops[0]["value"]["id"], str) assert stream_log[1:] == [ RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": expected}, { "op": "replace", "path": "/final_output", "value": ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), }, ), ] stream_log_state = [ part async for part in prompt.astream_log( {"question": "What is your name?"}, diff=False ) ] # remove random id stream_log[0].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000" stream_log_state[-1].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000" stream_log_state[-1].state["id"] = "00000000-0000-0000-0000-000000000000" # assert output with diff=False matches output with diff=True assert stream_log_state[-1].ops == [op for chunk in stream_log for op in chunk.ops] assert stream_log_state[-1] == RunLog( *[op for chunk in stream_log for op in chunk.ops], state={ "final_output": ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), "id": "00000000-0000-0000-0000-000000000000", "logs": {}, "streamed_output": [ ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) ], }, ) # nested inside trace_with_chain_group async with atrace_as_chain_group("a_group") as manager: stream_log_nested = [ part async for part in prompt.astream_log( {"question": "What is your name?"}, config={"callbacks": manager} ) ] assert len(stream_log_nested[0].ops) == 1 assert stream_log_nested[0].ops[0]["op"] == "replace" assert stream_log_nested[0].ops[0]["path"] == "" assert stream_log_nested[0].ops[0]["value"]["logs"] == {} assert stream_log_nested[0].ops[0]["value"]["final_output"] is None assert stream_log_nested[0].ops[0]["value"]["streamed_output"] == [] assert isinstance(stream_log_nested[0].ops[0]["value"]["id"], str) assert stream_log_nested[1:] == [ RunLogPatch( {"op": "add", "path": "/streamed_output/-", 
"value": expected}, { "op": "replace", "path": "/final_output", "value": ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), }, ), ] def test_prompt_template_params() -> None: prompt = ChatPromptTemplate.from_template( "Respond to the following question: {question}" ) result = prompt.invoke( { "question": "test", "topic": "test", } ) assert result == ChatPromptValue( messages=[HumanMessage(content="Respond to the following question: test")] ) with pytest.raises(KeyError): prompt.invoke({}) def test_with_listeners(mocker: MockerFixture) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo"]) chain: Runnable = prompt | chat mock_start = mocker.Mock() mock_end = mocker.Mock() chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke( {"question": "Who are you?"} ) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == "RunnableSequence" assert mock_end.call_count == 1 mock_start.reset_mock() mock_end.reset_mock() with trace_as_chain_group("hello") as manager: chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke( {"question": "Who are you?"}, {"callbacks": manager} ) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == "RunnableSequence" assert mock_end.call_count == 1 async def test_with_listeners_async(mocker: MockerFixture) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo"]) chain: Runnable = prompt | chat mock_start = mocker.Mock() mock_end = mocker.Mock() await chain.with_listeners(on_start=mock_start, on_end=mock_end).ainvoke( {"question": "Who are you?"} ) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == "RunnableSequence" assert mock_end.call_count == 1 mock_start.reset_mock() mock_end.reset_mock() async with atrace_as_chain_group("hello") as manager: await chain.with_listeners(on_start=mock_start, on_end=mock_end).ainvoke( {"question": "Who are you?"}, {"callbacks": manager} ) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == "RunnableSequence" assert mock_end.call_count == 1 @freeze_time("2023-01-01") def test_prompt_with_chat_model( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo"]) chain: Runnable = prompt | chat assert repr(chain) == snapshot assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [] assert chain.last == chat assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "invoke") tracer = FakeTracer() assert chain.invoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == AIMessage(content="foo") assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert tracer.runs == snapshot mocker.stop(prompt_spy) mocker.stop(chat_spy) # Test batch prompt_spy = mocker.spy(prompt.__class__, "batch") chat_spy = mocker.spy(chat.__class__, "batch") tracer = FakeTracer() assert chain.batch( [ {"question": "What is your 
name?"}, {"question": "What is your favorite color?"}, ], dict(callbacks=[tracer]), ) == [ AIMessage(content="foo"), AIMessage(content="foo"), ] assert prompt_spy.call_args.args[1] == [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ] assert chat_spy.call_args.args[1] == [ ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your favorite color?"), ] ), ] assert ( len( [ r for r in tracer.runs if r.parent_run_id is None and len(r.child_runs) == 2 ] ) == 2 ), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)" mocker.stop(prompt_spy) mocker.stop(chat_spy) # Test stream prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "stream") tracer = FakeTracer() assert [ *chain.stream({"question": "What is your name?"}, dict(callbacks=[tracer])) ] == [ AIMessageChunk(content="f"), AIMessageChunk(content="o"), AIMessageChunk(content="o"), ] assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) @freeze_time("2023-01-01") async def test_prompt_with_chat_model_async( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo"]) chain: Runnable = prompt | chat assert repr(chain) == snapshot assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [] assert chain.last == chat assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "ainvoke") chat_spy = mocker.spy(chat.__class__, "ainvoke") tracer = FakeTracer() assert await chain.ainvoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == AIMessage(content="foo") assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert tracer.runs == snapshot mocker.stop(prompt_spy) mocker.stop(chat_spy) # Test batch prompt_spy = mocker.spy(prompt.__class__, "abatch") chat_spy = mocker.spy(chat.__class__, "abatch") tracer = FakeTracer() assert await chain.abatch( [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ], dict(callbacks=[tracer]), ) == [ AIMessage(content="foo"), AIMessage(content="foo"), ] assert prompt_spy.call_args.args[1] == [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ] assert chat_spy.call_args.args[1] == [ ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your favorite color?"), ] ), ] assert ( len( [ r for r in tracer.runs if r.parent_run_id is None and len(r.child_runs) == 2 ] ) == 2 ), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)" mocker.stop(prompt_spy) mocker.stop(chat_spy) # Test stream prompt_spy = mocker.spy(prompt.__class__, "ainvoke") chat_spy = mocker.spy(chat.__class__, "astream") 
tracer = FakeTracer() assert [ a async for a in chain.astream( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) ] == [ AIMessageChunk(content="f"), AIMessageChunk(content="o"), AIMessageChunk(content="o"), ] assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) @freeze_time("2023-01-01") async def test_prompt_with_llm( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeListLLM(responses=["foo", "bar"]) chain: Runnable = prompt | llm assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [] assert chain.last == llm assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "ainvoke") llm_spy = mocker.spy(llm.__class__, "ainvoke") tracer = FakeTracer() assert ( await chain.ainvoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == "foo" ) assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert llm_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert tracer.runs == snapshot mocker.stop(prompt_spy) mocker.stop(llm_spy) # Test batch prompt_spy = mocker.spy(prompt.__class__, "abatch") llm_spy = mocker.spy(llm.__class__, "abatch") tracer = FakeTracer() assert await chain.abatch( [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ], dict(callbacks=[tracer]), ) == ["bar", "foo"] assert prompt_spy.call_args.args[1] == [ {"question": "What is your name?"}, {"question": "What is your favorite color?"}, ] assert llm_spy.call_args.args[1] == [ ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your favorite color?"), ] ), ] assert tracer.runs == snapshot mocker.stop(prompt_spy) mocker.stop(llm_spy) # Test stream prompt_spy = mocker.spy(prompt.__class__, "ainvoke") llm_spy = mocker.spy(llm.__class__, "astream") tracer = FakeTracer() assert [ token async for token in chain.astream( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) ] == ["bar"] assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert llm_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) prompt_spy.reset_mock() llm_spy.reset_mock() stream_log = [ part async for part in chain.astream_log({"question": "What is your name?"}) ] # remove ids from logs for part in stream_log: for op in part.ops: if ( isinstance(op["value"], dict) and "id" in op["value"] and not isinstance(op["value"]["id"], list) # serialized lc id ): del op["value"]["id"] expected = [ RunLogPatch( { "op": "replace", "path": "", "value": { "logs": {}, "final_output": None, "streamed_output": [], }, } ), RunLogPatch( { "op": "add", "path": "/logs/ChatPromptTemplate", "value": { "end_time": None, "final_output": None, "metadata": {}, "name": "ChatPromptTemplate", "start_time": "2023-01-01T00:00:00.000", "streamed_output": [], "streamed_output_str": [], "tags": ["seq:step:1"], "type": 
"prompt", }, } ), RunLogPatch( { "op": "add", "path": "/logs/ChatPromptTemplate/final_output", "value": ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), }, { "op": "add", "path": "/logs/ChatPromptTemplate/end_time", "value": "2023-01-01T00:00:00.000", }, ), RunLogPatch( { "op": "add", "path": "/logs/FakeListLLM", "value": { "end_time": None, "final_output": None, "metadata": {}, "name": "FakeListLLM", "start_time": "2023-01-01T00:00:00.000", "streamed_output": [], "streamed_output_str": [], "tags": ["seq:step:2"], "type": "llm", }, } ), RunLogPatch( { "op": "add", "path": "/logs/FakeListLLM/final_output", "value": { "generations": [ [{"generation_info": None, "text": "foo", "type": "Generation"}] ], "llm_output": None, "run": None, }, }, { "op": "add", "path": "/logs/FakeListLLM/end_time", "value": "2023-01-01T00:00:00.000", }, ), RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": "foo"}, {"op": "replace", "path": "/final_output", "value": "foo"}, ), ] assert stream_log == expected @freeze_time("2023-01-01") async def test_stream_log_retriever() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{documents}" + "{question}" ) llm = FakeListLLM(responses=["foo", "bar"]) chain: Runnable = ( {"documents": FakeRetriever(), "question": itemgetter("question")} | prompt | {"one": llm, "two": llm} ) stream_log = [ part async for part in chain.astream_log({"question": "What is your name?"}) ] # remove ids from logs for part in stream_log: for op in part.ops: if ( isinstance(op["value"], dict) and "id" in op["value"] and not isinstance(op["value"]["id"], list) # serialized lc id ): del op["value"]["id"] assert sorted(cast(RunLog, add(stream_log)).state["logs"]) == [ "ChatPromptTemplate", "FakeListLLM", "FakeListLLM:2", "Retriever", "RunnableLambda", "RunnableParallel<documents,question>", "RunnableParallel<one,two>", ] @freeze_time("2023-01-01") async def test_stream_log_lists() -> None: async def list_producer(input: AsyncIterator[Any]) -> AsyncIterator[AddableDict]: for i in range(4): yield AddableDict(alist=[str(i)]) chain: Runnable = RunnableGenerator(list_producer) stream_log = [ part async for part in chain.astream_log({"question": "What is your name?"}) ] # remove ids from logs for part in stream_log: for op in part.ops: if ( isinstance(op["value"], dict) and "id" in op["value"] and not isinstance(op["value"]["id"], list) # serialized lc id ): del op["value"]["id"] assert stream_log == [ RunLogPatch( { "op": "replace", "path": "", "value": {"final_output": None, "logs": {}, "streamed_output": []}, } ), RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["0"]}}, {"op": "replace", "path": "/final_output", "value": {"alist": ["0"]}}, ), RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["1"]}}, {"op": "add", "path": "/final_output/alist/1", "value": "1"}, ), RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["2"]}}, {"op": "add", "path": "/final_output/alist/2", "value": "2"}, ), RunLogPatch( {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["3"]}}, {"op": "add", "path": "/final_output/alist/3", "value": "3"}, ), ] state = add(stream_log) assert isinstance(state, RunLog) assert state.state == { "final_output": {"alist": ["0", "1", "2", "3"]}, "logs": {}, "streamed_output": [ {"alist": ["0"]}, {"alist": ["1"]}, {"alist": ["2"]}, {"alist": ["3"]}, ], } 
@freeze_time("2023-01-01") async def test_prompt_with_llm_and_async_lambda( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeListLLM(responses=["foo", "bar"]) async def passthrough(input: Any) -> Any: return input chain = prompt | llm | passthrough assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [llm] assert chain.last == RunnableLambda(func=passthrough) assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "ainvoke") llm_spy = mocker.spy(llm.__class__, "ainvoke") tracer = FakeTracer() assert ( await chain.ainvoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == "foo" ) assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert llm_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert tracer.runs == snapshot mocker.stop(prompt_spy) mocker.stop(llm_spy) @freeze_time("2023-01-01") def test_prompt_with_chat_model_and_parser( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo, bar"]) parser = CommaSeparatedListOutputParser() chain = prompt | chat | parser assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [chat] assert chain.last == parser assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "invoke") parser_spy = mocker.spy(parser.__class__, "invoke") tracer = FakeTracer() assert chain.invoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == ["foo", "bar"] assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar") assert tracer.runs == snapshot @freeze_time("2023-01-01") def test_combining_sequences( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["foo, bar"]) parser = CommaSeparatedListOutputParser() chain = prompt | chat | parser assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [chat] assert chain.last == parser if sys.version_info >= (3, 9): assert dumps(chain, pretty=True) == snapshot prompt2 = ( SystemMessagePromptTemplate.from_template("You are a nicer assistant.") + "{question}" ) chat2 = FakeListChatModel(responses=["baz, qux"]) parser2 = CommaSeparatedListOutputParser() input_formatter: RunnableLambda[List[str], Dict[str, Any]] = RunnableLambda( lambda x: {"question": x[0] + x[1]} ) chain2 = cast(RunnableSequence, input_formatter | prompt2 | chat2 | parser2) assert isinstance(chain, RunnableSequence) assert chain2.first == input_formatter assert chain2.middle == [prompt2, chat2] assert chain2.last == parser2 if sys.version_info >= (3, 9): assert dumps(chain2, pretty=True) == snapshot combined_chain = cast(RunnableSequence, chain | chain2) assert combined_chain.first == prompt assert 
combined_chain.middle == [ chat, parser, input_formatter, prompt2, chat2, ] assert combined_chain.last == parser2 if sys.version_info >= (3, 9): assert dumps(combined_chain, pretty=True) == snapshot # Test invoke tracer = FakeTracer() assert combined_chain.invoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == ["baz", "qux"] if sys.version_info >= (3, 9): assert tracer.runs == snapshot @freeze_time("2023-01-01") def test_seq_dict_prompt_llm( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: passthrough = mocker.Mock(side_effect=lambda x: x) retriever = FakeRetriever() prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + """Context: {documents} Question: {question}""" ) chat = FakeListChatModel(responses=["foo, bar"]) parser = CommaSeparatedListOutputParser() chain: Runnable = ( { "question": RunnablePassthrough[str]() | passthrough, "documents": passthrough | retriever, "just_to_test_lambda": passthrough, } | prompt | chat | parser ) assert repr(chain) == snapshot assert isinstance(chain, RunnableSequence) assert isinstance(chain.first, RunnableParallel) assert chain.middle == [prompt, chat] assert chain.last == parser assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "invoke") parser_spy = mocker.spy(parser.__class__, "invoke") tracer = FakeTracer() assert chain.invoke("What is your name?", dict(callbacks=[tracer])) == [ "foo", "bar", ] assert prompt_spy.call_args.args[1] == { "documents": [Document(page_content="foo"), Document(page_content="bar")], "question": "What is your name?", "just_to_test_lambda": "What is your name?", } assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage( content="""Context: [Document(page_content='foo'), Document(page_content='bar')] Question: What is your name?""" ), ] ) assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar") assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 4 map_run = parent_run.child_runs[0] assert map_run.name == "RunnableParallel<question,documents,just_to_test_lambda>" assert len(map_run.child_runs) == 3 @freeze_time("2023-01-01") def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None: passthrough = mocker.Mock(side_effect=lambda x: x) prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["i'm a chatbot"]) llm = FakeListLLM(responses=["i'm a textbot"]) chain = ( prompt | passthrough | { "chat": chat, "llm": llm, } ) assert repr(chain) == snapshot assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [RunnableLambda(passthrough)] assert isinstance(chain.last, RunnableParallel) assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "invoke") llm_spy = mocker.spy(llm.__class__, "invoke") tracer = FakeTracer() assert chain.invoke( {"question": "What is your name?"}, dict(callbacks=[tracer]) ) == { "chat": AIMessage(content="i'm a chatbot"), "llm": "i'm a textbot", } assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You 
are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert llm_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 3 map_run = parent_run.child_runs[2] assert map_run.name == "RunnableParallel<chat,llm>" assert len(map_run.child_runs) == 2 @freeze_time("2023-01-01") async def test_router_runnable( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: chain1: Runnable = ChatPromptTemplate.from_template( "You are a math genius. Answer the question: {question}" ) | FakeListLLM(responses=["4"]) chain2: Runnable = ChatPromptTemplate.from_template( "You are an english major. Answer the question: {question}" ) | FakeListLLM(responses=["2"]) router: Runnable = RouterRunnable({"math": chain1, "english": chain2}) chain: Runnable = { "key": lambda x: x["key"], "input": {"question": lambda x: x["question"]}, } | router assert dumps(chain, pretty=True) == snapshot result = chain.invoke({"key": "math", "question": "2 + 2"}) assert result == "4" result2 = chain.batch( [{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}] ) assert result2 == ["4", "2"] result = await chain.ainvoke({"key": "math", "question": "2 + 2"}) assert result == "4" result2 = await chain.abatch( [{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}] ) assert result2 == ["4", "2"] # Test invoke router_spy = mocker.spy(router.__class__, "invoke") tracer = FakeTracer() assert ( chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer])) == "4" ) assert router_spy.call_args.args[1] == { "key": "math", "input": {"question": "2 + 2"}, } assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 2 router_run = parent_run.child_runs[1] assert router_run.name == "RunnableSequence" # TODO: should be RunnableRouter assert len(router_run.child_runs) == 2 @freeze_time("2023-01-01") async def test_higher_order_lambda_runnable( mocker: MockerFixture, snapshot: SnapshotAssertion ) -> None: math_chain: Runnable = ChatPromptTemplate.from_template( "You are a math genius. Answer the question: {question}" ) | FakeListLLM(responses=["4"]) english_chain: Runnable = ChatPromptTemplate.from_template( "You are an english major. 
Answer the question: {question}" ) | FakeListLLM(responses=["2"]) input_map: Runnable = RunnableParallel( key=lambda x: x["key"], input={"question": lambda x: x["question"]}, ) def router(input: Dict[str, Any]) -> Runnable: if input["key"] == "math": return itemgetter("input") | math_chain elif input["key"] == "english": return itemgetter("input") | english_chain else: raise ValueError(f"Unknown key: {input['key']}") chain: Runnable = input_map | router if sys.version_info >= (3, 9): assert dumps(chain, pretty=True) == snapshot result = chain.invoke({"key": "math", "question": "2 + 2"}) assert result == "4" result2 = chain.batch( [{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}] ) assert result2 == ["4", "2"] result = await chain.ainvoke({"key": "math", "question": "2 + 2"}) assert result == "4" result2 = await chain.abatch( [{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}] ) assert result2 == ["4", "2"] # Test invoke math_spy = mocker.spy(math_chain.__class__, "invoke") tracer = FakeTracer() assert ( chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer])) == "4" ) assert math_spy.call_args.args[1] == { "key": "math", "input": {"question": "2 + 2"}, } assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 2 router_run = parent_run.child_runs[1] assert router_run.name == "router" assert len(router_run.child_runs) == 1 math_run = router_run.child_runs[0] assert math_run.name == "RunnableSequence" assert len(math_run.child_runs) == 3 # Test ainvoke async def arouter(input: Dict[str, Any]) -> Runnable: if input["key"] == "math": return itemgetter("input") | math_chain elif input["key"] == "english": return itemgetter("input") | english_chain else: raise ValueError(f"Unknown key: {input['key']}") achain: Runnable = input_map | arouter math_spy = mocker.spy(math_chain.__class__, "ainvoke") tracer = FakeTracer() assert ( await achain.ainvoke( {"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer]) ) == "4" ) assert math_spy.call_args.args[1] == { "key": "math", "input": {"question": "2 + 2"}, } assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 2 router_run = parent_run.child_runs[1] assert router_run.name == "arouter" assert len(router_run.child_runs) == 1 math_run = router_run.child_runs[0] assert math_run.name == "RunnableSequence" assert len(math_run.child_runs) == 3 @freeze_time("2023-01-01") def test_seq_prompt_map(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None: passthrough = mocker.Mock(side_effect=lambda x: x) prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat = FakeListChatModel(responses=["i'm a chatbot"]) llm = FakeListLLM(responses=["i'm a textbot"]) chain = ( prompt | passthrough | { "chat": chat.bind(stop=["Thought:"]), "llm": llm, "passthrough": passthrough, } ) assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [RunnableLambda(passthrough)] assert isinstance(chain.last, RunnableParallel) assert dumps(chain, pretty=True) == snapshot # Test invoke prompt_spy = mocker.spy(prompt.__class__, "invoke") chat_spy = mocker.spy(chat.__class__, "invoke") llm_spy = mocker.spy(llm.__class__, "invoke") tracer = FakeTracer() assert chain.invoke( {"question": 
"What is your name?"}, dict(callbacks=[tracer]) ) == { "chat": AIMessage(content="i'm a chatbot"), "llm": "i'm a textbot", "passthrough": ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ), } assert prompt_spy.call_args.args[1] == {"question": "What is your name?"} assert chat_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert llm_spy.call_args.args[1] == ChatPromptValue( messages=[ SystemMessage(content="You are a nice assistant."), HumanMessage(content="What is your name?"), ] ) assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 3 map_run = parent_run.child_runs[2] assert map_run.name == "RunnableParallel<chat,llm,passthrough>" assert len(map_run.child_runs) == 3 def test_map_stream() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat_res = "i'm a chatbot" # sleep to better simulate a real stream chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = prompt | { "chat": chat.bind(stop=["Thought:"]), "llm": llm, "passthrough": RunnablePassthrough(), } stream = chain.stream({"question": "What is your name?"}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [ {"passthrough": prompt.invoke({"question": "What is your name?"})}, {"llm": "i"}, {"chat": AIMessageChunk(content="i")}, ] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1 assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get("chat").content == "i'm a chatbot" assert final_value.get("llm") == "i'm a textbot" assert final_value.get("passthrough") == prompt.invoke( {"question": "What is your name?"} ) chain_pick_one = chain.pick("llm") assert chain_pick_one.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "string", } stream = chain_pick_one.stream({"question": "What is your name?"}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] == "i" assert len(streamed_chunks) == len(llm_res) chain_pick_two = chain.assign(hello=RunnablePick("llm").pipe(llm)).pick( ["llm", "hello"] ) assert chain_pick_two.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "object", "properties": { "hello": {"title": "Hello", "type": "string"}, "llm": {"title": "Llm", "type": "string"}, }, } stream = chain_pick_two.stream({"question": "What is your name?"}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [ {"llm": "i"}, {"chat": AIMessageChunk(content="i")}, ] assert len(streamed_chunks) == len(llm_res) + len(chat_res) def test_map_stream_iterator_input() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat_res = "i'm a chatbot" # sleep to better simulate a real stream 
chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = ( prompt | llm | { "chat": chat.bind(stop=["Thought:"]), "llm": llm, "passthrough": RunnablePassthrough(), } ) stream = chain.stream({"question": "What is your name?"}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [ {"passthrough": "i"}, {"llm": "i"}, {"chat": AIMessageChunk(content="i")}, ] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res) assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get("chat").content == "i'm a chatbot" assert final_value.get("llm") == "i'm a textbot" assert final_value.get("passthrough") == "i'm a textbot" async def test_map_astream() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat_res = "i'm a chatbot" # sleep to better simulate a real stream chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = prompt | { "chat": chat.bind(stop=["Thought:"]), "llm": llm, "passthrough": RunnablePassthrough(), } stream = chain.astream({"question": "What is your name?"}) final_value = None streamed_chunks = [] async for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [ {"passthrough": prompt.invoke({"question": "What is your name?"})}, {"llm": "i"}, {"chat": AIMessageChunk(content="i")}, ] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1 assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get("chat").content == "i'm a chatbot" assert final_value.get("llm") == "i'm a textbot" assert final_value.get("passthrough") == prompt.invoke( {"question": "What is your name?"} ) # Test astream_log state accumulation final_state = None streamed_ops = [] async for chunk in chain.astream_log({"question": "What is your name?"}): streamed_ops.extend(chunk.ops) if final_state is None: final_state = chunk else: final_state += chunk final_state = cast(RunLog, final_state) assert final_state.state["final_output"] == final_value assert len(final_state.state["streamed_output"]) == len(streamed_chunks) assert isinstance(final_state.state["id"], str) assert len(final_state.ops) == len(streamed_ops) assert len(final_state.state["logs"]) == 5 assert ( final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate" ) assert final_state.state["logs"]["ChatPromptTemplate"][ "final_output" ] == prompt.invoke({"question": "What is your name?"}) assert ( final_state.state["logs"]["RunnableParallel<chat,llm,passthrough>"]["name"] == "RunnableParallel<chat,llm,passthrough>" ) assert sorted(final_state.state["logs"]) == [ "ChatPromptTemplate", "FakeListChatModel", "FakeStreamingListLLM", "RunnableParallel<chat,llm,passthrough>", "RunnablePassthrough", ] # Test astream_log with include filters final_state = None async for chunk in chain.astream_log( {"question": "What is your name?"}, include_names=["FakeListChatModel"] ): if final_state is None: final_state = chunk else: final_state += chunk final_state 
= cast(RunLog, final_state) assert final_state.state["final_output"] == final_value assert len(final_state.state["streamed_output"]) == len(streamed_chunks) assert len(final_state.state["logs"]) == 1 assert final_state.state["logs"]["FakeListChatModel"]["name"] == "FakeListChatModel" # Test astream_log with exclude filters final_state = None async for chunk in chain.astream_log( {"question": "What is your name?"}, exclude_names=["FakeListChatModel"] ): if final_state is None: final_state = chunk else: final_state += chunk final_state = cast(RunLog, final_state) assert final_state.state["final_output"] == final_value assert len(final_state.state["streamed_output"]) == len(streamed_chunks) assert len(final_state.state["logs"]) == 4 assert ( final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate" ) assert final_state.state["logs"]["ChatPromptTemplate"]["final_output"] == ( prompt.invoke({"question": "What is your name?"}) ) assert ( final_state.state["logs"]["RunnableParallel<chat,llm,passthrough>"]["name"] == "RunnableParallel<chat,llm,passthrough>" ) assert sorted(final_state.state["logs"]) == [ "ChatPromptTemplate", "FakeStreamingListLLM", "RunnableParallel<chat,llm,passthrough>", "RunnablePassthrough", ] async def test_map_astream_iterator_input() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) chat_res = "i'm a chatbot" # sleep to better simulate a real stream chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = ( prompt | llm | { "chat": chat.bind(stop=["Thought:"]), "llm": llm, "passthrough": RunnablePassthrough(), } ) stream = chain.astream({"question": "What is your name?"}) final_value = None streamed_chunks = [] async for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [ {"passthrough": "i"}, {"llm": "i"}, {"chat": AIMessageChunk(content="i")}, ] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res) assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get("chat").content == "i'm a chatbot" assert final_value.get("llm") == "i'm a textbot" assert final_value.get("passthrough") == llm_res def test_with_config_with_config() -> None: llm = FakeListLLM(responses=["i'm a textbot"]) assert dumpd( llm.with_config({"metadata": {"a": "b"}}).with_config(tags=["a-tag"]) ) == dumpd(llm.with_config({"metadata": {"a": "b"}, "tags": ["a-tag"]})) def test_metadata_is_merged() -> None: """Test metadata and tags defined in with_config and at are merged/concatend.""" foo = RunnableLambda(lambda x: x).with_config({"metadata": {"my_key": "my_value"}}) expected_metadata = { "my_key": "my_value", "my_other_key": "my_other_value", } with collect_runs() as cb: foo.invoke("hi", {"metadata": {"my_other_key": "my_other_value"}}) run = cb.traced_runs[0] assert run.extra is not None assert run.extra["metadata"] == expected_metadata def test_tags_are_appended() -> None: """Test tags from with_config are concatenated with those in invocation.""" foo = RunnableLambda(lambda x: x).with_config({"tags": ["my_key"]}) with collect_runs() as cb: foo.invoke("hi", {"tags": ["invoked_key"]}) run = cb.traced_runs[0] assert isinstance(run.tags, list) assert sorted(run.tags) == sorted(["my_key", "invoked_key"]) def 
test_bind_bind() -> None: llm = FakeListLLM(responses=["i'm a textbot"]) assert dumpd( llm.bind(stop=["Thought:"], one="two").bind( stop=["Observation:"], hello="world" ) ) == dumpd(llm.bind(stop=["Observation:"], one="two", hello="world")) def test_deep_stream() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeStreamingListLLM(responses=["foo-lish"]) chain = prompt | llm | StrOutputParser() stream = chain.stream({"question": "What up"}) chunks = [] for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" chunks = [] for chunk in (chain | RunnablePassthrough()).stream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" def test_deep_stream_assign() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeStreamingListLLM(responses=["foo-lish"]) chain: Runnable = prompt | llm | {"str": StrOutputParser()} stream = chain.stream({"question": "What up"}) chunks = [] for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert add(chunks) == {"str": "foo-lish"} chain_with_assign = chain.assign(hello=itemgetter("str") | llm) assert chain_with_assign.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"question": {"title": "Question", "type": "string"}}, } assert chain_with_assign.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "object", "properties": { "str": {"title": "Str", "type": "string"}, "hello": {"title": "Hello", "type": "string"}, }, } chunks = [] for chunk in chain_with_assign.stream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") * 2 assert chunks == [ # first stream passthrough input chunks {"str": "f"}, {"str": "o"}, {"str": "o"}, {"str": "-"}, {"str": "l"}, {"str": "i"}, {"str": "s"}, {"str": "h"}, # then stream assign output chunks {"hello": "f"}, {"hello": "o"}, {"hello": "o"}, {"hello": "-"}, {"hello": "l"}, {"hello": "i"}, {"hello": "s"}, {"hello": "h"}, ] assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"} assert chain_with_assign.invoke({"question": "What up"}) == { "str": "foo-lish", "hello": "foo-lish", } chain_with_assign_shadow = chain.assign( str=lambda _: "shadow", hello=itemgetter("str") | llm, ) assert chain_with_assign_shadow.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"question": {"title": "Question", "type": "string"}}, } assert chain_with_assign_shadow.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "object", "properties": { "str": {"title": "Str"}, "hello": {"title": "Hello", "type": "string"}, }, } chunks = [] for chunk in chain_with_assign_shadow.stream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") + 1 assert add(chunks) == {"str": "shadow", "hello": "foo-lish"} assert chain_with_assign_shadow.invoke({"question": "What up"}) == { "str": "shadow", "hello": "foo-lish", } async def test_deep_astream() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeStreamingListLLM(responses=["foo-lish"]) chain = prompt | llm | StrOutputParser() stream = chain.astream({"question": "What up"}) chunks = [] async for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" 
chunks = [] async for chunk in (chain | RunnablePassthrough()).astream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" async def test_deep_astream_assign() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeStreamingListLLM(responses=["foo-lish"]) chain: Runnable = prompt | llm | {"str": StrOutputParser()} stream = chain.astream({"question": "What up"}) chunks = [] async for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert add(chunks) == {"str": "foo-lish"} chain_with_assign = chain.assign( hello=itemgetter("str") | llm, ) assert chain_with_assign.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"question": {"title": "Question", "type": "string"}}, } assert chain_with_assign.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "object", "properties": { "str": {"title": "Str", "type": "string"}, "hello": {"title": "Hello", "type": "string"}, }, } chunks = [] async for chunk in chain_with_assign.astream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") * 2 assert chunks == [ # first stream passthrough input chunks {"str": "f"}, {"str": "o"}, {"str": "o"}, {"str": "-"}, {"str": "l"}, {"str": "i"}, {"str": "s"}, {"str": "h"}, # then stream assign output chunks {"hello": "f"}, {"hello": "o"}, {"hello": "o"}, {"hello": "-"}, {"hello": "l"}, {"hello": "i"}, {"hello": "s"}, {"hello": "h"}, ] assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"} assert await chain_with_assign.ainvoke({"question": "What up"}) == { "str": "foo-lish", "hello": "foo-lish", } chain_with_assign_shadow = chain | RunnablePassthrough.assign( str=lambda _: "shadow", hello=itemgetter("str") | llm, ) assert chain_with_assign_shadow.input_schema.schema() == { "title": "PromptInput", "type": "object", "properties": {"question": {"title": "Question", "type": "string"}}, } assert chain_with_assign_shadow.output_schema.schema() == { "title": "RunnableSequenceOutput", "type": "object", "properties": { "str": {"title": "Str"}, "hello": {"title": "Hello", "type": "string"}, }, } chunks = [] async for chunk in chain_with_assign_shadow.astream({"question": "What up"}): chunks.append(chunk) assert len(chunks) == len("foo-lish") + 1 assert add(chunks) == {"str": "shadow", "hello": "foo-lish"} assert await chain_with_assign_shadow.ainvoke({"question": "What up"}) == { "str": "shadow", "hello": "foo-lish", } def test_runnable_sequence_transform() -> None: llm = FakeStreamingListLLM(responses=["foo-lish"]) chain: Runnable = llm | StrOutputParser() stream = chain.transform(llm.stream("Hi there!")) chunks = [] for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" async def test_runnable_sequence_atransform() -> None: llm = FakeStreamingListLLM(responses=["foo-lish"]) chain: Runnable = llm | StrOutputParser() stream = chain.atransform(llm.astream("Hi there!")) chunks = [] async for chunk in stream: chunks.append(chunk) assert len(chunks) == len("foo-lish") assert "".join(chunks) == "foo-lish" @pytest.fixture() def llm_with_fallbacks() -> RunnableWithFallbacks: error_llm = FakeListLLM(responses=["foo"], i=1) pass_llm = FakeListLLM(responses=["bar"]) return error_llm.with_fallbacks([pass_llm]) @pytest.fixture() def llm_with_multi_fallbacks() -> RunnableWithFallbacks: error_llm = FakeListLLM(responses=["foo"], i=1) 
error_llm_2 = FakeListLLM(responses=["baz"], i=1) pass_llm = FakeListLLM(responses=["bar"]) return error_llm.with_fallbacks([error_llm_2, pass_llm]) @pytest.fixture() def llm_chain_with_fallbacks() -> Runnable: error_llm = FakeListLLM(responses=["foo"], i=1) pass_llm = FakeListLLM(responses=["bar"]) prompt = PromptTemplate.from_template("what did baz say to {buz}") return RunnableParallel({"buz": lambda x: x}) | (prompt | error_llm).with_fallbacks( [prompt | pass_llm] ) @pytest.mark.parametrize( "runnable", ["llm_with_fallbacks", "llm_with_multi_fallbacks", "llm_chain_with_fallbacks"], ) async def test_llm_with_fallbacks( runnable: RunnableWithFallbacks, request: Any, snapshot: SnapshotAssertion ) -> None: runnable = request.getfixturevalue(runnable) assert runnable.invoke("hello") == "bar" assert runnable.batch(["hi", "hey", "bye"]) == ["bar"] * 3 assert list(runnable.stream("hello")) == ["bar"] assert await runnable.ainvoke("hello") == "bar" assert await runnable.abatch(["hi", "hey", "bye"]) == ["bar"] * 3 assert list(await runnable.ainvoke("hello")) == list("bar") if sys.version_info >= (3, 9): assert dumps(runnable, pretty=True) == snapshot class FakeSplitIntoListParser(BaseOutputParser[List[str]]): """Parse the output of an LLM call to a comma-separated list.""" @classmethod def is_lc_serializable(cls) -> bool: """Return whether or not the class is serializable.""" return True def get_format_instructions(self) -> str: return ( "Your response should be a list of comma separated values, " "eg: `foo, bar, baz`" ) def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" return text.strip().split(", ") def test_each_simple() -> None: """Test that each() works with a simple runnable.""" parser = FakeSplitIntoListParser() assert parser.invoke("first item, second item") == ["first item", "second item"] assert parser.map().invoke(["a, b", "c"]) == [["a", "b"], ["c"]] assert parser.map().map().invoke([["a, b", "c"], ["c, e"]]) == [ [["a", "b"], ["c"]], [["c", "e"]], ] def test_each(snapshot: SnapshotAssertion) -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) first_llm = FakeStreamingListLLM(responses=["first item, second item, third item"]) parser = FakeSplitIntoListParser() second_llm = FakeStreamingListLLM(responses=["this", "is", "a", "test"]) chain = prompt | first_llm | parser | second_llm.map() assert dumps(chain, pretty=True) == snapshot output = chain.invoke({"question": "What up"}) assert output == ["this", "is", "a"] assert (parser | second_llm.map()).invoke("first item, second item") == [ "test", "this", ] def test_recursive_lambda() -> None: def _simple_recursion(x: int) -> Union[int, Runnable]: if x < 10: return RunnableLambda(lambda *args: _simple_recursion(x + 1)) else: return x runnable = RunnableLambda(_simple_recursion) assert runnable.invoke(5) == 10 with pytest.raises(RecursionError): runnable.invoke(0, {"recursion_limit": 9}) def test_retrying(mocker: MockerFixture) -> None: def _lambda(x: int) -> Union[int, Runnable]: if x == 1: raise ValueError("x is 1") elif x == 2: raise RuntimeError("x is 2") else: return x _lambda_mock = mocker.Mock(side_effect=_lambda) runnable = RunnableLambda(_lambda_mock) with pytest.raises(ValueError): runnable.invoke(1) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry( stop_after_attempt=2, retry_if_exception_type=(ValueError,), ).invoke(1) assert _lambda_mock.call_count == 2 # retried 
_lambda_mock.reset_mock() with pytest.raises(RuntimeError): runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).invoke(2) assert _lambda_mock.call_count == 1 # did not retry _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).batch([1, 2, 0]) # 3rd input isn't retried because it succeeded assert _lambda_mock.call_count == 3 + 2 _lambda_mock.reset_mock() output = runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).batch([1, 2, 0], return_exceptions=True) # 3rd input isn't retried because it succeeded assert _lambda_mock.call_count == 3 + 2 assert len(output) == 3 assert isinstance(output[0], ValueError) assert isinstance(output[1], RuntimeError) assert output[2] == 0 _lambda_mock.reset_mock() async def test_async_retrying(mocker: MockerFixture) -> None: def _lambda(x: int) -> Union[int, Runnable]: if x == 1: raise ValueError("x is 1") elif x == 2: raise RuntimeError("x is 2") else: return x _lambda_mock = mocker.Mock(side_effect=_lambda) runnable = RunnableLambda(_lambda_mock) with pytest.raises(ValueError): await runnable.ainvoke(1) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): await runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError, KeyError), ).ainvoke(1) assert _lambda_mock.call_count == 2 # retried _lambda_mock.reset_mock() with pytest.raises(RuntimeError): await runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).ainvoke(2) assert _lambda_mock.call_count == 1 # did not retry _lambda_mock.reset_mock() with pytest.raises(ValueError): await runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).abatch([1, 2, 0]) # 3rd input isn't retried because it succeeded assert _lambda_mock.call_count == 3 + 2 _lambda_mock.reset_mock() output = await runnable.with_retry( stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,), ).abatch([1, 2, 0], return_exceptions=True) # 3rd input isn't retried because it succeeded assert _lambda_mock.call_count == 3 + 2 assert len(output) == 3 assert isinstance(output[0], ValueError) assert isinstance(output[1], RuntimeError) assert output[2] == 0 _lambda_mock.reset_mock() def test_runnable_lambda_stream() -> None: """Test that stream works for both normal functions & those returning Runnable.""" # Normal output should work output: List[Any] = [chunk for chunk in RunnableLambda(range).stream(5)] assert output == [range(5)] # Runnable output should also work llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) output = list(RunnableLambda(lambda x: llm).stream("")) assert output == list(llm_res) def test_runnable_lambda_stream_with_callbacks() -> None: """Test that stream works for RunnableLambda when using callbacks.""" tracer = FakeTracer() llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) config: RunnableConfig = {"callbacks": [tracer]} assert list(RunnableLambda(lambda x: llm).stream("", config=config)) == list( llm_res ) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert 
tracer.runs[0].outputs == {"output": llm_res} def raise_value_error(x: int) -> int: """Raise a value error.""" raise ValueError("x is too large") # Check that the chain on error is invoked with pytest.raises(ValueError): for _ in RunnableLambda(raise_value_error).stream(1000, config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is too large')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None async def test_runnable_lambda_astream() -> None: """Test that astream works for both normal functions & those returning Runnable.""" # Wrapper to make a normal function async def awrapper(func: Callable) -> Callable[..., Awaitable[Any]]: async def afunc(*args: Any, **kwargs: Any) -> Any: return func(*args, **kwargs) return afunc # Normal output should work output: List[Any] = [ chunk async for chunk in RunnableLambda( func=id, afunc=awrapper(range), # id func is just dummy ).astream(5) ] assert output == [range(5)] # Normal output using func should also work output = [_ async for _ in RunnableLambda(range).astream(5)] assert output == [range(5)] # Runnable output should also work llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) output = [ _ async for _ in RunnableLambda( func=id, afunc=awrapper(lambda x: llm), ).astream("") ] assert output == list(llm_res) output = [ chunk async for chunk in cast( AsyncIterator[str], RunnableLambda(lambda x: llm).astream("") ) ] assert output == list(llm_res) async def test_runnable_lambda_astream_with_callbacks() -> None: """Test that astream works for RunnableLambda when using callbacks.""" tracer = FakeTracer() llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) config: RunnableConfig = {"callbacks": [tracer]} assert [ _ async for _ in RunnableLambda(lambda x: llm).astream("", config=config) ] == list(llm_res) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {"output": llm_res} def raise_value_error(x: int) -> int: """Raise a value error.""" raise ValueError("x is too large") # Check that the chain on error is invoked with pytest.raises(ValueError): async for _ in RunnableLambda(raise_value_error).astream(1000, config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is too large')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None @freeze_time("2023-01-01") def test_seq_batch_return_exceptions(mocker: MockerFixture) -> None: class ControlledExceptionRunnable(Runnable[str, str]): def __init__(self, fail_starts_with: str) -> None: self.fail_starts_with = fail_starts_with def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any: raise NotImplementedError() def _batch( self, inputs: List[str], ) -> List: outputs: List[Any] = [] for input in inputs: if input.startswith(self.fail_starts_with): outputs.append(ValueError()) else: outputs.append(input + "a") return outputs def batch( self, inputs: List[str], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> List[str]: return self._batch_with_config( self._batch, inputs, config, return_exceptions=return_exceptions, **kwargs, ) chain = ( ControlledExceptionRunnable("bux") | ControlledExceptionRunnable("bar") | ControlledExceptionRunnable("baz") | ControlledExceptionRunnable("foo") ) assert isinstance(chain, RunnableSequence) # Test batch with 
pytest.raises(ValueError): chain.batch(["foo", "bar", "baz", "qux"]) spy = mocker.spy(ControlledExceptionRunnable, "batch") tracer = FakeTracer() inputs = ["foo", "bar", "baz", "qux"] outputs = chain.batch(inputs, dict(callbacks=[tracer]), return_exceptions=True) assert len(outputs) == 4 assert isinstance(outputs[0], ValueError) assert isinstance(outputs[1], ValueError) assert isinstance(outputs[2], ValueError) assert outputs[3] == "quxaaaa" assert spy.call_count == 4 inputs_to_batch = [c[0][1] for c in spy.call_args_list] assert inputs_to_batch == [ # inputs to sequence step 0 # same as inputs to sequence.batch() ["foo", "bar", "baz", "qux"], # inputs to sequence step 1 # == outputs of sequence step 0 as no exceptions were raised ["fooa", "bara", "baza", "quxa"], # inputs to sequence step 2 # 'bar' was dropped as it raised an exception in step 1 ["fooaa", "bazaa", "quxaa"], # inputs to sequence step 3 # 'baz' was dropped as it raised an exception in step 2 ["fooaaa", "quxaaa"], ] parent_runs = sorted( (r for r in tracer.runs if r.parent_run_id is None), key=lambda run: inputs.index(run.inputs["input"]), ) assert len(parent_runs) == 4 parent_run_foo = parent_runs[0] assert parent_run_foo.inputs["input"] == "foo" assert repr(ValueError()) in str(parent_run_foo.error) assert len(parent_run_foo.child_runs) == 4 assert [r.error for r in parent_run_foo.child_runs[:-1]] == [ None, None, None, ] assert repr(ValueError()) in str(parent_run_foo.child_runs[-1].error) parent_run_bar = parent_runs[1] assert parent_run_bar.inputs["input"] == "bar" assert repr(ValueError()) in str(parent_run_bar.error) assert len(parent_run_bar.child_runs) == 2 assert parent_run_bar.child_runs[0].error is None assert repr(ValueError()) in str(parent_run_bar.child_runs[1].error) parent_run_baz = parent_runs[2] assert parent_run_baz.inputs["input"] == "baz" assert repr(ValueError()) in str(parent_run_baz.error) assert len(parent_run_baz.child_runs) == 3 assert [r.error for r in parent_run_baz.child_runs[:-1]] == [ None, None, ] assert repr(ValueError()) in str(parent_run_baz.child_runs[-1].error) parent_run_qux = parent_runs[3] assert parent_run_qux.inputs["input"] == "qux" assert parent_run_qux.error is None assert parent_run_qux.outputs is not None assert parent_run_qux.outputs["output"] == "quxaaaa" assert len(parent_run_qux.child_runs) == 4 assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None] @freeze_time("2023-01-01") async def test_seq_abatch_return_exceptions(mocker: MockerFixture) -> None: class ControlledExceptionRunnable(Runnable[str, str]): def __init__(self, fail_starts_with: str) -> None: self.fail_starts_with = fail_starts_with def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any: raise NotImplementedError() async def _abatch( self, inputs: List[str], ) -> List: outputs: List[Any] = [] for input in inputs: if input.startswith(self.fail_starts_with): outputs.append(ValueError()) else: outputs.append(input + "a") return outputs async def abatch( self, inputs: List[str], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> List[str]: return await self._abatch_with_config( self._abatch, inputs, config, return_exceptions=return_exceptions, **kwargs, ) chain = ( ControlledExceptionRunnable("bux") | ControlledExceptionRunnable("bar") | ControlledExceptionRunnable("baz") | ControlledExceptionRunnable("foo") ) assert isinstance(chain, RunnableSequence) # Test abatch with 
pytest.raises(ValueError): await chain.abatch(["foo", "bar", "baz", "qux"]) spy = mocker.spy(ControlledExceptionRunnable, "abatch") tracer = FakeTracer() inputs = ["foo", "bar", "baz", "qux"] outputs = await chain.abatch( inputs, dict(callbacks=[tracer]), return_exceptions=True ) assert len(outputs) == 4 assert isinstance(outputs[0], ValueError) assert isinstance(outputs[1], ValueError) assert isinstance(outputs[2], ValueError) assert outputs[3] == "quxaaaa" assert spy.call_count == 4 inputs_to_batch = [c[0][1] for c in spy.call_args_list] assert inputs_to_batch == [ # inputs to sequence step 0 # same as inputs to sequence.batch() ["foo", "bar", "baz", "qux"], # inputs to sequence step 1 # == outputs of sequence step 0 as no exceptions were raised ["fooa", "bara", "baza", "quxa"], # inputs to sequence step 2 # 'bar' was dropped as it raised an exception in step 1 ["fooaa", "bazaa", "quxaa"], # inputs to sequence step 3 # 'baz' was dropped as it raised an exception in step 2 ["fooaaa", "quxaaa"], ] parent_runs = sorted( (r for r in tracer.runs if r.parent_run_id is None), key=lambda run: inputs.index(run.inputs["input"]), ) assert len(parent_runs) == 4 parent_run_foo = parent_runs[0] assert parent_run_foo.inputs["input"] == "foo" assert repr(ValueError()) in str(parent_run_foo.error) assert len(parent_run_foo.child_runs) == 4 assert [r.error for r in parent_run_foo.child_runs[:-1]] == [ None, None, None, ] assert repr(ValueError()) in str(parent_run_foo.child_runs[-1].error) parent_run_bar = parent_runs[1] assert parent_run_bar.inputs["input"] == "bar" assert repr(ValueError()) in str(parent_run_bar.error) assert len(parent_run_bar.child_runs) == 2 assert parent_run_bar.child_runs[0].error is None assert repr(ValueError()) in str(parent_run_bar.child_runs[1].error) parent_run_baz = parent_runs[2] assert parent_run_baz.inputs["input"] == "baz" assert repr(ValueError()) in str(parent_run_baz.error) assert len(parent_run_baz.child_runs) == 3 assert [r.error for r in parent_run_baz.child_runs[:-1]] == [ None, None, ] assert repr(ValueError()) in str(parent_run_baz.child_runs[-1].error) parent_run_qux = parent_runs[3] assert parent_run_qux.inputs["input"] == "qux" assert parent_run_qux.error is None assert parent_run_qux.outputs is not None assert parent_run_qux.outputs["output"] == "quxaaaa" assert len(parent_run_qux.child_runs) == 4 assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None] def test_runnable_branch_init() -> None: """Verify that runnable branch gets initialized properly.""" add = RunnableLambda(lambda x: x + 1) condition = RunnableLambda(lambda x: x > 0) # Test failure with less than 2 branches with pytest.raises(ValueError): RunnableBranch((condition, add)) # Test failure with less than 2 branches with pytest.raises(ValueError): RunnableBranch(condition) @pytest.mark.parametrize( "branches", [ [ (RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)), RunnableLambda(lambda x: x - 1), ], [ (RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)), (RunnableLambda(lambda x: x > 5), RunnableLambda(lambda x: x + 1)), RunnableLambda(lambda x: x - 1), ], [ (lambda x: x > 0, lambda x: x + 1), (lambda x: x > 5, lambda x: x + 1), lambda x: x - 1, ], ], ) def test_runnable_branch_init_coercion(branches: Sequence[Any]) -> None: """Verify that runnable branch gets initialized properly.""" runnable = RunnableBranch[int, int](*branches) for branch in runnable.branches: condition, body = branch assert isinstance(condition, Runnable) assert 
isinstance(body, Runnable) assert isinstance(runnable.default, Runnable) assert runnable.input_schema.schema() == {"title": "RunnableBranchInput"} def test_runnable_branch_invoke_call_counts(mocker: MockerFixture) -> None: """Verify that runnables are invoked only when necessary.""" # Test with single branch add = RunnableLambda(lambda x: x + 1) sub = RunnableLambda(lambda x: x - 1) condition = RunnableLambda(lambda x: x > 0) spy = mocker.spy(condition, "invoke") add_spy = mocker.spy(add, "invoke") branch = RunnableBranch[int, int]((condition, add), (condition, add), sub) assert spy.call_count == 0 assert add_spy.call_count == 0 assert branch.invoke(1) == 2 assert add_spy.call_count == 1 assert spy.call_count == 1 assert branch.invoke(2) == 3 assert spy.call_count == 2 assert add_spy.call_count == 2 assert branch.invoke(-3) == -4 # Should fall through to default branch with condition being evaluated twice! assert spy.call_count == 4 # Add should not be invoked assert add_spy.call_count == 2 def test_runnable_branch_invoke() -> None: # Test with single branch def raise_value_error(x: int) -> int: """Raise a value error.""" raise ValueError("x is too large") branch = RunnableBranch[int, int]( (lambda x: x > 100, raise_value_error), # mypy cannot infer types from the lambda (lambda x: x > 0 and x < 5, lambda x: x + 1), # type: ignore[misc] (lambda x: x > 5, lambda x: x * 10), lambda x: x - 1, ) assert branch.invoke(1) == 2 assert branch.invoke(10) == 100 assert branch.invoke(0) == -1 # Should raise an exception with pytest.raises(ValueError): branch.invoke(1000) def test_runnable_branch_batch() -> None: """Test batch variant.""" # Test with single branch branch = RunnableBranch[int, int]( (lambda x: x > 0 and x < 5, lambda x: x + 1), (lambda x: x > 5, lambda x: x * 10), lambda x: x - 1, ) assert branch.batch([1, 10, 0]) == [2, 100, -1] async def test_runnable_branch_ainvoke() -> None: """Test async variant of invoke.""" branch = RunnableBranch[int, int]( (lambda x: x > 0 and x < 5, lambda x: x + 1), (lambda x: x > 5, lambda x: x * 10), lambda x: x - 1, ) assert await branch.ainvoke(1) == 2 assert await branch.ainvoke(10) == 100 assert await branch.ainvoke(0) == -1 # Verify that the async variant is used if available async def condition(x: int) -> bool: return x > 0 async def add(x: int) -> int: return x + 1 async def sub(x: int) -> int: return x - 1 branch = RunnableBranch[int, int]((condition, add), sub) assert await branch.ainvoke(1) == 2 assert await branch.ainvoke(-10) == -11 def test_runnable_branch_invoke_callbacks() -> None: """Verify that callbacks are correctly used in invoke.""" tracer = FakeTracer() def raise_value_error(x: int) -> int: """Raise a value error.""" raise ValueError("x is too large") branch = RunnableBranch[int, int]( (lambda x: x > 100, raise_value_error), lambda x: x - 1, ) assert branch.invoke(1, config={"callbacks": [tracer]}) == 0 assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {"output": 0} # Check that the chain on end is invoked with pytest.raises(ValueError): branch.invoke(1000, config={"callbacks": [tracer]}) assert len(tracer.runs) == 2 assert "ValueError('x is too large')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None async def test_runnable_branch_ainvoke_callbacks() -> None: """Verify that callbacks are invoked correctly in ainvoke.""" tracer = FakeTracer() async def raise_value_error(x: int) -> int: """Raise a value error.""" raise ValueError("x is too large") branch = 
RunnableBranch[int, int]( (lambda x: x > 100, raise_value_error), lambda x: x - 1, ) assert await branch.ainvoke(1, config={"callbacks": [tracer]}) == 0 assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {"output": 0} # Check that the chain on end is invoked with pytest.raises(ValueError): await branch.ainvoke(1000, config={"callbacks": [tracer]}) assert len(tracer.runs) == 2 assert "ValueError('x is too large')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None async def test_runnable_branch_abatch() -> None: """Test async variant of invoke.""" branch = RunnableBranch[int, int]( (lambda x: x > 0 and x < 5, lambda x: x + 1), (lambda x: x > 5, lambda x: x * 10), lambda x: x - 1, ) assert await branch.abatch([1, 10, 0]) == [2, 100, -1] def test_runnable_branch_stream() -> None: """Verify that stream works for RunnableBranch.""" llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]( (lambda x: x == "hello", llm), lambda x: x, ) assert list(branch.stream("hello")) == list(llm_res) assert list(branch.stream("bye")) == ["bye"] def test_runnable_branch_stream_with_callbacks() -> None: """Verify that stream works for RunnableBranch when using callbacks.""" tracer = FakeTracer() def raise_value_error(x: str) -> Any: """Raise a value error.""" raise ValueError(f"x is {x}") llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]( (lambda x: x == "error", raise_value_error), (lambda x: x == "hello", llm), lambda x: x, ) config: RunnableConfig = {"callbacks": [tracer]} assert list(branch.stream("hello", config=config)) == list(llm_res) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {"output": llm_res} # Verify that the chain on error is invoked with pytest.raises(ValueError): for _ in branch.stream("error", config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is error')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None assert list(branch.stream("bye", config=config)) == ["bye"] assert len(tracer.runs) == 3 assert tracer.runs[2].error is None assert tracer.runs[2].outputs == {"output": "bye"} async def test_runnable_branch_astream() -> None: """Verify that astream works for RunnableBranch.""" llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]( (lambda x: x == "hello", llm), lambda x: x, ) assert [_ async for _ in branch.astream("hello")] == list(llm_res) assert [_ async for _ in branch.astream("bye")] == ["bye"] # Verify that the async variant is used if available async def condition(x: str) -> bool: return x == "hello" async def repeat(x: str) -> str: return x + x async def reverse(x: str) -> str: return x[::-1] branch = RunnableBranch[str, Any]((condition, repeat), llm) assert [_ async for _ in branch.astream("hello")] == ["hello" * 2] assert [_ async for _ in branch.astream("bye")] == list(llm_res) branch = RunnableBranch[str, Any]((condition, llm), reverse) assert [_ async for _ in branch.astream("hello")] == list(llm_res) assert [_ async for _ in branch.astream("bye")] == ["eyb"] async def test_runnable_branch_astream_with_callbacks() -> None: """Verify that astream works for RunnableBranch when using callbacks.""" tracer = 
FakeTracer() def raise_value_error(x: str) -> Any: """Raise a value error.""" raise ValueError(f"x is {x}") llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]( (lambda x: x == "error", raise_value_error), (lambda x: x == "hello", llm), lambda x: x, ) config: RunnableConfig = {"callbacks": [tracer]} assert [_ async for _ in branch.astream("hello", config=config)] == list(llm_res) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {"output": llm_res} # Verify that the chain on error is invoked with pytest.raises(ValueError): async for _ in branch.astream("error", config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is error')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None assert [_ async for _ in branch.astream("bye", config=config)] == ["bye"] assert len(tracer.runs) == 3 assert tracer.runs[2].error is None assert tracer.runs[2].outputs == {"output": "bye"} @pytest.mark.skipif( sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." ) def test_representation_of_runnables() -> None: """Test representation of runnables.""" runnable = RunnableLambda(lambda x: x * 2) assert repr(runnable) == "RunnableLambda(lambda x: x * 2)" def f(x: int) -> int: """Return 2.""" return 2 assert repr(RunnableLambda(func=f)) == "RunnableLambda(f)" async def af(x: int) -> int: """Return 2.""" return 2 assert repr(RunnableLambda(func=f, afunc=af)) == "RunnableLambda(f)" assert repr( RunnableLambda(lambda x: x + 2) | { "a": RunnableLambda(lambda x: x * 2), "b": RunnableLambda(lambda x: x * 3), } ) == ( "RunnableLambda(...)\n" "| {\n" " a: RunnableLambda(...),\n" " b: RunnableLambda(...)\n" " }" ), "repr where code string contains multiple lambdas gives up" async def test_tool_from_runnable() -> None: prompt = ( SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) llm = FakeStreamingListLLM(responses=["foo-lish"]) chain = prompt | llm | StrOutputParser() chain_tool = tool("chain_tool", chain) assert isinstance(chain_tool, BaseTool) assert chain_tool.name == "chain_tool" assert chain_tool.run({"question": "What up"}) == chain.invoke( {"question": "What up"} ) assert await chain_tool.arun({"question": "What up"}) == await chain.ainvoke( {"question": "What up"} ) assert chain_tool.description.endswith(repr(chain)) assert chain_tool.args_schema.schema() == chain.input_schema.schema() assert chain_tool.args_schema.schema() == { "properties": {"question": {"title": "Question", "type": "string"}}, "title": "PromptInput", "type": "object", } async def test_runnable_gen() -> None: """Test that a generator can be used as a runnable.""" def gen(input: Iterator[Any]) -> Iterator[int]: yield 1 yield 2 yield 3 runnable = RunnableGenerator(gen) assert runnable.input_schema.schema() == {"title": "RunnableGeneratorInput"} assert runnable.output_schema.schema() == { "title": "RunnableGeneratorOutput", "type": "integer", } assert runnable.invoke(None) == 6 assert list(runnable.stream(None)) == [1, 2, 3] assert runnable.batch([None, None]) == [6, 6] async def agen(input: AsyncIterator[Any]) -> AsyncIterator[int]: yield 1 yield 2 yield 3 arunnable = RunnableGenerator(agen) assert await arunnable.ainvoke(None) == 6 assert [p async for p in arunnable.astream(None)] == [1, 2, 3] assert await arunnable.abatch([None, None]) == [6, 6] async def test_runnable_gen_transform() -> None: """Test that a 
generator can be used as a runnable.""" def gen_indexes(length_iter: Iterator[int]) -> Iterator[int]: for i in range(next(length_iter)): yield i async def agen_indexes(length_iter: AsyncIterator[int]) -> AsyncIterator[int]: async for length in length_iter: for i in range(length): yield i def plus_one(input: Iterator[int]) -> Iterator[int]: for i in input: yield i + 1 async def aplus_one(input: AsyncIterator[int]) -> AsyncIterator[int]: async for i in input: yield i + 1 chain: Runnable = RunnableGenerator(gen_indexes, agen_indexes) | plus_one achain = RunnableGenerator(gen_indexes, agen_indexes) | aplus_one assert chain.input_schema.schema() == { "title": "RunnableGeneratorInput", "type": "integer", } assert chain.output_schema.schema() == { "title": "RunnableGeneratorOutput", "type": "integer", } assert achain.input_schema.schema() == { "title": "RunnableGeneratorInput", "type": "integer", } assert achain.output_schema.schema() == { "title": "RunnableGeneratorOutput", "type": "integer", } assert list(chain.stream(3)) == [1, 2, 3] assert [p async for p in achain.astream(4)] == [1, 2, 3, 4] def test_with_config_callbacks() -> None: result = RunnableLambda(lambda x: x).with_config({"callbacks": []}) # Bugfix from version 0.0.325 # ConfigError: field "callbacks" not yet prepared so type is still a ForwardRef, # you might need to call RunnableConfig.update_forward_refs(). assert isinstance(result, RunnableBinding) async def test_ainvoke_on_returned_runnable() -> None: """Verify that a runnable returned by a sync runnable in the async path will be runthroughaasync path (issue #13407)""" def idchain_sync(__input: dict) -> bool: return False async def idchain_async(__input: dict) -> bool: return True idchain = RunnableLambda(func=idchain_sync, afunc=idchain_async) def func(__input: dict) -> Runnable: return idchain assert await RunnableLambda(func).ainvoke({}) def test_invoke_stream_passthrough_assign_trace() -> None: def idchain_sync(__input: dict) -> bool: return False chain = RunnablePassthrough.assign(urls=idchain_sync) tracer = FakeTracer() chain.invoke({"example": [1, 2, 3]}, dict(callbacks=[tracer])) assert tracer.runs[0].name == "RunnableAssign<urls>" assert tracer.runs[0].child_runs[0].name == "RunnableParallel<urls>" tracer = FakeTracer() for item in chain.stream({"example": [1, 2, 3]}, dict(callbacks=[tracer])): pass assert tracer.runs[0].name == "RunnableAssign<urls>" assert tracer.runs[0].child_runs[0].name == "RunnableParallel<urls>" async def test_ainvoke_astream_passthrough_assign_trace() -> None: def idchain_sync(__input: dict) -> bool: return False chain = RunnablePassthrough.assign(urls=idchain_sync) tracer = FakeTracer() await chain.ainvoke({"example": [1, 2, 3]}, dict(callbacks=[tracer])) assert tracer.runs[0].name == "RunnableAssign<urls>" assert tracer.runs[0].child_runs[0].name == "RunnableParallel<urls>" tracer = FakeTracer() async for item in chain.astream({"example": [1, 2, 3]}, dict(callbacks=[tracer])): pass assert tracer.runs[0].name == "RunnableAssign<urls>" assert tracer.runs[0].child_runs[0].name == "RunnableParallel<urls>" async def test_astream_log_deep_copies() -> None: """Verify that deep copies are used when using jsonpatch in astream log. jsonpatch re-uses objects in its API; e.g., import jsonpatch obj1 = { "a": 1 } value = { "b": 2 } obj2 = { "a": 1, "value": value } ops = list(jsonpatch.JsonPatch.from_diff(obj1, obj2)) assert id(ops[0]['value']) == id(value) This can create unexpected consequences for downstream code. 
""" def _get_run_log(run_log_patches: Sequence[RunLogPatch]) -> RunLog: """Get run log""" run_log = RunLog(state=None) # type: ignore for log_patch in run_log_patches: run_log = run_log + log_patch return run_log def add_one(x: int) -> int: """Add one.""" return x + 1 chain = RunnableLambda(add_one) chunks = [] final_output = None async for chunk in chain.astream_log(1): chunks.append(chunk) if final_output is None: final_output = chunk else: final_output = final_output + chunk run_log = _get_run_log(chunks) state = run_log.state.copy() # Ignoring type here since we know that the state is a dict # so we can delete `id` for testing purposes state.pop("id") # type: ignore assert state == { "final_output": 2, "logs": {}, "streamed_output": [2], }
[ "Hello, {name}!", "{context} {question}", "Context:\n{documents}\n\nQuestion:\n{question}", "human", "Prompt Template", "What is your favorite color?", "what country is the city {city} in? respond in {language}", "{question}", "prompt_template", "Respond to the following question: test", "You are a nice assistant.", "{'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}", "i'm a chatbot", "Context:\n[Document(page_content='foo'), Document(page_content='bar')]\n\nQuestion:\nWhat is your name?", "Hello, {name}! {name}!", "What is your name?", "foo", "{documents}", "You are an english major. Answer the question: {question}", "Respond to the following question: {question}", "The prompt template for this chain", "A very good morning to you, {name}!", "{'title': 'Page Content', 'type': 'string'}", "invoke", "good_morning", "what did baz say to {buz}", "You are a nicer assistant.", "ainvoke", "what is the city {person} is from?", "Hello, how are you?", "foo, bar", "You are a math genius. Answer the question: {question}" ]
2024-01-10
robocorp/langchain
libs~langchain~langchain~chains~combine_documents~reduce.py
"""Combine many documents together by recursively reducing them.""" from __future__ import annotations from typing import Any, Callable, List, Optional, Protocol, Tuple from langchain_core.documents import Document from langchain_core.pydantic_v1 import Extra from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain class CombineDocsProtocol(Protocol): """Interface for the combine_docs method.""" def __call__(self, docs: List[Document], **kwargs: Any) -> str: """Interface for the combine_docs method.""" class AsyncCombineDocsProtocol(Protocol): """Interface for the combine_docs method.""" async def __call__(self, docs: List[Document], **kwargs: Any) -> str: """Async interface for the combine_docs method.""" def split_list_of_docs( docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any ) -> List[List[Document]]: """Split Documents into subsets that each meet a cumulative length constraint. Args: docs: The full list of Documents. length_func: Function for computing the cumulative length of a set of Documents. token_max: The maximum cumulative length of any subset of Documents. **kwargs: Arbitrary additional keyword params to pass to each call of the length_func. Returns: A List[List[Document]]. """ new_result_doc_list = [] _sub_result_docs = [] for doc in docs: _sub_result_docs.append(doc) _num_tokens = length_func(_sub_result_docs, **kwargs) if _num_tokens > token_max: if len(_sub_result_docs) == 1: raise ValueError( "A single document was longer than the context length," " we cannot handle this." ) new_result_doc_list.append(_sub_result_docs[:-1]) _sub_result_docs = _sub_result_docs[-1:] new_result_doc_list.append(_sub_result_docs) return new_result_doc_list def collapse_docs( docs: List[Document], combine_document_func: CombineDocsProtocol, **kwargs: Any, ) -> Document: """Execute a collapse function on a set of documents and merge their metadatas. Args: docs: A list of Documents to combine. combine_document_func: A function that takes in a list of Documents and optionally addition keyword parameters and combines them into a single string. **kwargs: Arbitrary additional keyword params to pass to the combine_document_func. Returns: A single Document with the output of combine_document_func for the page content and the combined metadata's of all the input documents. All metadata values are strings, and where there are overlapping keys across documents the values are joined by ", ". """ result = combine_document_func(docs, **kwargs) combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()} for doc in docs[1:]: for k, v in doc.metadata.items(): if k in combined_metadata: combined_metadata[k] += f", {v}" else: combined_metadata[k] = str(v) return Document(page_content=result, metadata=combined_metadata) async def acollapse_docs( docs: List[Document], combine_document_func: AsyncCombineDocsProtocol, **kwargs: Any, ) -> Document: """Execute a collapse function on a set of documents and merge their metadatas. Args: docs: A list of Documents to combine. combine_document_func: A function that takes in a list of Documents and optionally addition keyword parameters and combines them into a single string. **kwargs: Arbitrary additional keyword params to pass to the combine_document_func. Returns: A single Document with the output of combine_document_func for the page content and the combined metadata's of all the input documents. 
All metadata values are strings, and where there are overlapping keys across documents the values are joined by ", ". """ result = await combine_document_func(docs, **kwargs) combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()} for doc in docs[1:]: for k, v in doc.metadata.items(): if k in combined_metadata: combined_metadata[k] += f", {v}" else: combined_metadata[k] = str(v) return Document(page_content=result, metadata=combined_metadata) class ReduceDocumentsChain(BaseCombineDocumentsChain): """Combine documents by recursively reducing them. This involves - combine_documents_chain - collapse_documents_chain `combine_documents_chain` is ALWAYS provided. This is final chain that is called. We pass all previous results to this chain, and the output of this chain is returned as a final result. `collapse_documents_chain` is used if the documents passed in are too many to all be passed to `combine_documents_chain` in one go. In this case, `collapse_documents_chain` is called recursively on as big of groups of documents as are allowed. Example: .. code-block:: python from langchain.chains import ( StuffDocumentsChain, LLMChain, ReduceDocumentsChain ) from langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template( "Summarize this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, ) # If we wanted to, we could also pass in collapse_documents_chain # which is specifically aimed at collapsing documents BEFORE # the final call. prompt = PromptTemplate.from_template( "Collapse this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_documents_chain, ) """ combine_documents_chain: BaseCombineDocumentsChain """Final chain to call to combine documents. This is typically a StuffDocumentsChain.""" collapse_documents_chain: Optional[BaseCombineDocumentsChain] = None """Chain to use to collapse documents if needed until they can all fit. If None, will use the combine_documents_chain. This is typically a StuffDocumentsChain.""" token_max: int = 3000 """The maximum number of tokens to group documents into. 
For example, if set to 3000 then documents will be grouped into chunks of no greater than 3000 tokens before trying to combine them into a smaller chunk.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def _collapse_chain(self) -> BaseCombineDocumentsChain: if self.collapse_documents_chain is not None: return self.collapse_documents_chain else: return self.combine_documents_chain def combine_docs( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[str, dict]: """Combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ result_docs, extra_return_dict = self._collapse( docs, token_max=token_max, callbacks=callbacks, **kwargs ) return self.combine_documents_chain.combine_docs( docs=result_docs, callbacks=callbacks, **kwargs ) async def acombine_docs( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[str, dict]: """Async combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. 
""" result_docs, extra_return_dict = await self._acollapse( docs, token_max=token_max, callbacks=callbacks, **kwargs ) return await self.combine_documents_chain.acombine_docs( docs=result_docs, callbacks=callbacks, **kwargs ) def _collapse( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[List[Document], dict]: result_docs = docs length_func = self.combine_documents_chain.prompt_length num_tokens = length_func(result_docs, **kwargs) def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str: return self._collapse_chain.run( input_documents=docs, callbacks=callbacks, **kwargs ) _token_max = token_max or self.token_max while num_tokens is not None and num_tokens > _token_max: new_result_doc_list = split_list_of_docs( result_docs, length_func, _token_max, **kwargs ) result_docs = [] for docs in new_result_doc_list: new_doc = collapse_docs(docs, _collapse_docs_func, **kwargs) result_docs.append(new_doc) num_tokens = length_func(result_docs, **kwargs) return result_docs, {} async def _acollapse( self, docs: List[Document], token_max: Optional[int] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> Tuple[List[Document], dict]: result_docs = docs length_func = self.combine_documents_chain.prompt_length num_tokens = length_func(result_docs, **kwargs) async def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str: return await self._collapse_chain.arun( input_documents=docs, callbacks=callbacks, **kwargs ) _token_max = token_max or self.token_max while num_tokens is not None and num_tokens > _token_max: new_result_doc_list = split_list_of_docs( result_docs, length_func, _token_max, **kwargs ) result_docs = [] for docs in new_result_doc_list: new_doc = await acollapse_docs(docs, _collapse_docs_func, **kwargs) result_docs.append(new_doc) num_tokens = length_func(result_docs, **kwargs) return result_docs, {} @property def _chain_type(self) -> str: return "reduce_documents_chain"
[]
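# An illustrative sketch of the two helpers defined above, using character count
# as a stand-in for a real token counter; the documents, the limit of 10 and the
# join-based combine function are assumptions made for the example only.
from langchain_core.documents import Document

from langchain.chains.combine_documents.reduce import collapse_docs, split_list_of_docs

docs = [
    Document(page_content="alpha", metadata={"source": "a"}),
    Document(page_content="beta", metadata={"source": "b"}),
    Document(page_content="gamma", metadata={"source": "c"}),
]


def char_length(docs: list, **kwargs) -> int:
    # Toy length function: cumulative number of characters.
    return sum(len(d.page_content) for d in docs)


def join_pages(docs: list, **kwargs) -> str:
    # Toy combine function matching the CombineDocsProtocol signature.
    return " ".join(d.page_content for d in docs)


groups = split_list_of_docs(docs, char_length, 10)
# groups == [["alpha", "beta"], ["gamma"]] because adding "gamma" would exceed 10 chars.

merged = collapse_docs(groups[0], join_pages)
# merged.page_content == "alpha beta" and merged.metadata == {"source": "a, b"}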
2024-01-10
robocorp/langchain
libs~community~langchain_community~chat_models~baidu_qianfan_endpoint.py
from __future__ import annotations import logging from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_core.pydantic_v1 import Field, SecretStr, root_validator from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env logger = logging.getLogger(__name__) def convert_message_to_dict(message: BaseMessage) -> dict: """Convert a message to a dictionary that can be passed to the API.""" message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} if "function_call" in message.additional_kwargs: message_dict["function_call"] = message.additional_kwargs["function_call"] # If function call only, content is None not empty string if message_dict["content"] == "": message_dict["content"] = None elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "content": message.content, "name": message.name, } else: raise TypeError(f"Got unknown type {message}") return message_dict def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage: content = _dict.get("result", "") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} if "thoughts" in additional_kwargs["function_call"]: # align to api sample, which affects the llm function_call output additional_kwargs["function_call"].pop("thoughts") else: additional_kwargs = {} return AIMessage( content=content, additional_kwargs={**_dict.get("body", {}), **additional_kwargs}, ) class QianfanChatEndpoint(BaseChatModel): """Baidu Qianfan chat models. To use, you should have the ``qianfan`` python package installed, and the environment variable ``qianfan_ak`` and ``qianfan_sk`` set with your API key and Secret Key. ak, sk are required parameters which you could get from https://cloud.baidu.com/product/wenxinworkshop Example: .. code-block:: python from langchain_community.chat_models import QianfanChatEndpoint qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot", endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk") """ model_kwargs: Dict[str, Any] = Field(default_factory=dict) client: Any qianfan_ak: Optional[SecretStr] = None qianfan_sk: Optional[SecretStr] = None streaming: Optional[bool] = False """Whether to stream the results or not.""" request_timeout: Optional[int] = 60 """request timeout for chat http requests""" top_p: Optional[float] = 0.8 temperature: Optional[float] = 0.95 penalty_score: Optional[float] = 1 """Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. In the case of other model, passing these params will not affect the result. """ model: str = "ERNIE-Bot-turbo" """Model name. you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu preset models are mapping to an endpoint. `model` will be ignored if `endpoint` is set. Default is ERNIE-Bot-turbo. 
""" endpoint: Optional[str] = None """Endpoint of the Qianfan LLM, required if custom model used.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: values["qianfan_ak"] = convert_to_secret_str( get_from_dict_or_env( values, "qianfan_ak", "QIANFAN_AK", default="", ) ) values["qianfan_sk"] = convert_to_secret_str( get_from_dict_or_env( values, "qianfan_sk", "QIANFAN_SK", default="", ) ) params = { "model": values["model"], "stream": values["streaming"], } if values["qianfan_ak"].get_secret_value() != "": params["ak"] = values["qianfan_ak"].get_secret_value() if values["qianfan_sk"].get_secret_value() != "": params["sk"] = values["qianfan_sk"].get_secret_value() if values["endpoint"] is not None and values["endpoint"] != "": params["endpoint"] = values["endpoint"] try: import qianfan values["client"] = qianfan.ChatCompletion(**params) except ImportError: raise ValueError( "qianfan package not found, please install it with " "`pip install qianfan`" ) return values @property def _identifying_params(self) -> Dict[str, Any]: return { **{"endpoint": self.endpoint, "model": self.model}, **super()._identifying_params, } @property def _llm_type(self) -> str: """Return type of chat_model.""" return "baidu-qianfan-chat" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Qianfan API.""" normal_params = { "model": self.model, "endpoint": self.endpoint, "stream": self.streaming, "request_timeout": self.request_timeout, "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, } return {**normal_params, **self.model_kwargs} def _convert_prompt_msg_params( self, messages: List[BaseMessage], **kwargs: Any, ) -> Dict[str, Any]: """ Converts a list of messages into a dictionary containing the message content and default parameters. Args: messages (List[BaseMessage]): The list of messages. **kwargs (Any): Optional arguments to add additional parameters to the resulting dictionary. Returns: Dict[str, Any]: A dictionary containing the message content and default parameters. """ messages_dict: Dict[str, Any] = { "messages": [ convert_message_to_dict(m) for m in messages if not isinstance(m, SystemMessage) ] } for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]: if "system" not in messages_dict: messages_dict["system"] = "" messages_dict["system"] += cast(str, messages[i].content) + "\n" return { **messages_dict, **self._default_params, **kwargs, } def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Call out to an qianfan models endpoint for each generation with a prompt. Args: messages: The messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = qianfan_model("Tell me a joke.") """ if self.streaming: completion = "" for chunk in self._stream(messages, stop, run_manager, **kwargs): completion += chunk.text lc_msg = AIMessage(content=completion, additional_kwargs={}) gen = ChatGeneration( message=lc_msg, generation_info=dict(finish_reason="stop"), ) return ChatResult( generations=[gen], llm_output={"token_usage": {}, "model_name": self.model}, ) params = self._convert_prompt_msg_params(messages, **kwargs) response_payload = self.client.do(**params) lc_msg = _convert_dict_to_message(response_payload) gen = ChatGeneration( message=lc_msg, generation_info={ "finish_reason": "stop", **response_payload.get("body", {}), }, ) token_usage = response_payload.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model} return ChatResult(generations=[gen], llm_output=llm_output) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: completion = "" token_usage = {} async for chunk in self._astream(messages, stop, run_manager, **kwargs): completion += chunk.text lc_msg = AIMessage(content=completion, additional_kwargs={}) gen = ChatGeneration( message=lc_msg, generation_info=dict(finish_reason="stop"), ) return ChatResult( generations=[gen], llm_output={"token_usage": {}, "model_name": self.model}, ) params = self._convert_prompt_msg_params(messages, **kwargs) response_payload = await self.client.ado(**params) lc_msg = _convert_dict_to_message(response_payload) generations = [] gen = ChatGeneration( message=lc_msg, generation_info={ "finish_reason": "stop", **response_payload.get("body", {}), }, ) generations.append(gen) token_usage = response_payload.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model} return ChatResult(generations=generations, llm_output=llm_output) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.do(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk( text=res["result"], message=AIMessageChunk( content=msg.content, role="assistant", additional_kwargs=msg.additional_kwargs, ), ) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) async for res in await self.client.ado(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk( text=res["result"], message=AIMessageChunk( content=msg.content, role="assistant", additional_kwargs=msg.additional_kwargs, ), ) yield chunk if run_manager: await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
[]
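# A small sketch of the message conversion used by this chat model; it relies
# only on convert_message_to_dict as defined in the module above and on the
# langchain_core message types it already imports.
from langchain_core.messages import AIMessage, HumanMessage

from langchain_community.chat_models.baidu_qianfan_endpoint import (
    convert_message_to_dict,
)

history = [HumanMessage(content="Hello"), AIMessage(content="Hi, how can I help?")]
payload = [convert_message_to_dict(m) for m in history]
# payload == [
#     {"role": "user", "content": "Hello"},
#     {"role": "assistant", "content": "Hi, how can I help?"},
# ]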
2024-01-10
robocorp/langchain
libs~langchain~langchain~retrievers~web_research.py
import logging import re from typing import List, Optional from langchain_core.documents import Document from langchain_core.prompts import BasePromptTemplate, PromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.chains import LLMChain from langchain.chains.prompt_selector import ConditionalPromptSelector from langchain.document_loaders import AsyncHtmlLoader from langchain.document_transformers import Html2TextTransformer from langchain.llms import LlamaCpp from langchain.llms.base import BaseLLM from langchain.output_parsers.pydantic import PydanticOutputParser from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter from langchain.utilities import GoogleSearchAPIWrapper logger = logging.getLogger(__name__) class SearchQueries(BaseModel): """Search queries to research for the user's goal.""" queries: List[str] = Field( ..., description="List of search queries to look up on Google" ) DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate( input_variables=["question"], template="""<<SYS>> \n You are an assistant tasked with improving Google search \ results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \ are similar to this question. The output should be a numbered list of questions \ and each should have a question mark at the end: \n\n {question} [/INST]""", ) DEFAULT_SEARCH_PROMPT = PromptTemplate( input_variables=["question"], template="""You are an assistant tasked with improving Google search \ results. Generate THREE Google search queries that are similar to \ this question. The output should be a numbered list of questions and each \ should have a question mark at the end: {question}""", ) class LineList(BaseModel): """List of questions.""" lines: List[str] = Field(description="Questions") class QuestionListOutputParser(PydanticOutputParser): """Output parser for a list of numbered questions.""" def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = re.findall(r"\d+\..*?(?:\n|$)", text) return LineList(lines=lines) class WebResearchRetriever(BaseRetriever): """`Google Search API` retriever.""" # Inputs vectorstore: VectorStore = Field( ..., description="Vector store for storing web pages" ) llm_chain: LLMChain search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper") num_search_results: int = Field(1, description="Number of pages per Google search") text_splitter: TextSplitter = Field( RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50), description="Text splitter for splitting web pages into chunks", ) url_database: List[str] = Field( default_factory=list, description="List of processed URLs" ) @classmethod def from_llm( cls, vectorstore: VectorStore, llm: BaseLLM, search: GoogleSearchAPIWrapper, prompt: Optional[BasePromptTemplate] = None, num_search_results: int = 1, text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter( chunk_size=1500, chunk_overlap=150 ), ) -> "WebResearchRetriever": """Initialize from llm using default template. 
Args: vectorstore: Vector store for storing web pages llm: llm for search question generation search: GoogleSearchAPIWrapper prompt: prompt to generating search questions num_search_results: Number of pages per Google search text_splitter: Text splitter for splitting web pages into chunks Returns: WebResearchRetriever """ if not prompt: QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector( default_prompt=DEFAULT_SEARCH_PROMPT, conditionals=[ (lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT) ], ) prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm) # Use chat model prompt llm_chain = LLMChain( llm=llm, prompt=prompt, output_parser=QuestionListOutputParser(), ) return cls( vectorstore=vectorstore, llm_chain=llm_chain, search=search, num_search_results=num_search_results, text_splitter=text_splitter, ) def clean_search_query(self, query: str) -> str: # Some search tools (e.g., Google) will # fail to return results if query has a # leading digit: 1. "LangCh..." # Check if the first character is a digit if query[0].isdigit(): # Find the position of the first quote first_quote_pos = query.find('"') if first_quote_pos != -1: # Extract the part of the string after the quote query = query[first_quote_pos + 1 :] # Remove the trailing quote if present if query.endswith('"'): query = query[:-1] return query.strip() def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]: """Returns num_search_results pages per Google search.""" query_clean = self.clean_search_query(query) result = self.search.results(query_clean, num_search_results) return result def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """Search Google for documents related to the query input. Args: query: user query Returns: Relevant documents from all various urls. 
""" # Get search questions logger.info("Generating questions for Google Search ...") result = self.llm_chain({"question": query}) logger.info(f"Questions for Google Search (raw): {result}") questions = getattr(result["text"], "lines", []) logger.info(f"Questions for Google Search: {questions}") # Get urls logger.info("Searching for relevant urls...") urls_to_look = [] for query in questions: # Google search search_results = self.search_tool(query, self.num_search_results) logger.info("Searching for relevant urls...") logger.info(f"Search results: {search_results}") for res in search_results: if res.get("link", None): urls_to_look.append(res["link"]) # Relevant urls urls = set(urls_to_look) # Check for any new urls that we have not processed new_urls = list(urls.difference(self.url_database)) logger.info(f"New URLs to load: {new_urls}") # Load, split, and add new urls to vectorstore if new_urls: loader = AsyncHtmlLoader(new_urls, ignore_load_errors=True) html2text = Html2TextTransformer() logger.info("Indexing new urls...") docs = loader.load() docs = list(html2text.transform_documents(docs)) docs = self.text_splitter.split_documents(docs) self.vectorstore.add_documents(docs) self.url_database.extend(new_urls) # Search for relevant splits # TODO: make this async logger.info("Grabbing most relevant splits from urls...") docs = [] for query in questions: docs.extend(self.vectorstore.similarity_search(query)) # Get unique docs unique_documents_dict = { (doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs } unique_documents = list(unique_documents_dict.values()) return unique_documents async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> List[Document]: raise NotImplementedError
[ "<<SYS>> \n You are an assistant tasked with improving Google search results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \n\n {question} [/INST]", "question", "You are an assistant tasked with improving Google search results. Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: {question}" ]
2024-01-10
robocorp/langchain
libs~community~langchain_community~embeddings~baidu_qianfan_endpoint.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain_core.embeddings import Embeddings from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env logger = logging.getLogger(__name__) class QianfanEmbeddingsEndpoint(BaseModel, Embeddings): """`Baidu Qianfan Embeddings` embedding models.""" qianfan_ak: Optional[str] = None """Qianfan application apikey""" qianfan_sk: Optional[str] = None """Qianfan application secretkey""" chunk_size: int = 16 """Chunk size when multiple texts are input""" model: str = "Embedding-V1" """Model name you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu for now, we support Embedding-V1 and - Embedding-V1 (默认模型) - bge-large-en - bge-large-zh preset models are mapping to an endpoint. `model` will be ignored if `endpoint` is set """ endpoint: str = "" """Endpoint of the Qianfan Embedding, required if custom model used.""" client: Any """Qianfan client""" max_retries: int = 5 """Max reties times""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """ Validate whether qianfan_ak and qianfan_sk in the environment variables or configuration file are available or not. init qianfan embedding client with `ak`, `sk`, `model`, `endpoint` Args: values: a dictionary containing configuration information, must include the fields of qianfan_ak and qianfan_sk Returns: a dictionary containing configuration information. If qianfan_ak and qianfan_sk are not provided in the environment variables or configuration file,the original values will be returned; otherwise, values containing qianfan_ak and qianfan_sk will be returned. Raises: ValueError: qianfan package not found, please install it with `pip install qianfan` """ values["qianfan_ak"] = convert_to_secret_str( get_from_dict_or_env( values, "qianfan_ak", "QIANFAN_AK", default="", ) ) values["qianfan_sk"] = convert_to_secret_str( get_from_dict_or_env( values, "qianfan_sk", "QIANFAN_SK", default="", ) ) try: import qianfan params = { "model": values["model"], } if values["qianfan_ak"].get_secret_value() != "": params["ak"] = values["qianfan_ak"].get_secret_value() if values["qianfan_sk"].get_secret_value() != "": params["sk"] = values["qianfan_sk"].get_secret_value() if values["endpoint"] is not None and values["endpoint"] != "": params["endpoint"] = values["endpoint"] values["client"] = qianfan.Embedding(**params) except ImportError: raise ImportError( "qianfan package not found, please install it with " "`pip install qianfan`" ) return values def embed_query(self, text: str) -> List[float]: resp = self.embed_documents([text]) return resp[0] def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Embeds a list of text documents using the AutoVOT algorithm. Args: texts (List[str]): A list of text documents to embed. Returns: List[List[float]]: A list of embeddings for each document in the input list. Each embedding is represented as a list of float values. 
""" text_in_chunks = [ texts[i : i + self.chunk_size] for i in range(0, len(texts), self.chunk_size) ] lst = [] for chunk in text_in_chunks: resp = self.client.do(texts=chunk) lst.extend([res["embedding"] for res in resp["data"]]) return lst async def aembed_query(self, text: str) -> List[float]: embeddings = await self.aembed_documents([text]) return embeddings[0] async def aembed_documents(self, texts: List[str]) -> List[List[float]]: text_in_chunks = [ texts[i : i + self.chunk_size] for i in range(0, len(texts), self.chunk_size) ] lst = [] for chunk in text_in_chunks: resp = await self.client.ado(texts=chunk) for res in resp["data"]: lst.extend([res["embedding"]]) return lst
[]
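# The batching pattern used by embed_documents/aembed_documents above, shown on
# its own so the chunking behaviour is easy to see; chunk_size=16 matches the
# class default and the 40 sample texts are made up.
texts = [f"doc-{i}" for i in range(40)]
chunk_size = 16
text_in_chunks = [
    texts[i : i + chunk_size] for i in range(0, len(texts), chunk_size)
]
assert [len(chunk) for chunk in text_in_chunks] == [16, 16, 8]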
2024-01-10
robocorp/langchain
libs~langchain~tests~unit_tests~callbacks~test_manager.py
from langchain.callbacks.manager import __all__ EXPECTED_ALL = [ "BaseRunManager", "RunManager", "ParentRunManager", "AsyncRunManager", "AsyncParentRunManager", "CallbackManagerForLLMRun", "AsyncCallbackManagerForLLMRun", "CallbackManagerForChainRun", "AsyncCallbackManagerForChainRun", "CallbackManagerForToolRun", "AsyncCallbackManagerForToolRun", "CallbackManagerForRetrieverRun", "AsyncCallbackManagerForRetrieverRun", "CallbackManager", "CallbackManagerForChainGroup", "AsyncCallbackManager", "AsyncCallbackManagerForChainGroup", "tracing_v2_enabled", "collect_runs", "atrace_as_chain_group", "trace_as_chain_group", "handle_event", "ahandle_event", "env_var_is_set", "Callbacks", "get_openai_callback", "wandb_tracing_enabled", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL)
[]
2024-01-10
robocorp/langchain
libs~langchain~langchain~agents~agent_toolkits~vectorstore~toolkit.py
"""Toolkit for interacting with a vector store.""" from typing import List from langchain_core.language_models import BaseLanguageModel from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.vectorstores import VectorStore from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.llms.openai import OpenAI from langchain.tools import BaseTool from langchain.tools.vectorstore.tool import ( VectorStoreQATool, VectorStoreQAWithSourcesTool, ) class VectorStoreInfo(BaseModel): """Information about a VectorStore.""" vectorstore: VectorStore = Field(exclude=True) name: str description: str class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True class VectorStoreToolkit(BaseToolkit): """Toolkit for interacting with a Vector Store.""" vectorstore_info: VectorStoreInfo = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" description = VectorStoreQATool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_tool = VectorStoreQATool( name=self.vectorstore_info.name, description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) description = VectorStoreQAWithSourcesTool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_with_sources_tool = VectorStoreQAWithSourcesTool( name=f"{self.vectorstore_info.name}_with_sources", description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) return [qa_tool, qa_with_sources_tool] class VectorStoreRouterToolkit(BaseToolkit): """Toolkit for routing between Vector Stores.""" vectorstores: List[VectorStoreInfo] = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools: List[BaseTool] = [] for vectorstore_info in self.vectorstores: description = VectorStoreQATool.get_description( vectorstore_info.name, vectorstore_info.description ) qa_tool = VectorStoreQATool( name=vectorstore_info.name, description=description, vectorstore=vectorstore_info.vectorstore, llm=self.llm, ) tools.append(qa_tool) return tools
[]
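# get_tools above calls VectorStoreQATool.get_description on the class itself,
# so the description text can be previewed without constructing a vector store;
# the name and description strings here are placeholders.
from langchain.tools.vectorstore.tool import VectorStoreQATool

description = VectorStoreQATool.get_description(
    "state_of_union", "the most recent State of the Union address"
)
print(description)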
2024-01-10
robocorp/langchain
templates~cohere-librarian~cohere_librarian~router.py
from langchain.prompts import ChatPromptTemplate from langchain.schema.output_parser import StrOutputParser from langchain.schema.runnable import RunnableBranch from .blurb_matcher import book_rec_chain from .chat import chat from .library_info import library_info from .rag import librarian_rag chain = ( ChatPromptTemplate.from_template( """Given the user message below, classify it as either being about `recommendation`, `library` or `other`. '{message}' Respond with just one word. For example, if the message is about a book recommendation,respond with `recommendation`. """ ) | chat | StrOutputParser() ) def extract_op_field(x): return x["output_text"] branch = RunnableBranch( ( lambda x: "recommendation" in x["topic"].lower(), book_rec_chain | extract_op_field, ), ( lambda x: "library" in x["topic"].lower(), {"message": lambda x: x["message"]} | library_info, ), librarian_rag, ) branched_chain = {"topic": chain, "message": lambda x: x["message"]} | branch
[ "Given the user message below,\nclassify it as either being about `recommendation`, `library` or `other`.\n\n'{message}'\n\nRespond with just one word.\nFor example, if the message is about a book recommendation,respond with \n`recommendation`.\n" ]
2024-01-10
robocorp/langchain
templates~cohere-librarian~cohere_librarian~rag.py
from langchain.chat_models import ChatCohere
from langchain.retrievers import CohereRagRetriever

rag = CohereRagRetriever(llm=ChatCohere())


def get_docs_message(message):
    docs = rag.get_relevant_documents(message)
    message_doc = next(
        (x for x in docs if x.metadata.get("type") == "model_response"), None
    )
    return message_doc.page_content


def librarian_rag(x):
    return get_docs_message(x["message"])
[]
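# The "first matching document or None" lookup from get_docs_message above,
# shown with a stand-in document class so it runs without Cohere credentials.
from dataclasses import dataclass, field


@dataclass
class StubDoc:
    page_content: str
    metadata: dict = field(default_factory=dict)


docs = [
    StubDoc("cited passage", {"type": "citation"}),
    StubDoc("final answer", {"type": "model_response"}),
]
message_doc = next(
    (d for d in docs if d.metadata.get("type") == "model_response"), None
)
assert message_doc is not None and message_doc.page_content == "final answer"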
2024-01-10
robocorp/langchain
libs~core~langchain_core~caches.py
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Optional, Sequence

from langchain_core.outputs import Generation

RETURN_VAL_TYPE = Sequence[Generation]


class BaseCache(ABC):
    """Base interface for cache."""

    @abstractmethod
    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""

    @abstractmethod
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""

    @abstractmethod
    def clear(self, **kwargs: Any) -> None:
        """Clear cache that can take additional keyword arguments."""
[]
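# A minimal in-memory implementation of the BaseCache interface above, written
# as a sketch: a plain dict keyed by (prompt, llm_string). This is an
# illustration only, not the library's own InMemoryCache.
from typing import Any, Dict, Optional, Tuple

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.outputs import Generation


class DictCache(BaseCache):
    def __init__(self) -> None:
        self._store: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()


cache = DictCache()
cache.update("hi", "fake-llm-config", [Generation(text="hello")])
assert cache.lookup("hi", "fake-llm-config")[0].text == "hello"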
2024-01-10
robocorp/langchain
libs~langchain~langchain~evaluation~agents~trajectory_eval_chain.py
"""A chain for evaluating ReAct style agents. This chain is used to evaluate ReAct style agents by reasoning about the sequence of actions taken and their outcomes. It uses a language model chain (LLMChain) to generate the reasoning and scores. """ import re from typing import ( Any, Dict, List, Optional, Sequence, Tuple, TypedDict, Union, cast, ) from langchain_core.agents import AgentAction from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.output_parsers import BaseOutputParser from langchain_core.pydantic_v1 import Extra, Field from langchain_core.tools import BaseTool from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.llm import LLMChain from langchain.evaluation.agents.trajectory_eval_prompt import ( EVAL_CHAT_PROMPT, TOOL_FREE_EVAL_CHAT_PROMPT, ) from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain class TrajectoryEval(TypedDict): """A named tuple containing the score and reasoning for a trajectory.""" score: float """The score for the trajectory, normalized from 0 to 1.""" reasoning: str """The reasoning for the score.""" class TrajectoryOutputParser(BaseOutputParser): """Trajectory output parser.""" @property def _type(self) -> str: return "agent_trajectory" def parse(self, text: str) -> TrajectoryEval: """Parse the output text and extract the score and reasoning. Args: text (str): The output text to parse. Returns: TrajectoryEval: A named tuple containing the normalized score and reasoning. Raises: OutputParserException: If the score is not found in the output text or if the LLM's score is not a digit in the range 1-5. """ if "Score:" not in text: raise OutputParserException( f"Could not find score in model eval output: {text}" ) reasoning, score_str = text.split("Score: ", maxsplit=1) reasoning, score_str = reasoning.strip(), score_str.strip() # Use regex to extract the score. # This will get the number in the string, even if it is a float or more than 10. # E.g. "Score: 1" will return 1, "Score: 3.5" will return 3.5, and # "Score: 10" will return 10. # The score should be an integer digit in the range 1-5. _score = re.search(r"(\d+(\.\d+)?)", score_str) # If the score is not found or is a float, raise an exception. if _score is None or "." in _score.group(1): raise OutputParserException( f"Score is not an integer digit in the range 1-5: {text}" ) score = int(_score.group(1)) # If the score is not in the range 1-5, raise an exception. if not 1 <= score <= 5: raise OutputParserException( f"Score is not a digit in the range 1-5: {text}" ) normalized_score = (score - 1) / 4 return TrajectoryEval(score=normalized_score, reasoning=reasoning) class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): """A chain for evaluating ReAct style agents. This chain is used to evaluate ReAct style agents by reasoning about the sequence of actions taken and their outcomes. Example: .. code-block:: python from langchain.agents import AgentType, initialize_agent from langchain.chat_models import ChatOpenAI from langchain.evaluation import TrajectoryEvalChain from langchain.tools import tool @tool def geography_answers(country: str, question: str) -> str: \"\"\"Very helpful answers to geography questions.\"\"\" return f"{country}? IDK - We may never know {question}." 
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) agent = initialize_agent( tools=[geography_answers], llm=llm, agent=AgentType.OPENAI_FUNCTIONS, return_intermediate_steps=True, ) question = "How many dwell in the largest minor region in Argentina?" response = agent(question) eval_chain = TrajectoryEvalChain.from_llm( llm=llm, agent_tools=[geography_answers], return_reasoning=True ) result = eval_chain.evaluate_agent_trajectory( input=question, agent_trajectory=response["intermediate_steps"], prediction=response["output"], reference="Paris", ) print(result["score"]) # 0 """ # noqa: E501 agent_tools: Optional[List[BaseTool]] = None """A list of tools available to the agent.""" eval_chain: LLMChain """The language model chain used for evaluation.""" output_parser: TrajectoryOutputParser = Field( default_factory=TrajectoryOutputParser ) """The output parser used to parse the output.""" return_reasoning: bool = False # :meta private: """DEPRECATED. Reasoning always returned.""" class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @property def requires_reference(self) -> bool: """Whether this evaluator requires a reference label.""" return False @property def _tools_description(self) -> str: """Get the description of the agent tools. Returns: str: The description of the agent tools. """ if self.agent_tools is None: return "" return "\n\n".join( [ f"""Tool {i}: {tool.name} Description: {tool.description}""" for i, tool in enumerate(self.agent_tools, 1) ] ) @staticmethod def get_agent_trajectory( steps: Union[str, Sequence[Tuple[AgentAction, str]]], ) -> str: """Get the agent trajectory as a formatted string. Args: steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. Returns: str: The formatted agent trajectory. """ if isinstance(steps, str): return steps return "\n\n".join( [ f"""Step {i}: Tool used: {action.tool} Tool input: {action.tool_input} Tool output: {output}""" for i, (action, output) in enumerate(steps, 1) ] ) @staticmethod def _format_reference(reference: Optional[str]) -> str: """Format the reference text. Args: reference (str): The reference text. Returns: str: The formatted reference text. """ if not reference: return "" return f""" The following is the expected answer. Use this to measure correctness: [GROUND_TRUTH] {reference} [END_GROUND_TRUTH] """ @classmethod def from_llm( cls, llm: BaseLanguageModel, agent_tools: Optional[Sequence[BaseTool]] = None, output_parser: Optional[TrajectoryOutputParser] = None, **kwargs: Any, ) -> "TrajectoryEvalChain": """Create a TrajectoryEvalChain object from a language model chain. Args: llm (BaseChatModel): The language model chain. agent_tools (Optional[Sequence[BaseTool]]): A list of tools available to the agent. output_parser (Optional[TrajectoryOutputParser]): The output parser used to parse the chain output into a score. Returns: TrajectoryEvalChain: The TrajectoryEvalChain object. """ if not isinstance(llm, BaseChatModel): raise NotImplementedError( "Only chat models supported by the current trajectory eval" ) if agent_tools: prompt = EVAL_CHAT_PROMPT else: prompt = TOOL_FREE_EVAL_CHAT_PROMPT eval_chain = LLMChain(llm=llm, prompt=prompt) return cls( agent_tools=agent_tools, eval_chain=eval_chain, output_parser=output_parser or TrajectoryOutputParser(), **kwargs, ) @property def input_keys(self) -> List[str]: """Get the input keys for the chain. Returns: List[str]: The input keys. 
""" return ["question", "agent_trajectory", "answer", "reference"] @property def output_keys(self) -> List[str]: """Get the output keys for the chain. Returns: List[str]: The output keys. """ return ["score", "reasoning"] def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if "reference" not in inputs: inputs["reference"] = self._format_reference(inputs.get("reference")) return super().prep_inputs(inputs) def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the chain and generate the output. Args: inputs (Dict[str, str]): The input values for the chain. run_manager (Optional[CallbackManagerForChainRun]): The callback manager for the chain run. Returns: Dict[str, Any]: The output values of the chain. """ chain_input = {**inputs} if self.agent_tools: chain_input["tool_descriptions"] = self._tools_description _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() raw_output = self.eval_chain.run( chain_input, callbacks=_run_manager.get_child() ) return cast(dict, self.output_parser.parse(raw_output)) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the chain and generate the output. Args: inputs (Dict[str, str]): The input values for the chain. run_manager (Optional[CallbackManagerForChainRun]): The callback manager for the chain run. Returns: Dict[str, Any]: The output values of the chain. """ chain_input = {**inputs} if self.agent_tools: chain_input["tool_descriptions"] = self._tools_description _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() raw_output = await self.eval_chain.arun( chain_input, callbacks=_run_manager.get_child() ) return cast(dict, self.output_parser.parse(raw_output)) def _evaluate_agent_trajectory( self, *, prediction: str, input: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate a trajectory. Args: prediction (str): The final predicted response. input (str): The input to the agent. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. reference (Optional[str]): The reference answer. callbacks (Callbacks): Callbacks to use for this chain run. Returns: dict: The evaluation result, which includes the score and optionally the reasoning for reaching that. """ inputs = { "question": input, "agent_trajectory": self.get_agent_trajectory(agent_trajectory), "answer": prediction, "reference": reference, } return self.__call__( inputs=inputs, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, return_only_outputs=True, ) async def _aevaluate_agent_trajectory( self, *, prediction: str, input: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Asynchronously evaluate a trajectory. Args: prediction (str): The final predicted response. input (str): The input to the agent. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. 
reference (Optional[str]): The reference answer. callbacks (Callbacks): Callbacks to use for this chain run. Returns: dict: The evaluation result, which includes the score and optionally the reasoning for reaching that. """ inputs = { "question": input, "agent_trajectory": self.get_agent_trajectory(agent_trajectory), "answer": prediction, "reference": reference, } return await self.acall( inputs=inputs, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, return_only_outputs=True, )
[]
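# A sketch of the score-parsing step in isolation; the evaluator output string
# is invented to match the "Score: N" format the parser above expects.
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryOutputParser

parser = TrajectoryOutputParser()
result = parser.parse(
    "The agent chose the right tool and gave a correct answer.\n\nScore: 4"
)
assert result["score"] == 0.75  # 4 on the 1-5 scale, normalized to [0, 1]
assert result["reasoning"].startswith("The agent chose the right tool")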
2024-01-10
robocorp/langchain
libs~langchain~tests~unit_tests~output_parsers~test_yaml_parser.py
"""Test yamlOutputParser""" from enum import Enum from typing import Optional from langchain_core.exceptions import OutputParserException from langchain_core.pydantic_v1 import BaseModel, Field from langchain.output_parsers.yaml import YamlOutputParser class Actions(Enum): SEARCH = "Search" CREATE = "Create" UPDATE = "Update" DELETE = "Delete" class TestModel(BaseModel): action: Actions = Field(description="Action to be performed") action_input: str = Field(description="Input to be used in the action") additional_fields: Optional[str] = Field( description="Additional fields", default=None ) for_new_lines: str = Field(description="To be used to test newlines") # Prevent pytest from trying to run tests on TestModel TestModel.__test__ = False # type: ignore[attr-defined] DEF_RESULT = """```yaml --- action: Update action_input: The yamlOutputParser class is powerful additional_fields: null for_new_lines: | not_escape_newline: escape_newline: ```""" # action 'update' with a lowercase 'u' to test schema validation failure. DEF_RESULT_FAIL = """```yaml action: update action_input: The yamlOutputParser class is powerful additional_fields: null ```""" DEF_EXPECTED_RESULT = TestModel( action=Actions.UPDATE, action_input="The yamlOutputParser class is powerful", additional_fields=None, for_new_lines="not_escape_newline:\n escape_newline: \n", ) def test_yaml_output_parser() -> None: """Test yamlOutputParser.""" yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser( pydantic_object=TestModel ) result = yaml_parser.parse(DEF_RESULT) print("parse_result:", result) assert DEF_EXPECTED_RESULT == result def test_yaml_output_parser_fail() -> None: """Test YamlOutputParser where completion result fails schema validation.""" yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser( pydantic_object=TestModel ) try: yaml_parser.parse(DEF_RESULT_FAIL) except OutputParserException as e: print("parse_result:", e) assert "Failed to parse TestModel from completion" in str(e) else: assert False, "Expected OutputParserException"
[]
2024-01-10
robocorp/langchain
libs~community~langchain_community~llms~aphrodite.py
from typing import Any, Dict, List, Optional from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models import BaseLLM from langchain_core.outputs import Generation, LLMResult from langchain_core.pydantic_v1 import Field, root_validator class Aphrodite(BaseLLM): """Aphrodite language model.""" model: str = "" """The name or path of a HuggingFace Transformers model.""" tensor_parallel_size: Optional[int] = 1 """The number of GPUs to use for distributed execution with tensor parallelism.""" trust_remote_code: Optional[bool] = False """Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.""" n: int = 1 """Number of output sequences to return for the given prompt.""" best_of: Optional[int] = None """Number of output sequences that are generated from the prompt. From these `best_of` sequences, the top `n` sequences are returned. `best_of` must be >= `n`. This is treated as the beam width when `use_beam_search` is True. By default, `best_of` is set to `n`.""" presence_penalty: float = 0.0 """Float that penalizes new tokens based on whether they appear in the generated text so far. Values > 0 encourage the model to generate new tokens, while values < 0 encourage the model to repeat tokens.""" frequency_penalty: float = 0.0 """Float that penalizes new tokens based on their frequency in the generated text so far. Applied additively to the logits.""" repetition_penalty: float = 1.0 """Float that penalizes new tokens based on their frequency in the generated text so far. Applied multiplicatively to the logits.""" temperature: float = 1.0 """Float that controls the randomness of the sampling. Lower values make the model more deterministic, while higher values make the model more random. Zero is equivalent to greedy sampling.""" top_p: float = 1.0 """Float that controls the cumulative probability of the top tokens to consider. Must be in (0, 1]. Set to 1.0 to consider all tokens.""" top_k: int = -1 """Integer that controls the number of top tokens to consider. Set to -1 to consider all tokens (disabled).""" top_a: float = 0.0 """Float that controls the cutoff for Top-A sampling. Exact cutoff is top_a*max_prob**2. Must be in [0,inf], 0 to disable.""" min_p: float = 0.0 """Float that controls the cutoff for min-p sampling. Exact cutoff is min_p*max_prob. Must be in [0,1], 0 to disable.""" tfs: float = 1.0 """Float that controls the cumulative approximate curvature of the distribution to retain for Tail Free Sampling. Must be in (0, 1]. Set to 1.0 to disable.""" eta_cutoff: float = 0.0 """Float that controls the cutoff threshold for Eta sampling (a form of entropy adaptive truncation sampling). Threshold is calculated as `min(eta, sqrt(eta)*entropy(probs)). Specified in units of 1e-4. Set to 0 to disable.""" epsilon_cutoff: float = 0.0 """Float that controls the cutoff threshold for Epsilon sampling (simple probability threshold truncation). Specified in units of 1e-4. Set to 0 to disable.""" typical_p: float = 1.0 """Float that controls the cumulative probability of tokens closest in surprise to the expected surprise to consider. Must be in (0, 1]. Set to 1 to disable.""" mirostat_mode: int = 0 """The mirostat mode to use. 0 for no mirostat, 2 for mirostat v2. Mode 1 is not supported.""" mirostat_tau: float = 0.0 """The target 'surprisal' that mirostat works towards. 
Range [0, inf).""" use_beam_search: bool = False """Whether to use beam search instead of sampling.""" length_penalty: float = 1.0 """Float that penalizes sequences based on their length. Used only when `use_beam_search` is True.""" early_stopping: bool = False """Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where a heuristic is applied to the generation stops when it is very unlikely to find better candidates; `never`, where the beam search procedure only stops where there cannot be better candidates (canonical beam search algorithm).""" stop: Optional[List[str]] = None """List of strings that stop the generation when they are generated. The returned output will not contain the stop tokens.""" stop_token_ids: Optional[List[int]] = None """List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens.""" ignore_eos: bool = False """Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.""" max_tokens: int = 512 """Maximum number of tokens to generate per output sequence.""" logprobs: Optional[int] = None """Number of log probabilities to return per output token.""" prompt_logprobs: Optional[int] = None """Number of log probabilities to return per prompt token.""" custom_token_bans: Optional[List[int]] = None """List of token IDs to ban from generating.""" skip_special_tokens: bool = True """Whether to skip special tokens in the output. Defaults to True.""" spaces_between_special_tokens: bool = True """Whether to add spaces between special tokens in the output. Defaults to True.""" logit_bias: Optional[Dict[str, float]] = None """List of LogitsProcessors to change the probability of token prediction at runtime.""" dtype: str = "auto" """The data type for the model weights and activations.""" download_dir: Optional[str] = None """Directory to download and load the weights. (Default to the default cache dir of huggingface)""" quantization: Optional[str] = None """Quantization mode to use. Can be one of `awq` or `gptq`.""" aphrodite_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `aphrodite.LLM` call not explicitly specified.""" client: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: from aphrodite import LLM as AphroditeModel except ImportError: raise ImportError( "Could not import aphrodite-engine python package. " "Please install it with `pip install aphrodite-engine`." 
) # aphrodite_kwargs = values["aphrodite_kwargs"] # if values.get("quantization"): # aphrodite_kwargs["quantization"] = values["quantization"] values["client"] = AphroditeModel( model=values["model"], tensor_parallel_size=values["tensor_parallel_size"], trust_remote_code=values["trust_remote_code"], dtype=values["dtype"], download_dir=values["download_dir"], **values["aphrodite_kwargs"], ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling aphrodite.""" return { "n": self.n, "best_of": self.best_of, "max_tokens": self.max_tokens, "top_k": self.top_k, "top_p": self.top_p, "top_a": self.top_a, "min_p": self.min_p, "temperature": self.temperature, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "repetition_penalty": self.repetition_penalty, "tfs": self.tfs, "eta_cutoff": self.eta_cutoff, "epsilon_cutoff": self.epsilon_cutoff, "typical_p": self.typical_p, "mirostat_mode": self.mirostat_mode, "mirostat_tau": self.mirostat_tau, "length_penalty": self.length_penalty, "early_stopping": self.early_stopping, "use_beam_search": self.use_beam_search, "stop": self.stop, "ignore_eos": self.ignore_eos, "logprobs": self.logprobs, "prompt_logprobs": self.prompt_logprobs, "custom_token_bans": self.custom_token_bans, "skip_special_tokens": self.skip_special_tokens, "spaces_between_special_tokens": self.spaces_between_special_tokens, "logit_bias": self.logit_bias, } def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" from aphrodite import SamplingParams # build sampling parameters params = {**self._default_params, **kwargs, "stop": stop} if "logit_bias" in params: del params["logit_bias"] sampling_params = SamplingParams(**params) # call the model outputs = self.client.generate(prompts, sampling_params) generations = [] for output in outputs: text = output.outputs[0].text generations.append([Generation(text=text)]) return LLMResult(generations=generations) @property def _llm_type(self) -> str: """Return type of llm.""" return "aphrodite"
[]
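# The parameter-merge pattern used in _generate above, shown with plain dicts so
# it runs without the aphrodite-engine package installed; the values are
# placeholders, not recommended settings.
default_params = {"temperature": 1.0, "top_p": 1.0, "max_tokens": 512, "stop": None}
call_kwargs = {"temperature": 0.2}
stop = ["###"]

params = {**default_params, **call_kwargs, "stop": stop}
# Call-time kwargs override the model defaults, and `stop` always comes from the
# _generate argument.
assert params == {"temperature": 0.2, "top_p": 1.0, "max_tokens": 512, "stop": ["###"]}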