Columns:
- code: string, length 141 to 78.9k characters
- apis: sequence, 1 to 23 items
- extract_api: string, length 142 to 73.2k characters
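Each record below stores the raw Python source in the code field, the list of langchain entry points it uses in the apis field, and the per-call extraction results in the extract_api field. What follows is a minimal sketch, not part of the dataset itself, of how one extract_api value could be decoded; the dictionary keys and comments are assumptions inferred from the sample rows, not an official schema.

import ast

def parse_extract_api(extract_api: str) -> list:
    """Decode the literal list of extracted-call tuples stored in one record."""
    records = []
    for call in ast.literal_eval(extract_api):
        span, api_name, call_name, call_args, call_src, *rest = call
        records.append({
            "span": span,        # (start, end) character offsets of the call in the code field
            "api": api_name,     # dotted API path, e.g. "langchain.agents.load_tools"
            "name": call_name,   # bare callable name
            "args": call_args,   # (positional args, keyword args) captured as source text
            "source": call_src,  # argument text as written in the original file
            "import": rest[-1].strip(),  # originating import statement
        })
    return records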
from langchain.agents import load_tools from langchain.tools import AIPluginTool from parse import * from langchain.chat_models.base import BaseChatModel from langchain.chat_models import ChatOpenAI, AzureChatOpenAI import utils def create_plugins_static(): plugins = [ AIPluginTool.from_plugin_url( "https://www.klarna.com/.well-known/ai-plugin.json" ) ] plugins += load_tools(["requests_all"]) return plugins def create_chat_model(openai_config: utils.OpenAIConfig) -> BaseChatModel: if openai_config.is_azure_openai(): return AzureChatOpenAI( temperature=0, openai_api_base=openai_config.AZURE_OPENAI_API_ENDPOINT, openai_api_version=openai_config.AZURE_OPENAI_API_VERSION if openai_config.AZURE_OPENAI_API_VERSION else "2023-03-15-preview", deployment_name=openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME, openai_api_key=openai_config.OPENAI_API_KEY, openai_api_type=openai_config.OPENAI_API_TYPE, ) else: return ChatOpenAI( temperature=0, openai_api_key=openai_config.OPENAI_API_KEY, openai_organization=openai_config.OPENAI_ORG_ID, model_name=openai_config.OPENAI_MODEL_ID, )
[ "langchain.tools.AIPluginTool.from_plugin_url", "langchain.chat_models.AzureChatOpenAI", "langchain.agents.load_tools", "langchain.chat_models.ChatOpenAI" ]
[((410, 438), 'langchain.agents.load_tools', 'load_tools', (["['requests_all']"], {}), "(['requests_all'])\n", (420, 438), False, 'from langchain.agents import load_tools\n'), ((285, 371), 'langchain.tools.AIPluginTool.from_plugin_url', 'AIPluginTool.from_plugin_url', (['"""https://www.klarna.com/.well-known/ai-plugin.json"""'], {}), "(\n 'https://www.klarna.com/.well-known/ai-plugin.json')\n", (313, 371), False, 'from langchain.tools import AIPluginTool\n'), ((590, 984), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'temperature': '(0)', 'openai_api_base': 'openai_config.AZURE_OPENAI_API_ENDPOINT', 'openai_api_version': "(openai_config.AZURE_OPENAI_API_VERSION if openai_config.\n AZURE_OPENAI_API_VERSION else '2023-03-15-preview')", 'deployment_name': 'openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME', 'openai_api_key': 'openai_config.OPENAI_API_KEY', 'openai_api_type': 'openai_config.OPENAI_API_TYPE'}), "(temperature=0, openai_api_base=openai_config.\n AZURE_OPENAI_API_ENDPOINT, openai_api_version=openai_config.\n AZURE_OPENAI_API_VERSION if openai_config.AZURE_OPENAI_API_VERSION else\n '2023-03-15-preview', deployment_name=openai_config.\n AZURE_OPENAI_API_DEPLOYMENT_NAME, openai_api_key=openai_config.\n OPENAI_API_KEY, openai_api_type=openai_config.OPENAI_API_TYPE)\n", (605, 984), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n'), ((1093, 1263), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'openai_config.OPENAI_API_KEY', 'openai_organization': 'openai_config.OPENAI_ORG_ID', 'model_name': 'openai_config.OPENAI_MODEL_ID'}), '(temperature=0, openai_api_key=openai_config.OPENAI_API_KEY,\n openai_organization=openai_config.OPENAI_ORG_ID, model_name=\n openai_config.OPENAI_MODEL_ID)\n', (1103, 1263), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n')]
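The apis field of each record (for example, the four langchain entry points listed in the record above) lends itself to simple aggregation across the dataset. A hypothetical sketch, assuming the records have been loaded into an iterable of dicts named rows in which apis is a list of strings:

from collections import Counter

def count_api_usage(rows) -> Counter:
    """Count how often each langchain API appears across all records."""
    usage = Counter()
    for row in rows:
        usage.update(row["apis"])
    return usage

# Example: print the ten most frequently used APIs.
# print(count_api_usage(rows).most_common(10))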
import re import string from collections import Counter import numpy as np import pandas as pd import tqdm from langchain.evaluation.qa import QAEvalChain from langchain.llms import OpenAI from algos.PWS import PWS_Base, PWS_Extra from algos.notool import CoT, IO from algos.react import ReactBase def normalize_answer(s): def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): normalized_prediction = normalize_answer(prediction) normalized_ground_truth = normalize_answer(ground_truth) if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth: return 0 if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth: return 0 prediction_tokens = normalized_prediction.split() ground_truth_tokens = normalized_ground_truth.split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def llm_accuracy_score(query, prediction, ground_truth): data = [{ 'query': query, 'answer': ground_truth, }] pred = [{ 'query': query, 'answer': ground_truth, 'result': prediction, }] eval_chain = QAEvalChain.from_llm(OpenAI(temperature=0)) graded_outputs = eval_chain.evaluate(data, pred) return 1 if graded_outputs[0]['text'].strip() == 'CORRECT' else 0 class Evaluator: def __init__(self, task, dataset, algo, maxtry=3): assert task in ["hotpot_qa", "trivia_qa", "gsm8k", "physics_question", "disfl_qa", "sports_understanding", "strategy_qa", "sotu_qa"] assert isinstance(dataset, pd.DataFrame) assert isinstance(algo, (PWS_Base, PWS_Extra, ReactBase, IO, CoT)) self.task = task self.dataset = dataset self.algo = algo self.maxtry = maxtry self.failed_response = self._failed_response() self.eval_data = self._initialize_eval_dict() def run(self): print("\n******************* Start Evaluation *******************\n") if self.task in ["hotpot_qa", "sotu_qa"]: for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "fever": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["claim"][i] label = self.dataset["label"][i] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "trivia_qa": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i]["value"] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "gsm8k": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i].split("#### ")[1] for _ in range(self.maxtry): try: response = 
self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task in ["physics_question", "sports_understanding", "strategy_qa"]: for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["input"][i] label = self.dataset["target"][i] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) else: raise NotImplementedError return self._get_avg_results(), self.eval_data def _initialize_eval_dict(self): data = {} for d in ["label", "preds", "em", "f1", "acc", "wall_time", "total_tokens", "total_cost", "steps", "token_cost", "tool_cost", "planner_log", "solver_log"]: data[d] = [] return data def _update_eval_dict(self, question, label, response): pred = self._parse_prediction(response["output"]) self.eval_data["label"] += [label] self.eval_data["preds"] += [pred] self.eval_data["em"] += [self.get_metrics(question, label, pred)["em"]] self.eval_data["f1"] += [self.get_metrics(question, label, pred)["f1"]] self.eval_data["acc"] += [self.get_metrics(question, label, pred)["acc"]] self.eval_data["wall_time"] += [response["wall_time"]] self.eval_data["total_tokens"] += [response["total_tokens"]] self.eval_data["total_cost"] += [response["total_cost"]] self.eval_data["steps"] += [response["steps"]] self.eval_data["token_cost"] += [response["token_cost"]] self.eval_data["tool_cost"] += [response["tool_cost"]] if "planner_log" in response: self.eval_data["planner_log"] += [response["planner_log"]] if "solver_log" in response: self.eval_data["solver_log"] += [response["solver_log"]] def _get_avg_results(self): result = {} result["avg_em"] = np.nanmean(self.eval_data["em"]) result["avg_f1"] = np.nanmean(self.eval_data["f1"]) result["avg_acc"] = np.nanmean(self.eval_data["acc"]) result["avg_wall_time"] = np.nanmean(self.eval_data["wall_time"]) result["avg_total_tokens"] = np.nanmean(self.eval_data["total_tokens"]) result["avg_total_cost"] = np.nanmean(self.eval_data["total_cost"]) result["avg_steps"] = np.nanmean(self.eval_data["steps"]) result["avg_token_cost"] = np.nanmean(self.eval_data["token_cost"]) result["avg_tool_cost"] = np.nanmean(self.eval_data["tool_cost"]) return result def get_metrics(self, query, label, pred): if pred is None: return {'em': 0, 'f1': 0} norm_label = normalize_answer(label) norm_pred = normalize_answer(pred) em = (norm_pred == norm_label) f1 = f1_score(norm_pred, norm_label) acc = llm_accuracy_score(query, pred, label) return {'em': em, 'f1': f1, 'acc': acc} def _parse_prediction(self, output): if isinstance(self.algo, IO): return str(output).strip("\n") elif isinstance(self.algo, CoT): return str(output).split("\n")[-1].replace("Answer:", "") elif isinstance(self.algo, ReactBase): return str(output).strip("\n") elif isinstance(self.algo, PWS_Base): return str(output).strip("\n") elif isinstance(self.algo, PWS_Extra): return str(output).strip("\n") def _failed_response(self): resposne = {} for key in ["input", "output", "wall_time", "total_tokens", "total_cost", "steps", "token_cost", "tool_cost"]: resposne[key] = np.nan return resposne
[ "langchain.llms.OpenAI" ]
[((373, 410), 're.sub', 're.sub', (['"""\\\\b(a|an|the)\\\\b"""', '""" """', 'text'], {}), "('\\\\b(a|an|the)\\\\b', ' ', text)\n", (379, 410), False, 'import re\n'), ((1278, 1304), 'collections.Counter', 'Counter', (['prediction_tokens'], {}), '(prediction_tokens)\n', (1285, 1304), False, 'from collections import Counter\n'), ((1307, 1335), 'collections.Counter', 'Counter', (['ground_truth_tokens'], {}), '(ground_truth_tokens)\n', (1314, 1335), False, 'from collections import Counter\n'), ((1874, 1895), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1880, 1895), False, 'from langchain.llms import OpenAI\n'), ((6841, 6873), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['em']"], {}), "(self.eval_data['em'])\n", (6851, 6873), True, 'import numpy as np\n'), ((6901, 6933), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['f1']"], {}), "(self.eval_data['f1'])\n", (6911, 6933), True, 'import numpy as np\n'), ((6962, 6995), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['acc']"], {}), "(self.eval_data['acc'])\n", (6972, 6995), True, 'import numpy as np\n'), ((7030, 7069), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['wall_time']"], {}), "(self.eval_data['wall_time'])\n", (7040, 7069), True, 'import numpy as np\n'), ((7107, 7149), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_tokens']"], {}), "(self.eval_data['total_tokens'])\n", (7117, 7149), True, 'import numpy as np\n'), ((7185, 7225), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_cost']"], {}), "(self.eval_data['total_cost'])\n", (7195, 7225), True, 'import numpy as np\n'), ((7256, 7291), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['steps']"], {}), "(self.eval_data['steps'])\n", (7266, 7291), True, 'import numpy as np\n'), ((7327, 7367), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['token_cost']"], {}), "(self.eval_data['token_cost'])\n", (7337, 7367), True, 'import numpy as np\n'), ((7402, 7441), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['tool_cost']"], {}), "(self.eval_data['tool_cost'])\n", (7412, 7441), True, 'import numpy as np\n')]
from datetime import date, datetime from decimal import Decimal from langchain.chains import LLMChain from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, ) from sqlalchemy import text from dataherald.model.chat_model import ChatModel from dataherald.repositories.database_connections import DatabaseConnectionRepository from dataherald.repositories.prompts import PromptRepository from dataherald.sql_database.base import SQLDatabase, SQLInjectionError from dataherald.types import LLMConfig, NLGeneration, SQLGeneration HUMAN_TEMPLATE = """Given a Question, a Sql query and the sql query result try to answer the question If the sql query result doesn't answer the question just say 'I don't know' Answer the question given the sql query and the sql query result. Question: {prompt} SQL query: {sql_query} SQL query result: {sql_query_result} """ class GeneratesNlAnswer: def __init__(self, system, storage, llm_config: LLMConfig): self.system = system self.storage = storage self.llm_config = llm_config self.model = ChatModel(self.system) def execute( self, sql_generation: SQLGeneration, top_k: int = 100, ) -> NLGeneration: prompt_repository = PromptRepository(self.storage) prompt = prompt_repository.find_by_id(sql_generation.prompt_id) db_connection_repository = DatabaseConnectionRepository(self.storage) database_connection = db_connection_repository.find_by_id( prompt.db_connection_id ) self.llm = self.model.get_model( database_connection=database_connection, temperature=0, model_name=self.llm_config.llm_name, api_base=self.llm_config.api_base, ) database = SQLDatabase.get_sql_engine(database_connection, True) if sql_generation.status == "INVALID": return NLGeneration( sql_generation_id=sql_generation.id, text="I don't know, the SQL query is invalid.", created_at=datetime.now(), ) try: query = database.parser_to_filter_commands(sql_generation.sql) with database._engine.connect() as connection: execution = connection.execute(text(query)) result = execution.fetchmany(top_k) rows = [] for row in result: modified_row = {} for key, value in zip(row.keys(), row, strict=True): if type(value) in [ date, datetime, ]: # Check if the value is an instance of datetime.date modified_row[key] = str(value) elif ( type(value) is Decimal ): # Check if the value is an instance of decimal.Decimal modified_row[key] = float(value) else: modified_row[key] = value rows.append(modified_row) except SQLInjectionError as e: raise SQLInjectionError( "Sensitive SQL keyword detected in the query." ) from e human_message_prompt = HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE) chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt]) chain = LLMChain(llm=self.llm, prompt=chat_prompt) nl_resp = chain.invoke( { "prompt": prompt.text, "sql_query": sql_generation.sql, "sql_query_result": "\n".join([str(row) for row in rows]), } ) return NLGeneration( sql_generation_id=sql_generation.id, llm_config=self.llm_config, text=nl_resp["text"], created_at=datetime.now(), )
[ "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.chains.LLMChain", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((1101, 1123), 'dataherald.model.chat_model.ChatModel', 'ChatModel', (['self.system'], {}), '(self.system)\n', (1110, 1123), False, 'from dataherald.model.chat_model import ChatModel\n'), ((1272, 1302), 'dataherald.repositories.prompts.PromptRepository', 'PromptRepository', (['self.storage'], {}), '(self.storage)\n', (1288, 1302), False, 'from dataherald.repositories.prompts import PromptRepository\n'), ((1411, 1453), 'dataherald.repositories.database_connections.DatabaseConnectionRepository', 'DatabaseConnectionRepository', (['self.storage'], {}), '(self.storage)\n', (1439, 1453), False, 'from dataherald.repositories.database_connections import DatabaseConnectionRepository\n'), ((1813, 1866), 'dataherald.sql_database.base.SQLDatabase.get_sql_engine', 'SQLDatabase.get_sql_engine', (['database_connection', '(True)'], {}), '(database_connection, True)\n', (1839, 1866), False, 'from dataherald.sql_database.base import SQLDatabase, SQLInjectionError\n'), ((3295, 3351), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['HUMAN_TEMPLATE'], {}), '(HUMAN_TEMPLATE)\n', (3335, 3351), False, 'from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n'), ((3374, 3430), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[human_message_prompt]'], {}), '([human_message_prompt])\n', (3406, 3430), False, 'from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n'), ((3447, 3489), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'chat_prompt'}), '(llm=self.llm, prompt=chat_prompt)\n', (3455, 3489), False, 'from langchain.chains import LLMChain\n'), ((3160, 3225), 'dataherald.sql_database.base.SQLInjectionError', 'SQLInjectionError', (['"""Sensitive SQL keyword detected in the query."""'], {}), "('Sensitive SQL keyword detected in the query.')\n", (3177, 3225), False, 'from dataherald.sql_database.base import SQLDatabase, SQLInjectionError\n'), ((3898, 3912), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3910, 3912), False, 'from datetime import date, datetime\n'), ((2092, 2106), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2104, 2106), False, 'from datetime import date, datetime\n'), ((2317, 2328), 'sqlalchemy.text', 'text', (['query'], {}), '(query)\n', (2321, 2328), False, 'from sqlalchemy import text\n')]
import os import re import urllib import urllib.parse import urllib.request from typing import Any, List, Tuple, Union from urllib.parse import urlparse import requests from bs4 import BeautifulSoup from langchain.chains import LLMChain from langchain.prompts import Prompt from langchain.tools import BaseTool from langchain.utilities import GoogleSerperAPIWrapper from langchain.vectorstores.base import VectorStoreRetriever from loguru import logger from typing_extensions import Literal import sherpa_ai.config as cfg from sherpa_ai.config.task_config import AgentConfig from sherpa_ai.output_parser import TaskAction def get_tools(memory, config): tools = [] # tools.append(ContextTool(memory=memory)) tools.append(UserInputTool()) if cfg.SERPER_API_KEY is not None: search_tool = SearchTool(config=config) tools.append(search_tool) else: logger.warning( "No SERPER_API_KEY found in environment variables, skipping SearchTool" ) return tools class SearchArxivTool(BaseTool): name = "Arxiv Search" description = ( "Access all the papers from Arxiv to search for domain-specific scientific publication." # noqa: E501 "Only use this tool when you need information in the scientific paper." ) def _run(self, query: str) -> str: top_k = 10 logger.debug(f"Search query: {query}") query = urllib.parse.quote_plus(query) url = ( "http://export.arxiv.org/api/query?search_query=all:" + query.strip() + "&start=0&max_results=" + str(top_k) ) data = urllib.request.urlopen(url) xml_content = data.read().decode("utf-8") summary_pattern = r"<summary>(.*?)</summary>" summaries = re.findall(summary_pattern, xml_content, re.DOTALL) title_pattern = r"<title>(.*?)</title>" titles = re.findall(title_pattern, xml_content, re.DOTALL) result_list = [] for i in range(len(titles)): result_list.append( "Title: " + titles[i] + "\n" + "Summary: " + summaries[i] ) logger.debug(f"Arxiv Search Result: {result_list}") return " ".join(result_list) def _arun(self, query: str) -> str: raise NotImplementedError("SearchArxivTool does not support async run") class SearchTool(BaseTool): name = "Search" config = AgentConfig() top_k: int = 10 description = ( "Access the internet to search for the information. Only use this tool when " "you cannot find the information using internal search." 
) def _run( self, query: str, require_meta=False ) -> Union[str, Tuple[str, List[dict]]]: result = "" if self.config.search_domains: query_list = [ query + " Site: " + str(i) for i in self.config.search_domains ] if len(query_list) >= 5: query_list = query_list[:5] result = ( result + "Warning: Only the first 5 URLs are taken into consideration.\n" ) # noqa: E501 else: query_list = [query] if self.config.invalid_domains: invalid_domain_string = ", ".join(self.config.invalid_domains) result = ( result + f"Warning: The doman {invalid_domain_string} is invalid and is not taken into consideration.\n" # noqa: E501 ) # noqa: E501 top_k = int(self.top_k / len(query_list)) if require_meta: meta = [] for query in query_list: cur_result = self._run_single_query(query, top_k, require_meta) if require_meta: result += "\n" + cur_result[0] meta.extend(cur_result[1]) else: result += "\n" + cur_result if require_meta: result = (result, meta) return result def _run_single_query( self, query: str, top_k: int, require_meta=False ) -> Union[str, Tuple[str, List[dict]]]: logger.debug(f"Search query: {query}") google_serper = GoogleSerperAPIWrapper() search_results = google_serper._google_serper_api_results(query) logger.debug(f"Google Search Result: {search_results}") # case 1: answerBox in the result dictionary if search_results.get("answerBox", False): answer_box = search_results.get("answerBox", {}) if answer_box.get("answer"): answer = answer_box.get("answer") elif answer_box.get("snippet"): answer = answer_box.get("snippet").replace("\n", " ") elif answer_box.get("snippetHighlighted"): answer = answer_box.get("snippetHighlighted") title = search_results["organic"][0]["title"] link = search_results["organic"][0]["link"] response = "Answer: " + answer meta = [{"Document": answer, "Source": link}] if require_meta: return response, meta else: return response + "\nLink:" + link # case 2: knowledgeGraph in the result dictionary snippets = [] if search_results.get("knowledgeGraph", False): kg = search_results.get("knowledgeGraph", {}) title = kg.get("title") entity_type = kg.get("type") if entity_type: snippets.append(f"{title}: {entity_type}.") description = kg.get("description") if description: snippets.append(description) for attribute, value in kg.get("attributes", {}).items(): snippets.append(f"{title} {attribute}: {value}.") search_type: Literal["news", "search", "places", "images"] = "search" result_key_for_type = { "news": "news", "places": "places", "images": "images", "search": "organic", } # case 3: general search results for result in search_results[result_key_for_type[search_type]][:top_k]: if "snippet" in result: snippets.append(result["snippet"]) for attribute, value in result.get("attributes", {}).items(): snippets.append(f"{attribute}: {value}.") if len(snippets) == 0: return ["No good Google Search Result was found"] result = [] meta = [] for i in range(len(search_results["organic"][:top_k])): r = search_results["organic"][i] single_result = r["title"] + r["snippet"] # If the links are not considered explicitly, add it to the search result # so that it can be considered by the LLM if not require_meta: single_result += "\nLink:" + r["link"] result.append(single_result) meta.append( { "Document": "Description: " + r["title"] + r["snippet"], "Source": r["link"], } ) full_result = "\n".join(result) # answer = " ".join(snippets) if ( "knowledgeGraph" in search_results and "description" in search_results["knowledgeGraph"] and "descriptionLink" in search_results["knowledgeGraph"] ): answer = ( 
"Description: " + search_results["knowledgeGraph"]["title"] + search_results["knowledgeGraph"]["description"] + "\nLink:" + search_results["knowledgeGraph"]["descriptionLink"] ) full_result = answer + "\n\n" + full_result if require_meta: return full_result, meta else: return full_result def _arun(self, query: str) -> str: raise NotImplementedError("SearchTool does not support async run") class ContextTool(BaseTool): name = "Context Search" description = ( "Access internal technical documentation for AI related projects, including" + "Fixie, LangChain, GPT index, GPTCache, GPT4ALL, autoGPT, db-GPT, AgentGPT, sherpa." # noqa: E501 + "Only use this tool if you need information for these projects specifically." ) memory: VectorStoreRetriever def _run(self, query: str, need_meta=False) -> str: docs = self.memory.get_relevant_documents(query) result = "" metadata = [] for doc in docs: result += ( "Document" + doc.page_content + "\nLink:" + doc.metadata.get("source", "") + "\n" ) if need_meta: metadata.append( { "Document": doc.page_content, "Source": doc.metadata.get("source", ""), } ) if need_meta: return result, metadata else: return result def _arun(self, query: str) -> str: raise NotImplementedError("ContextTool does not support async run") class UserInputTool(BaseTool): # TODO: Make an action for the user input name = "UserInput" description = ( "Access the user input for the task." "You use this tool if you need more context and would like to ask clarifying questions to solve the task" # noqa: E501 ) def _run(self, query: str) -> str: return input(query) def _arun(self, query: str) -> str: raise NotImplementedError("UserInputTool does not support async run")
[ "langchain.utilities.GoogleSerperAPIWrapper" ]
[((2438, 2451), 'sherpa_ai.config.task_config.AgentConfig', 'AgentConfig', ([], {}), '()\n', (2449, 2451), False, 'from sherpa_ai.config.task_config import AgentConfig\n'), ((894, 986), 'loguru.logger.warning', 'logger.warning', (['"""No SERPER_API_KEY found in environment variables, skipping SearchTool"""'], {}), "(\n 'No SERPER_API_KEY found in environment variables, skipping SearchTool')\n", (908, 986), False, 'from loguru import logger\n'), ((1368, 1406), 'loguru.logger.debug', 'logger.debug', (['f"""Search query: {query}"""'], {}), "(f'Search query: {query}')\n", (1380, 1406), False, 'from loguru import logger\n'), ((1423, 1453), 'urllib.parse.quote_plus', 'urllib.parse.quote_plus', (['query'], {}), '(query)\n', (1446, 1453), False, 'import urllib\n'), ((1652, 1679), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (1674, 1679), False, 'import urllib\n'), ((1805, 1856), 're.findall', 're.findall', (['summary_pattern', 'xml_content', 're.DOTALL'], {}), '(summary_pattern, xml_content, re.DOTALL)\n', (1815, 1856), False, 'import re\n'), ((1922, 1971), 're.findall', 're.findall', (['title_pattern', 'xml_content', 're.DOTALL'], {}), '(title_pattern, xml_content, re.DOTALL)\n', (1932, 1971), False, 'import re\n'), ((2164, 2215), 'loguru.logger.debug', 'logger.debug', (['f"""Arxiv Search Result: {result_list}"""'], {}), "(f'Arxiv Search Result: {result_list}')\n", (2176, 2215), False, 'from loguru import logger\n'), ((4164, 4202), 'loguru.logger.debug', 'logger.debug', (['f"""Search query: {query}"""'], {}), "(f'Search query: {query}')\n", (4176, 4202), False, 'from loguru import logger\n'), ((4227, 4251), 'langchain.utilities.GoogleSerperAPIWrapper', 'GoogleSerperAPIWrapper', ([], {}), '()\n', (4249, 4251), False, 'from langchain.utilities import GoogleSerperAPIWrapper\n'), ((4333, 4388), 'loguru.logger.debug', 'logger.debug', (['f"""Google Search Result: {search_results}"""'], {}), "(f'Google Search Result: {search_results}')\n", (4345, 4388), False, 'from loguru import logger\n')]
from dotenv import load_dotenv from langchain_core.prompts import PromptTemplate load_dotenv() from langchain import hub from langchain.agents import create_react_agent, AgentExecutor from langchain_core.tools import Tool from langchain_openai import ChatOpenAI from tools.tools import get_profile_url def lookup(name: str) -> str: llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") template = """ given the name {name_of_person} I want you to find a link to their Twitter profile page, and extract from it their username In Your Final answer only the person's username""" tools_for_agent_twitter = [ Tool( name="Crawl Google 4 Twitter profile page", func=get_profile_url, description="useful for when you need get the Twitter Page URL", ), ] # agent = initialize_agent( # tools_for_agent_twitter, # llm, # agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, # verbose=True, # ) prompt_template = PromptTemplate( input_variables=["name_of_person"], template=template ) react_prompt = hub.pull("hwchase17/react") agent = create_react_agent( llm=llm, tools=tools_for_agent_twitter, prompt=react_prompt ) agent_executor = AgentExecutor( agent=agent, tools=tools_for_agent_twitter, verbose=True ) result = agent_executor.invoke( input={"input": prompt_template.format_prompt(name_of_person=name)} ) twitter_username = result["output"] return twitter_username
[ "langchain_openai.ChatOpenAI", "langchain.agents.AgentExecutor", "langchain.agents.create_react_agent", "langchain_core.tools.Tool", "langchain_core.prompts.PromptTemplate", "langchain.hub.pull" ]
[((82, 95), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (93, 95), False, 'from dotenv import load_dotenv\n'), ((346, 399), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (356, 399), False, 'from langchain_openai import ChatOpenAI\n'), ((1031, 1100), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['name_of_person']", 'template': 'template'}), "(input_variables=['name_of_person'], template=template)\n", (1045, 1100), False, 'from langchain_core.prompts import PromptTemplate\n'), ((1135, 1162), 'langchain.hub.pull', 'hub.pull', (['"""hwchase17/react"""'], {}), "('hwchase17/react')\n", (1143, 1162), False, 'from langchain import hub\n'), ((1175, 1254), 'langchain.agents.create_react_agent', 'create_react_agent', ([], {'llm': 'llm', 'tools': 'tools_for_agent_twitter', 'prompt': 'react_prompt'}), '(llm=llm, tools=tools_for_agent_twitter, prompt=react_prompt)\n', (1193, 1254), False, 'from langchain.agents import create_react_agent, AgentExecutor\n'), ((1290, 1361), 'langchain.agents.AgentExecutor', 'AgentExecutor', ([], {'agent': 'agent', 'tools': 'tools_for_agent_twitter', 'verbose': '(True)'}), '(agent=agent, tools=tools_for_agent_twitter, verbose=True)\n', (1303, 1361), False, 'from langchain.agents import create_react_agent, AgentExecutor\n'), ((648, 787), 'langchain_core.tools.Tool', 'Tool', ([], {'name': '"""Crawl Google 4 Twitter profile page"""', 'func': 'get_profile_url', 'description': '"""useful for when you need get the Twitter Page URL"""'}), "(name='Crawl Google 4 Twitter profile page', func=get_profile_url,\n description='useful for when you need get the Twitter Page URL')\n", (652, 787), False, 'from langchain_core.tools import Tool\n')]
import logging, json, os from Utilities.envVars import * from Utilities.envVars import * # Import required libraries from Utilities.cogSearchVsRetriever import CognitiveSearchVsRetriever from langchain.chains import RetrievalQA from langchain import PromptTemplate from Utilities.evaluator import indexDocs import json import time import pandas as pd from collections import namedtuple from Utilities.evaluator import searchEvaluatorRunIdIndex import uuid import tempfile from Utilities.azureBlob import getBlob, getFullPath from langchain.document_loaders import PDFMinerLoader, UnstructuredFileLoader from Utilities.evaluator import createEvaluatorResultIndex, searchEvaluatorRunIdIndex from langchain.chat_models import AzureChatOpenAI, ChatOpenAI from langchain.evaluation.qa import QAEvalChain from Utilities.evaluator import searchEvaluatorRunIndex, createEvaluatorRunIndex, getEvaluatorResult RunDocs = namedtuple('RunDoc', ['evalatorQaData', 'totalQuestions', 'promptStyle', 'documentId', 'splitMethods', 'chunkSizes', 'overlaps', 'retrieverType', 'reEvaluate', 'topK', 'model', 'fileName', 'embeddingModelType', 'temperature', 'tokenLength']) def getPrompts(): template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. {context} Question: {question} Helpful Answer:""" QaChainPrompt = PromptTemplate(input_variables=["context", "question"],template=template,) template = """You are a teacher grading a quiz. You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. Example Format: QUESTION: question here STUDENT ANSWER: student's answer here TRUE ANSWER: true answer here GRADE: Correct or Incorrect here Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. If the student answers that there is no specific information provided in the context, then the answer is Incorrect. Begin! QUESTION: {query} STUDENT ANSWER: {result} TRUE ANSWER: {answer} GRADE:""" promptStyleFast = PromptTemplate(input_variables=["query", "result", "answer"], template=template) template = """You are a teacher grading a quiz. You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. You are also asked to identify potential sources of bias in the question and in the true answer. Example Format: QUESTION: question here STUDENT ANSWER: student's answer here TRUE ANSWER: true answer here GRADE: Correct or Incorrect here Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. If the student answers that there is no specific information provided in the context, then the answer is Incorrect. Begin! 
QUESTION: {query} STUDENT ANSWER: {result} TRUE ANSWER: {answer} GRADE: Your response should be as follows: GRADE: (Correct or Incorrect) (line break) JUSTIFICATION: (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect, identify potential sources of bias in the QUESTION, and identify potential sources of bias in the TRUE ANSWER. Use one or two sentences maximum. Keep the answer as concise as possible.) """ promptStyleBias = PromptTemplate(input_variables=["query", "result", "answer"], template=template) template = """You are assessing a submitted student answer to a question relative to the true answer based on the provided criteria: *** QUESTION: {query} *** STUDENT ANSWER: {result} *** TRUE ANSWER: {answer} *** Criteria: relevance: Is the submission referring to a real quote from the text?" conciseness: Is the answer concise and to the point?" correct: Is the answer correct?" *** Does the submission meet the criterion? First, write out in a step by step manner your reasoning about the criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print "Correct" or "Incorrect" (without quotes or punctuation) on its own line corresponding to the correct answer. Reasoning: """ promptStyleGrading = PromptTemplate(input_variables=["query", "result", "answer"], template=template) template = """You are a teacher grading a quiz. You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. Example Format: QUESTION: question here STUDENT ANSWER: student's answer here TRUE ANSWER: true answer here GRADE: Correct or Incorrect here Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. If the student answers that there is no specific information provided in the context, then the answer is Incorrect. Begin! QUESTION: {query} STUDENT ANSWER: {result} TRUE ANSWER: {answer} GRADE: Your response should be as follows: GRADE: (Correct or Incorrect) (line break) JUSTIFICATION: (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect. Use one or two sentences maximum. Keep the answer as concise as possible.) """ promptStyleDefault = PromptTemplate(input_variables=["query", "result", "answer"], template=template) template = """ Given the question: \n {query} Here are some documents retrieved in response to the question: \n {result} And here is the answer to the question: \n {answer} Criteria: relevance: Are the retrieved documents relevant to the question and do they support the answer?" Do the retrieved documents meet the criterion? Print "Correct" (without quotes or punctuation) if the retrieved context are relevant or "Incorrect" if not (without quotes or punctuation) on its own line. """ gradeDocsPromptFast = PromptTemplate(input_variables=["query", "result", "answer"], template=template) template = """ Given the question: \n {query} Here are some documents retrieved in response to the question: \n {result} And here is the answer to the question: \n {answer} Criteria: relevance: Are the retrieved documents relevant to the question and do they support the answer?" 
Your response should be as follows: GRADE: (Correct or Incorrect, depending if the retrieved documents meet the criterion) (line break) JUSTIFICATION: (Write out in a step by step manner your reasoning about the criterion to be sure that your conclusion is correct. Use one or two sentences maximum. Keep the answer as concise as possible.) """ gradeDocsPromptDefault = PromptTemplate(input_variables=["query", "result", "answer"], template=template) return QaChainPrompt, promptStyleFast, promptStyleBias, promptStyleGrading, promptStyleDefault, gradeDocsPromptFast, gradeDocsPromptDefault def gradeModelAnswer(llm, predictedDataSet, predictions, promptStyle, promptStyleFast, promptStyleBias, promptStyleGrading, promptStyleDefault): if promptStyle == "Fast": prompt = promptStyleFast elif promptStyle == "Descriptive w/ bias check": prompt = promptStyleBias elif promptStyle == "OpenAI grading prompt": prompt = promptStyleGrading else: prompt = promptStyleDefault # Note: GPT-4 grader is advised by OAI evalChain = QAEvalChain.from_llm(llm=llm, prompt=prompt) gradedOutputs = evalChain.evaluate(predictedDataSet, predictions, question_key="question", prediction_key="result") return gradedOutputs def gradeModelRetrieval(llm, getDataSet, predictions, gradeDocsPrompt, gradeDocsPromptFast, gradeDocsPromptDefault): if gradeDocsPrompt == "Fast": prompt = gradeDocsPromptFast else: prompt = gradeDocsPromptDefault # Note: GPT-4 grader is advised by OAI evalChain = QAEvalChain.from_llm(llm=llm,prompt=prompt) gradedOutputs = evalChain.evaluate(getDataSet, predictions, question_key="question", prediction_key="result") return gradedOutputs def blobLoad(blobConnectionString, blobContainer, blobName): readBytes = getBlob(blobConnectionString, blobContainer, blobName) downloadPath = os.path.join(tempfile.gettempdir(), blobName) os.makedirs(os.path.dirname(tempfile.gettempdir()), exist_ok=True) try: with open(downloadPath, "wb") as file: file.write(readBytes) except Exception as e: logging.error(e) logging.info("File created " + downloadPath) if (blobName.endswith(".pdf")): loader = PDFMinerLoader(downloadPath) rawDocs = loader.load() fullPath = getFullPath(blobConnectionString, blobContainer, blobName) for doc in rawDocs: doc.metadata['source'] = fullPath return rawDocs def runEvaluator(llm, evaluatorQaData, totalQuestions, chain, retriever, promptStyle, promptStyleFast, promptStyleBias, promptStyleGrading, promptStyleDefault, gradeDocsPromptFast, gradeDocsPromptDefault) -> list: d = pd.DataFrame(columns=['question', 'answer', 'predictedAnswer', 'answerScore', 'retrievalScore', 'latency']) for i in range(int(totalQuestions)): predictions = [] retrievedDocs = [] gtDataSet = [] latency = [] currentDataSet = evaluatorQaData[i] try: startTime = time.time() predictions.append(chain({"query": currentDataSet["question"]}, return_only_outputs=True)) gtDataSet.append(currentDataSet) endTime = time.time() elapsedTime = endTime - startTime latency.append(elapsedTime) except: predictions.append({'result': 'Error in prediction'}) print("Error in prediction") # Extract text from retrieved docs retrievedDocText = "" docs = retriever.get_relevant_documents(currentDataSet["question"]) for i, doc in enumerate(docs): retrievedDocText += "Doc %s: " % str(i+1) + \ doc.page_content + " " # Log retrieved = {"question": currentDataSet["question"], "answer": currentDataSet["answer"], "result": retrievedDocText} retrievedDocs.append(retrieved) # Grade gradedAnswer = gradeModelAnswer(llm, gtDataSet, predictions, promptStyle, promptStyleFast, 
promptStyleBias, promptStyleGrading, promptStyleDefault) gradedRetrieval = gradeModelRetrieval(llm, gtDataSet, retrievedDocs, promptStyle, gradeDocsPromptFast, gradeDocsPromptDefault) # Assemble output # Summary statistics dfOutput = {'question': currentDataSet['question'], 'answer': currentDataSet['answer'], 'predictedAnswer': predictions[0]['result'], 'answerScore': [{'score': 1 if "Incorrect" not in text else 0, 'justification': text} for text in [g['text'] for g in gradedAnswer]], 'retrievalScore': [{'score': 1 if "Incorrect" not in text else 0, 'justification': text} for text in [g['text'] for g in gradedRetrieval]], 'latency': latency} #yield dfOutput # Add to dataframe d = pd.concat([d, pd.DataFrame(dfOutput)], axis=0) d_dict = d.to_dict('records') return d_dict def main(runDocs: RunDocs) -> str: evaluatorQaData,totalQuestions,promptStyle,documentId,splitMethods,chunkSizes,overlaps,retrieverType,reEvaluate,topK,model,fileName, embeddingModelType, temperature, tokenLength = runDocs evaluatorDataIndexName = "evaluatordata" evaluatorRunIndexName = "evaluatorrun" evaluatorRunResultIndexName = "evaluatorrunresult" qaChainPrompt, promptStyleFast, promptStyleBias, promptStyleGrading, promptStyleDefault, gradeDocsPromptFast, gradeDocsPromptDefault = getPrompts() logging.info("Python HTTP trigger function processed a request.") if (embeddingModelType == 'azureopenai'): llm = AzureChatOpenAI( azure_endpoint=OpenAiEndPoint, api_version=OpenAiVersion, azure_deployment=OpenAiChat, temperature=temperature, api_key=OpenAiKey, max_tokens=tokenLength) logging.info("LLM Setup done") elif embeddingModelType == "openai": llm = ChatOpenAI(temperature=temperature, api_key=OpenAiApiKey, model_name="gpt-3.5-turbo", max_tokens=tokenLength) # Select retriever createEvaluatorResultIndex(SearchService, SearchKey, evaluatorRunResultIndexName) # Check if we already have runId for this document r = searchEvaluatorRunIdIndex(SearchService, SearchKey, evaluatorRunResultIndexName, documentId) if r.get_count() == 0: runId = str(uuid.uuid4()) else: for run in r: runId = run['runId'] break for splitMethod in splitMethods: for chunkSize in chunkSizes: for overlap in overlaps: # Verify if we have created the Run ID r = searchEvaluatorRunIndex(SearchService, SearchKey, evaluatorRunResultIndexName, documentId, retrieverType, promptStyle, splitMethod, chunkSize, overlap) if r.get_count() == 0 or reEvaluate: # Create the Run ID print("Processing: ", documentId, retrieverType, promptStyle, splitMethod, chunkSize, overlap) runIdData = [] subRunId = str(uuid.uuid4()) retriever = CognitiveSearchVsRetriever(contentKey="contentVector", serviceName=SearchService, apiKey=SearchKey, indexName=evaluatorDataIndexName, topK=topK, splitMethod = splitMethod, model = model, chunkSize = chunkSize, overlap = overlap, openAiEndPoint = OpenAiEndPoint, openAiKey = OpenAiKey, openAiVersion = OpenAiVersion, openAiApiKey = OpenAiApiKey, documentId = documentId, openAiEmbedding=OpenAiEmbedding, returnFields=["id", "content", "sourceFile", "splitMethod", "chunkSize", "overlap", "model", "modelType", "documentId"] ) vectorStoreChain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs={"prompt": qaChainPrompt}) runEvaluations = runEvaluator(llm, evaluatorQaData, totalQuestions, vectorStoreChain, retriever, promptStyle, promptStyleFast, promptStyleBias, promptStyleGrading, promptStyleDefault, gradeDocsPromptFast, gradeDocsPromptDefault) #yield runEvaluations runEvaluationData = [] for runEvaluation in 
runEvaluations: runEvaluationData.append({ "id": str(uuid.uuid4()), "runId": runId, "subRunId": subRunId, "documentId": documentId, "retrieverType": retrieverType, "promptStyle": promptStyle, "splitMethod": splitMethod, "chunkSize": chunkSize, "overlap": overlap, "question": runEvaluation['question'], "answer": runEvaluation['answer'], "predictedAnswer": runEvaluation['predictedAnswer'], "answerScore": json.dumps(runEvaluation['answerScore']), "retrievalScore": json.dumps(runEvaluation['retrievalScore']), "latency": str(runEvaluation['latency']), }) indexDocs(SearchService, SearchKey, evaluatorRunResultIndexName, runEvaluationData) return "Success"
[ "langchain.evaluation.qa.QAEvalChain.from_llm", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI", "langchain.document_loaders.PDFMinerLoader", "langchain.chat_models.AzureChatOpenAI", "langchain.PromptTemplate" ]
[((911, 1164), 'collections.namedtuple', 'namedtuple', (['"""RunDoc"""', "['evalatorQaData', 'totalQuestions', 'promptStyle', 'documentId',\n 'splitMethods', 'chunkSizes', 'overlaps', 'retrieverType', 'reEvaluate',\n 'topK', 'model', 'fileName', 'embeddingModelType', 'temperature',\n 'tokenLength']"], {}), "('RunDoc', ['evalatorQaData', 'totalQuestions', 'promptStyle',\n 'documentId', 'splitMethods', 'chunkSizes', 'overlaps', 'retrieverType',\n 'reEvaluate', 'topK', 'model', 'fileName', 'embeddingModelType',\n 'temperature', 'tokenLength'])\n", (921, 1164), False, 'from collections import namedtuple\n'), ((1626, 1700), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (1640, 1700), False, 'from langchain import PromptTemplate\n'), ((2601, 2686), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (2615, 2686), False, 'from langchain import PromptTemplate\n'), ((4106, 4191), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (4120, 4191), False, 'from langchain import PromptTemplate\n'), ((5082, 5167), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (5096, 5167), False, 'from langchain import PromptTemplate\n'), ((6378, 6463), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (6392, 6463), False, 'from langchain import PromptTemplate\n'), ((7054, 7139), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (7068, 7139), False, 'from langchain import PromptTemplate\n'), ((7904, 7989), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result', 'answer']", 'template': 'template'}), "(input_variables=['query', 'result', 'answer'], template=template\n )\n", (7918, 7989), False, 'from langchain import PromptTemplate\n'), ((8618, 8662), 'langchain.evaluation.qa.QAEvalChain.from_llm', 'QAEvalChain.from_llm', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (8638, 8662), False, 'from langchain.evaluation.qa import QAEvalChain\n'), ((9269, 9313), 'langchain.evaluation.qa.QAEvalChain.from_llm', 'QAEvalChain.from_llm', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (9289, 9313), False, 'from langchain.evaluation.qa import QAEvalChain\n'), ((9654, 9708), 'Utilities.azureBlob.getBlob', 'getBlob', (['blobConnectionString', 'blobContainer', 'blobName'], {}), '(blobConnectionString, blobContainer, blobName)\n', (9661, 9708), False, 'from Utilities.azureBlob import getBlob, getFullPath\n'), ((9992, 10036), 'logging.info', 'logging.info', (["('File created ' + downloadPath)"], {}), "('File created ' + downloadPath)\n", (10004, 10036), False, 'import logging, json, os\n'), ((10164, 10222), 'Utilities.azureBlob.getFullPath', 'getFullPath', 
(['blobConnectionString', 'blobContainer', 'blobName'], {}), '(blobConnectionString, blobContainer, blobName)\n', (10175, 10222), False, 'from Utilities.azureBlob import getBlob, getFullPath\n'), ((10566, 10677), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['question', 'answer', 'predictedAnswer', 'answerScore', 'retrievalScore',\n 'latency']"}), "(columns=['question', 'answer', 'predictedAnswer',\n 'answerScore', 'retrievalScore', 'latency'])\n", (10578, 10677), True, 'import pandas as pd\n'), ((13425, 13490), 'logging.info', 'logging.info', (['"""Python HTTP trigger function processed a request."""'], {}), "('Python HTTP trigger function processed a request.')\n", (13437, 13490), False, 'import logging, json, os\n'), ((14167, 14252), 'Utilities.evaluator.createEvaluatorResultIndex', 'createEvaluatorResultIndex', (['SearchService', 'SearchKey', 'evaluatorRunResultIndexName'], {}), '(SearchService, SearchKey,\n evaluatorRunResultIndexName)\n', (14193, 14252), False, 'from Utilities.evaluator import createEvaluatorResultIndex, searchEvaluatorRunIdIndex\n'), ((14312, 14408), 'Utilities.evaluator.searchEvaluatorRunIdIndex', 'searchEvaluatorRunIdIndex', (['SearchService', 'SearchKey', 'evaluatorRunResultIndexName', 'documentId'], {}), '(SearchService, SearchKey,\n evaluatorRunResultIndexName, documentId)\n', (14337, 14408), False, 'from Utilities.evaluator import createEvaluatorResultIndex, searchEvaluatorRunIdIndex\n'), ((9741, 9762), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (9760, 9762), False, 'import tempfile\n'), ((10090, 10118), 'langchain.document_loaders.PDFMinerLoader', 'PDFMinerLoader', (['downloadPath'], {}), '(downloadPath)\n', (10104, 10118), False, 'from langchain.document_loaders import PDFMinerLoader, UnstructuredFileLoader\n'), ((13563, 13741), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'azure_endpoint': 'OpenAiEndPoint', 'api_version': 'OpenAiVersion', 'azure_deployment': 'OpenAiChat', 'temperature': 'temperature', 'api_key': 'OpenAiKey', 'max_tokens': 'tokenLength'}), '(azure_endpoint=OpenAiEndPoint, api_version=OpenAiVersion,\n azure_deployment=OpenAiChat, temperature=temperature, api_key=OpenAiKey,\n max_tokens=tokenLength)\n', (13578, 13741), False, 'from langchain.chat_models import AzureChatOpenAI, ChatOpenAI\n'), ((13891, 13921), 'logging.info', 'logging.info', (['"""LLM Setup done"""'], {}), "('LLM Setup done')\n", (13903, 13921), False, 'import logging, json, os\n'), ((9806, 9827), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (9825, 9827), False, 'import tempfile\n'), ((9970, 9986), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (9983, 9986), False, 'import logging, json, os\n'), ((10897, 10908), 'time.time', 'time.time', ([], {}), '()\n', (10906, 10908), False, 'import time\n'), ((11079, 11090), 'time.time', 'time.time', ([], {}), '()\n', (11088, 11090), False, 'import time\n'), ((13981, 14095), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'api_key': 'OpenAiApiKey', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'tokenLength'}), "(temperature=temperature, api_key=OpenAiApiKey, model_name=\n 'gpt-3.5-turbo', max_tokens=tokenLength)\n", (13991, 14095), False, 'from langchain.chat_models import AzureChatOpenAI, ChatOpenAI\n'), ((14452, 14464), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14462, 14464), False, 'import uuid\n'), ((12809, 12831), 'pandas.DataFrame', 'pd.DataFrame', (['dfOutput'], {}), '(dfOutput)\n', (12821, 12831), True, 
'import pandas as pd\n'), ((14735, 14894), 'Utilities.evaluator.searchEvaluatorRunIndex', 'searchEvaluatorRunIndex', (['SearchService', 'SearchKey', 'evaluatorRunResultIndexName', 'documentId', 'retrieverType', 'promptStyle', 'splitMethod', 'chunkSize', 'overlap'], {}), '(SearchService, SearchKey,\n evaluatorRunResultIndexName, documentId, retrieverType, promptStyle,\n splitMethod, chunkSize, overlap)\n', (14758, 14894), False, 'from Utilities.evaluator import searchEvaluatorRunIndex, createEvaluatorRunIndex, getEvaluatorResult\n'), ((15269, 15805), 'Utilities.cogSearchVsRetriever.CognitiveSearchVsRetriever', 'CognitiveSearchVsRetriever', ([], {'contentKey': '"""contentVector"""', 'serviceName': 'SearchService', 'apiKey': 'SearchKey', 'indexName': 'evaluatorDataIndexName', 'topK': 'topK', 'splitMethod': 'splitMethod', 'model': 'model', 'chunkSize': 'chunkSize', 'overlap': 'overlap', 'openAiEndPoint': 'OpenAiEndPoint', 'openAiKey': 'OpenAiKey', 'openAiVersion': 'OpenAiVersion', 'openAiApiKey': 'OpenAiApiKey', 'documentId': 'documentId', 'openAiEmbedding': 'OpenAiEmbedding', 'returnFields': "['id', 'content', 'sourceFile', 'splitMethod', 'chunkSize', 'overlap',\n 'model', 'modelType', 'documentId']"}), "(contentKey='contentVector', serviceName=\n SearchService, apiKey=SearchKey, indexName=evaluatorDataIndexName, topK\n =topK, splitMethod=splitMethod, model=model, chunkSize=chunkSize,\n overlap=overlap, openAiEndPoint=OpenAiEndPoint, openAiKey=OpenAiKey,\n openAiVersion=OpenAiVersion, openAiApiKey=OpenAiApiKey, documentId=\n documentId, openAiEmbedding=OpenAiEmbedding, returnFields=['id',\n 'content', 'sourceFile', 'splitMethod', 'chunkSize', 'overlap', 'model',\n 'modelType', 'documentId'])\n", (15295, 15805), False, 'from Utilities.cogSearchVsRetriever import CognitiveSearchVsRetriever\n'), ((16345, 16472), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'chain_type_kwargs': "{'prompt': qaChainPrompt}"}), "(llm=llm, chain_type='stuff', retriever=\n retriever, chain_type_kwargs={'prompt': qaChainPrompt})\n", (16372, 16472), False, 'from langchain.chains import RetrievalQA\n'), ((18132, 18219), 'Utilities.evaluator.indexDocs', 'indexDocs', (['SearchService', 'SearchKey', 'evaluatorRunResultIndexName', 'runEvaluationData'], {}), '(SearchService, SearchKey, evaluatorRunResultIndexName,\n runEvaluationData)\n', (18141, 18219), False, 'from Utilities.evaluator import indexDocs\n'), ((15206, 15218), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (15216, 15218), False, 'import uuid\n'), ((17870, 17910), 'json.dumps', 'json.dumps', (["runEvaluation['answerScore']"], {}), "(runEvaluation['answerScore'])\n", (17880, 17910), False, 'import json\n'), ((17962, 18005), 'json.dumps', 'json.dumps', (["runEvaluation['retrievalScore']"], {}), "(runEvaluation['retrievalScore'])\n", (17972, 18005), False, 'import json\n'), ((17133, 17145), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17143, 17145), False, 'import uuid\n')]
from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.chat_models import ChatOpenAI from virl.config import cfg from virl.utils.common_utils import print_prompt, print_answer, parse_answer_to_json from .gpt_chat import GPTChat from .azure_gpt import AzureGPTChat __all__ = { 'GPT': GPTChat, 'AzureGPT': AzureGPTChat, } def build_chatbot(name): return __all__[name](cfg) class UnifiedChat(object): chatbots = None def __init__(self): UnifiedChat.chatbots = { name: build_chatbot(name) for name in cfg.LLM.NAMES } @classmethod def ask(cls, question, **kwargs): print_prompt(question) chatbot = kwargs.get('chatbot', cfg.LLM.DEFAULT) answer = cls.chatbots[chatbot].ask(question, **kwargs) print_answer(answer) return answer @classmethod def search(cls, question, json=False): llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0) tools = load_tools(["serpapi"], llm=llm) agent = initialize_agent(tools, llm, verbose=True) answer = agent.run(question) if json: answer = parse_answer_to_json(answer) return answer
[ "langchain.agents.initialize_agent", "langchain.agents.load_tools", "langchain.chat_models.ChatOpenAI" ]
[((679, 701), 'virl.utils.common_utils.print_prompt', 'print_prompt', (['question'], {}), '(question)\n', (691, 701), False, 'from virl.utils.common_utils import print_prompt, print_answer, parse_answer_to_json\n'), ((830, 850), 'virl.utils.common_utils.print_answer', 'print_answer', (['answer'], {}), '(answer)\n', (842, 850), False, 'from virl.utils.common_utils import print_prompt, print_answer, parse_answer_to_json\n'), ((949, 997), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (959, 997), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1014, 1046), 'langchain.agents.load_tools', 'load_tools', (["['serpapi']"], {'llm': 'llm'}), "(['serpapi'], llm=llm)\n", (1024, 1046), False, 'from langchain.agents import load_tools\n'), ((1063, 1105), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'verbose': '(True)'}), '(tools, llm, verbose=True)\n', (1079, 1105), False, 'from langchain.agents import initialize_agent\n'), ((1182, 1210), 'virl.utils.common_utils.parse_answer_to_json', 'parse_answer_to_json', (['answer'], {}), '(answer)\n', (1202, 1210), False, 'from virl.utils.common_utils import print_prompt, print_answer, parse_answer_to_json\n')]
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from tqdm import tqdm

from lmchain.tools import tool_register


class GLMToolChain:
    def __init__(self, llm):
        self.llm = llm
        self.tool_register = tool_register
        self.tools = tool_register.get_tools()

    def __call__(self, query="", tools=None):
        if query == "":
            raise ValueError("query需要填入查询问题")
        if tools is not None:
            self.tools = tools
        else:
            raise ValueError("将使用默认tools完成函数工具调用~")
        template = f"""
        你现在是一个专业的人工智能助手,你现在的需求是{query}。而你需要借助于工具在{self.tools}中找到对应的函数,用json格式返回对应的函数名和参数。
        函数名定义为function_name,参数名为params,还要求写入详细的形参与实参。
        如果找到合适的函数,就返回json格式的函数名和需要的参数,不要回答任何描述和解释。
        如果没有找到合适的函数,则返回:'未找到合适参数,请提供更详细的描述。'
        """

        flag = True
        counter = 0
        while flag:
            try:
                res = self.llm(template)
                import json
                res_dict = json.loads(res)
                res_dict = json.loads(res_dict)
                flag = False
            except Exception:
                # print("失败输出,现在开始重新验证")
                template = f"""
                你现在是一个专业的人工智能助手,你现在的需求是{query}。而你需要借助于工具在{self.tools}中找到对应的函数,用json格式返回对应的函数名和参数。
                函数名定义为function_name,参数名为params,还要求写入详细的形参与实参。
                如果找到合适的函数,就返回json格式的函数名和需要的参数,不要回答任何描述和解释。
                如果没有找到合适的函数,则返回:'未找到合适参数,请提供更详细的描述。'
                你刚才生成了一组结果,但是返回不符合json格式,现在请你重新按json格式生成并返回结果。
                """
                counter += 1
                if counter >= 5:
                    return '未找到合适参数,请提供更详细的描述。'
        return res_dict

    def run(self, query, tools=None):
        tools = self.tool_register.get_tools()
        result = self.__call__(query, tools)

        if result == "未找到合适参数,请提供更详细的描述。":
            return "未找到合适参数,请提供更详细的描述。"
        else:
            print("找到对应工具函数,格式如下:", result)
            result = self.dispatch_tool(result)

            from lmchain.prompts.templates import PromptTemplate
            tool_prompt = PromptTemplate(
                input_variables=["query", "result"],  # 输入变量包括中文和英文。
                template="你现在是一个私人助手,现在你的查询任务是{query},而你通过工具从网上查询的结果是{result},现在根据查询的内容与查询的结果,生成最终答案。",  # 使用模板格式化输入和输出。
            )
            from langchain.chains import LLMChain
            chain = LLMChain(llm=self.llm, prompt=tool_prompt)
            response = chain.run({"query": query, "result": result})
            return response

    def add_tools(self, tool):
        self.tool_register.register_tool(tool)
        return True

    def dispatch_tool(self, tool_result) -> str:
        tool_name = tool_result["function_name"]
        tool_params = tool_result["params"]

        if tool_name not in self.tool_register._TOOL_HOOKS:
            return f"Tool `{tool_name}` not found. Please use a provided tool."
        tool_call = self.tool_register._TOOL_HOOKS[tool_name]
        try:
            ret = tool_call(**tool_params)
        except Exception:
            import traceback
            ret = traceback.format_exc()
        return str(ret)

    def get_tools(self):
        return self.tool_register.get_tools()


if __name__ == '__main__':
    from lmchain.agents import llmMultiAgent
    llm = llmMultiAgent.AgentZhipuAI()

    from lmchain.chains import toolchain
    tool_chain = toolchain.GLMToolChain(llm)

    from typing import Annotated

    def rando_numbr(
            seed: Annotated[int, 'The random seed used by the generator', True],
            range: Annotated[tuple[int, int], 'The range of the generated numbers', True],
    ) -> int:
        """
        Generates a random number x, s.t. range[0] <= x < range[1]
        """
        import random
        return random.Random(seed).randint(*range)

    tool_chain.add_tools(rando_numbr)

    print("------------------------------------------------------")
    query = "今天shanghai的天气是什么?"
    result = tool_chain.run(query)
    print(result)
[ "langchain.chains.LLMChain" ]
[((3292, 3320), 'lmchain.agents.llmMultiAgent.AgentZhipuAI', 'llmMultiAgent.AgentZhipuAI', ([], {}), '()\n', (3318, 3320), False, 'from lmchain.agents import llmMultiAgent\n'), ((3381, 3408), 'lmchain.chains.toolchain.GLMToolChain', 'toolchain.GLMToolChain', (['llm'], {}), '(llm)\n', (3403, 3408), False, 'from lmchain.chains import toolchain\n'), ((285, 310), 'lmchain.tools.tool_register.get_tools', 'tool_register.get_tools', ([], {}), '()\n', (308, 310), False, 'from lmchain.tools import tool_register\n'), ((2073, 2222), 'lmchain.prompts.templates.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'result']", 'template': '"""你现在是一个私人助手,现在你的查询任务是{query},而你通过工具从网上查询的结果是{result},现在根据查询的内容与查询的结果,生成最终答案。"""'}), "(input_variables=['query', 'result'], template=\n '你现在是一个私人助手,现在你的查询任务是{query},而你通过工具从网上查询的结果是{result},现在根据查询的内容与查询的结果,生成最终答案。'\n )\n", (2087, 2222), False, 'from lmchain.prompts.templates import PromptTemplate\n'), ((2378, 2420), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'tool_prompt'}), '(llm=self.llm, prompt=tool_prompt)\n', (2386, 2420), False, 'from langchain.chains import LLMChain\n'), ((981, 996), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (991, 996), False, 'import json\n'), ((1024, 1044), 'json.loads', 'json.loads', (['res_dict'], {}), '(res_dict)\n', (1034, 1044), False, 'import json\n'), ((3086, 3108), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3106, 3108), False, 'import traceback\n'), ((3780, 3799), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (3793, 3799), False, 'import random\n')]
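For clarity, a short sketch of the two entry points GLMToolChain exposes (illustrative, not part of the original record): __call__ only asks the LLM to pick a tool and returns the parsed {"function_name": ..., "params": ...} dict (or a failure message), while run() also dispatches the selected tool and has the LLM phrase a final answer. It assumes the AgentZhipuAI credentials used by lmchain are configured and that a tool matching the query is registered.

from lmchain.agents import llmMultiAgent
from lmchain.chains import toolchain

llm = llmMultiAgent.AgentZhipuAI()
chain = toolchain.GLMToolChain(llm)

selection = chain(query="今天shanghai的天气是什么?", tools=chain.get_tools())  # tool-selection dict only
final_answer = chain.run("今天shanghai的天气是什么?")  # dispatches the tool and composes an answer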
import json import time import hashlib from typing import Dict, Any, List, Tuple import re from os import environ import streamlit as st from langchain.schema import BaseRetriever from langchain.tools import Tool from langchain.pydantic_v1 import BaseModel, Field from sqlalchemy import Column, Text, create_engine, MetaData from langchain.agents import AgentExecutor try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from clickhouse_sqlalchemy import ( types, engines ) from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain from langchain_experimental.retrievers.vector_sql_database import VectorSQLDatabaseChainRetriever from langchain.utilities.sql_database import SQLDatabase from langchain.chains import LLMChain from sqlalchemy import create_engine, MetaData from langchain.prompts import PromptTemplate, ChatPromptTemplate, \ SystemMessagePromptTemplate, HumanMessagePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.chat_models import ChatOpenAI from langchain.schema import BaseRetriever, Document from langchain import OpenAI from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.retrievers.self_query.myscale import MyScaleTranslator from langchain.embeddings import HuggingFaceInstructEmbeddings, SentenceTransformerEmbeddings from langchain.vectorstores import MyScaleSettings from chains.arxiv_chains import MyScaleWithoutMetadataJson from langchain.prompts.prompt import PromptTemplate from langchain.prompts.chat import MessagesPlaceholder from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, \ SystemMessage, ChatMessage, ToolMessage from langchain.memory import SQLChatMessageHistory from langchain.memory.chat_message_histories.sql import \ DefaultMessageConverter from langchain.schema.messages import BaseMessage # from langchain.agents.agent_toolkits import create_retriever_tool from prompts.arxiv_prompt import combine_prompt_template, _myscale_prompt from chains.arxiv_chains import ArXivQAwithSourcesChain, ArXivStuffDocumentChain from chains.arxiv_chains import VectorSQLRetrieveCustomOutputParser from .json_conv import CustomJSONEncoder environ['TOKENIZERS_PARALLELISM'] = 'true' environ['OPENAI_API_BASE'] = st.secrets['OPENAI_API_BASE'] # query_model_name = "gpt-3.5-turbo-instruct" query_model_name = "gpt-3.5-turbo-instruct" chat_model_name = "gpt-3.5-turbo-16k" OPENAI_API_KEY = st.secrets['OPENAI_API_KEY'] OPENAI_API_BASE = st.secrets['OPENAI_API_BASE'] MYSCALE_USER = st.secrets['MYSCALE_USER'] MYSCALE_PASSWORD = st.secrets['MYSCALE_PASSWORD'] MYSCALE_HOST = st.secrets['MYSCALE_HOST'] MYSCALE_PORT = st.secrets['MYSCALE_PORT'] UNSTRUCTURED_API = st.secrets['UNSTRUCTURED_API'] COMBINE_PROMPT = ChatPromptTemplate.from_strings( string_messages=[(SystemMessagePromptTemplate, combine_prompt_template), (HumanMessagePromptTemplate, '{question}')]) DEFAULT_SYSTEM_PROMPT = ( "Do your best to answer the questions. " "Feel free to use any tools available to look up " "relevant information. Please keep all details in query " "when calling search functions." 
) def hint_arxiv(): st.info("We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\n" "For example: \n\n" "*If you want to search papers with complex filters*:\n\n" "- What is a Bayesian network? Please use articles published later than Feb 2018 and with more than 2 categories and whose title like `computer` and must have `cs.CV` in its category.\n\n" "*If you want to ask questions based on papers in database*:\n\n" "- What is PageRank?\n" "- Did Geoffrey Hinton wrote paper about Capsule Neural Networks?\n" "- Introduce some applications of GANs published around 2019.\n" "- 请根据 2019 年左右的文章介绍一下 GAN 的应用都有哪些\n" "- Veuillez présenter les applications du GAN sur la base des articles autour de 2019 ?\n" "- Is it possible to synthesize room temperature super conductive material?") def hint_sql_arxiv(): st.info("You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.", icon='💡') st.markdown('''```sql CREATE TABLE default.ChatArXiv ( `abstract` String, `id` String, `vector` Array(Float32), `metadata` Object('JSON'), `pubdate` DateTime, `title` String, `categories` Array(String), `authors` Array(String), `comment` String, `primary_category` String, VECTOR INDEX vec_idx vector TYPE MSTG('fp16_storage=1', 'metric_type=Cosine', 'disk_mode=3'), CONSTRAINT vec_len CHECK length(vector) = 768) ENGINE = ReplacingMergeTree ORDER BY id ```''') def hint_wiki(): st.info("We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\n" "For example: \n\n" "- Which company did Elon Musk found?\n" "- What is Iron Gwazi?\n" "- What is a Ring in mathematics?\n" "- 苹果的发源地是那里?\n") def hint_sql_wiki(): st.info("You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.", icon='💡') st.markdown('''```sql CREATE TABLE wiki.Wikipedia ( `id` String, `title` String, `text` String, `url` String, `wiki_id` UInt64, `views` Float32, `paragraph_id` UInt64, `langs` UInt32, `emb` Array(Float32), VECTOR INDEX vec_idx emb TYPE MSTG('fp16_storage=1', 'metric_type=Cosine', 'disk_mode=3'), CONSTRAINT emb_len CHECK length(emb) = 768) ENGINE = ReplacingMergeTree ORDER BY id ```''') sel_map = { 'Wikipedia': { "database": "wiki", "table": "Wikipedia", "hint": hint_wiki, "hint_sql": hint_sql_wiki, "doc_prompt": PromptTemplate( input_variables=["page_content", "url", "title", "ref_id", "views"], template="Title for Doc #{ref_id}: {title}\n\tviews: {views}\n\tcontent: {page_content}\nSOURCE: {url}"), "metadata_cols": [ AttributeInfo( name="title", description="title of the wikipedia page", type="string", ), AttributeInfo( name="text", description="paragraph from this wiki page", type="string", ), AttributeInfo( name="views", description="number of views", type="float" ), ], "must_have_cols": ['id', 'title', 'url', 'text', 'views'], "vector_col": "emb", "text_col": "text", "metadata_col": "metadata", "emb_model": lambda: SentenceTransformerEmbeddings( model_name='sentence-transformers/paraphrase-multilingual-mpnet-base-v2',), "tool_desc": ("search_among_wikipedia", "Searches among Wikipedia and returns related wiki pages"), }, 'ArXiv Papers': { "database": "default", "table": "ChatArXiv", "hint": hint_arxiv, "hint_sql": hint_sql_arxiv, "doc_prompt": PromptTemplate( input_variables=["page_content", "id", "title", "ref_id", "authors", "pubdate", "categories"], template="Title for Doc #{ref_id}: {title}\n\tAbstract: 
{page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}"), "metadata_cols": [ AttributeInfo( name=VirtualColumnName(name="pubdate"), description="The year the paper is published", type="timestamp", ), AttributeInfo( name="authors", description="List of author names", type="list[string]", ), AttributeInfo( name="title", description="Title of the paper", type="string", ), AttributeInfo( name="categories", description="arxiv categories to this paper", type="list[string]" ), AttributeInfo( name="length(categories)", description="length of arxiv categories to this paper", type="int" ), ], "must_have_cols": ['title', 'id', 'categories', 'abstract', 'authors', 'pubdate'], "vector_col": "vector", "text_col": "abstract", "metadata_col": "metadata", "emb_model": lambda: HuggingFaceInstructEmbeddings( model_name='hkunlp/instructor-xl', embed_instruction="Represent the question for retrieving supporting scientific papers: "), "tool_desc": ("search_among_scientific_papers", "Searches among scientific papers from ArXiv and returns research papers"), } } def build_embedding_model(_sel): """Build embedding model """ with st.spinner("Loading Model..."): embeddings = sel_map[_sel]["emb_model"]() return embeddings def build_chains_retrievers(_sel: str) -> Dict[str, Any]: """build chains and retrievers :param _sel: selected knowledge base :type _sel: str :return: _description_ :rtype: Dict[str, Any] """ metadata_field_info = sel_map[_sel]["metadata_cols"] retriever = build_self_query(_sel) chain = build_qa_chain(_sel, retriever, name="Self Query Retriever") sql_retriever = build_vector_sql(_sel) sql_chain = build_qa_chain(_sel, sql_retriever, name="Vector SQL") return { "metadata_columns": [{'name': m.name.name if type(m.name) is VirtualColumnName else m.name, 'desc': m.description, 'type': m.type} for m in metadata_field_info], "retriever": retriever, "chain": chain, "sql_retriever": sql_retriever, "sql_chain": sql_chain } def build_self_query(_sel: str) -> SelfQueryRetriever: """Build self querying retriever :param _sel: selected knowledge base :type _sel: str :return: retriever used by chains :rtype: SelfQueryRetriever """ with st.spinner(f"Connecting DB for {_sel}..."): myscale_connection = { "host": MYSCALE_HOST, "port": MYSCALE_PORT, "username": MYSCALE_USER, "password": MYSCALE_PASSWORD, } config = MyScaleSettings(**myscale_connection, database=sel_map[_sel]["database"], table=sel_map[_sel]["table"], column_map={ "id": "id", "text": sel_map[_sel]["text_col"], "vector": sel_map[_sel]["vector_col"], "metadata": sel_map[_sel]["metadata_col"] }) doc_search = MyScaleWithoutMetadataJson(st.session_state[f"emb_model_{_sel}"], config, must_have_cols=sel_map[_sel]['must_have_cols']) with st.spinner(f"Building Self Query Retriever for {_sel}..."): metadata_field_info = sel_map[_sel]["metadata_cols"] retriever = SelfQueryRetriever.from_llm( OpenAI(model_name=query_model_name, openai_api_key=OPENAI_API_KEY, temperature=0), doc_search, "Scientific papers indexes with abstracts. 
All in English.", metadata_field_info, use_original_query=False, structured_query_translator=MyScaleTranslator()) return retriever def build_vector_sql(_sel: str) -> VectorSQLDatabaseChainRetriever: """Build Vector SQL Database Retriever :param _sel: selected knowledge base :type _sel: str :return: retriever used by chains :rtype: VectorSQLDatabaseChainRetriever """ with st.spinner(f'Building Vector SQL Database Retriever for {_sel}...'): engine = create_engine( f'clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/{sel_map[_sel]["database"]}?protocol=https') metadata = MetaData(bind=engine) PROMPT = PromptTemplate( input_variables=["input", "table_info", "top_k"], template=_myscale_prompt, ) output_parser = VectorSQLRetrieveCustomOutputParser.from_embeddings( model=st.session_state[f'emb_model_{_sel}'], must_have_columns=sel_map[_sel]["must_have_cols"]) sql_query_chain = VectorSQLDatabaseChain.from_llm( llm=OpenAI(model_name=query_model_name, openai_api_key=OPENAI_API_KEY, temperature=0), prompt=PROMPT, top_k=10, return_direct=True, db=SQLDatabase(engine, None, metadata, max_string_length=1024), sql_cmd_parser=output_parser, native_format=True ) sql_retriever = VectorSQLDatabaseChainRetriever( sql_db_chain=sql_query_chain, page_content_key=sel_map[_sel]["text_col"]) return sql_retriever def build_qa_chain(_sel: str, retriever: BaseRetriever, name: str = "Self-query") -> ArXivQAwithSourcesChain: """_summary_ :param _sel: selected knowledge base :type _sel: str :param retriever: retriever used by chains :type retriever: BaseRetriever :param name: display name, defaults to "Self-query" :type name: str, optional :return: QA chain interacts with user :rtype: ArXivQAwithSourcesChain """ with st.spinner(f'Building QA Chain with {name} for {_sel}...'): chain = ArXivQAwithSourcesChain( retriever=retriever, combine_documents_chain=ArXivStuffDocumentChain( llm_chain=LLMChain( prompt=COMBINE_PROMPT, llm=ChatOpenAI(model_name=chat_model_name, openai_api_key=OPENAI_API_KEY, temperature=0.6), ), document_prompt=sel_map[_sel]["doc_prompt"], document_variable_name="summaries", ), return_source_documents=True, max_tokens_limit=12000, ) return chain @st.cache_resource def build_all() -> Tuple[Dict[str, Any], Dict[str, Any]]: """build all resources :return: sel_map_obj :rtype: Dict[str, Any] """ sel_map_obj = {} embeddings = {} for k in sel_map: embeddings[k] = build_embedding_model(k) st.session_state[f'emb_model_{k}'] = embeddings[k] sel_map_obj[k] = build_chains_retrievers(k) return sel_map_obj, embeddings def create_message_model(table_name, DynamicBase): # type: ignore """ Create a message model for a given table name. Args: table_name: The name of the table to use. DynamicBase: The base class to use for the model. Returns: The model class. 
""" # Model decleared inside a function to have a dynamic table name class Message(DynamicBase): __tablename__ = table_name id = Column(types.Float64) session_id = Column(Text) user_id = Column(Text) msg_id = Column(Text, primary_key=True) type = Column(Text) addtionals = Column(Text) message = Column(Text) __table_args__ = ( engines.ReplacingMergeTree( partition_by='session_id', order_by=('id', 'msg_id')), {'comment': 'Store Chat History'} ) return Message def _message_from_dict(message: dict) -> BaseMessage: _type = message["type"] if _type == "human": return HumanMessage(**message["data"]) elif _type == "ai": return AIMessage(**message["data"]) elif _type == "system": return SystemMessage(**message["data"]) elif _type == "chat": return ChatMessage(**message["data"]) elif _type == "function": return FunctionMessage(**message["data"]) elif _type == "tool": return ToolMessage(**message["data"]) elif _type == "AIMessageChunk": message["data"]["type"] = "ai" return AIMessage(**message["data"]) else: raise ValueError(f"Got unexpected message type: {_type}") class DefaultClickhouseMessageConverter(DefaultMessageConverter): """The default message converter for SQLChatMessageHistory.""" def __init__(self, table_name: str): self.model_class = create_message_model(table_name, declarative_base()) def to_sql_model(self, message: BaseMessage, session_id: str) -> Any: tstamp = time.time() msg_id = hashlib.sha256( f"{session_id}_{message}_{tstamp}".encode('utf-8')).hexdigest() user_id, _ = session_id.split("?") return self.model_class( id=tstamp, msg_id=msg_id, user_id=user_id, session_id=session_id, type=message.type, addtionals=json.dumps(message.additional_kwargs), message=json.dumps({ "type": message.type, "additional_kwargs": {"timestamp": tstamp}, "data": message.dict()}) ) def from_sql_model(self, sql_message: Any) -> BaseMessage: msg_dump = json.loads(sql_message.message) msg = _message_from_dict(msg_dump) msg.additional_kwargs = msg_dump["additional_kwargs"] return msg def get_sql_model_class(self) -> Any: return self.model_class def create_agent_executor(name, session_id, llm, tools, system_prompt, **kwargs): name = name.replace(" ", "_") conn_str = f'clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}' chat_memory = SQLChatMessageHistory( session_id, connection_string=f'{conn_str}/chat?protocol=https', custom_message_converter=DefaultClickhouseMessageConverter(name)) memory = AgentTokenBufferMemory(llm=llm, chat_memory=chat_memory) _system_message = SystemMessage( content=system_prompt ) prompt = OpenAIFunctionsAgent.create_prompt( system_message=_system_message, extra_prompt_messages=[MessagesPlaceholder(variable_name="history")], ) agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt) return AgentExecutor( agent=agent, tools=tools, memory=memory, verbose=True, return_intermediate_steps=True, **kwargs ) class RetrieverInput(BaseModel): query: str = Field(description="query to look up in retriever") def create_retriever_tool( retriever: BaseRetriever, name: str, description: str ) -> Tool: """Create a tool to do retrieval of documents. Args: retriever: The retriever to use for the retrieval name: The name for the tool. This will be passed to the language model, so should be unique and somewhat descriptive. description: The description for the tool. This will be passed to the language model, so should be descriptive. 
Returns: Tool class to pass to an agent """ def wrap(func): def wrapped_retrieve(*args, **kwargs): docs: List[Document] = func(*args, **kwargs) return json.dumps([d.dict() for d in docs], cls=CustomJSONEncoder) return wrapped_retrieve return Tool( name=name, description=description, func=wrap(retriever.get_relevant_documents), coroutine=retriever.aget_relevant_documents, args_schema=RetrieverInput, ) @st.cache_resource def build_tools(): """build all resources :return: sel_map_obj :rtype: Dict[str, Any] """ sel_map_obj = {} for k in sel_map: if f'emb_model_{k}' not in st.session_state: st.session_state[f'emb_model_{k}'] = build_embedding_model(k) if "sel_map_obj" not in st.session_state: st.session_state["sel_map_obj"] = {} if k not in st.session_state.sel_map_obj: st.session_state["sel_map_obj"][k] = {} if "langchain_retriever" not in st.session_state.sel_map_obj[k] or "vecsql_retriever" not in st.session_state.sel_map_obj[k]: st.session_state.sel_map_obj[k].update(build_chains_retrievers(k)) sel_map_obj.update({ f"{k} + Self Querying": create_retriever_tool(st.session_state.sel_map_obj[k]["retriever"], *sel_map[k]["tool_desc"],), f"{k} + Vector SQL": create_retriever_tool(st.session_state.sel_map_obj[k]["sql_retriever"], *sel_map[k]["tool_desc"],), }) return sel_map_obj def build_agents(session_id, tool_names, chat_model_name=chat_model_name, temperature=0.6, system_prompt=DEFAULT_SYSTEM_PROMPT): chat_llm = ChatOpenAI(model_name=chat_model_name, temperature=temperature, openai_api_base=OPENAI_API_BASE, openai_api_key=OPENAI_API_KEY, streaming=True, ) tools = st.session_state.tools if "tools_with_users" not in st.session_state else st.session_state.tools_with_users sel_tools = [tools[k] for k in tool_names] agent = create_agent_executor( "chat_memory", session_id, chat_llm, tools=sel_tools, system_prompt=system_prompt ) return agent def display(dataframe, columns_=None, index=None): if len(dataframe) > 0: if index: dataframe.set_index(index) if columns_: st.dataframe(dataframe[columns_]) else: st.dataframe(dataframe) else: st.write("Sorry 😵 we didn't find any articles related to your query.\n\nMaybe the LLM is too naughty that does not follow our instruction... \n\nPlease try again and use verbs that may match the datatype.", unsafe_allow_html=True)
[ "langchain.agents.openai_functions_agent.agent_token_buffer_memory.AgentTokenBufferMemory", "langchain.pydantic_v1.Field", "langchain_experimental.retrievers.vector_sql_database.VectorSQLDatabaseChainRetriever", "langchain.utilities.sql_database.SQLDatabase", "langchain.schema.messages.ToolMessage", "langchain.OpenAI", "langchain.prompts.ChatPromptTemplate.from_strings", "langchain.agents.openai_functions_agent.base.OpenAIFunctionsAgent", "langchain.chains.query_constructor.base.AttributeInfo", "langchain.embeddings.HuggingFaceInstructEmbeddings", "langchain.schema.messages.FunctionMessage", "langchain.chat_models.ChatOpenAI", "langchain.prompts.chat.MessagesPlaceholder", "langchain.schema.messages.ChatMessage", "langchain.prompts.prompt.PromptTemplate", "langchain.agents.AgentExecutor", "langchain.embeddings.SentenceTransformerEmbeddings", "langchain.schema.messages.AIMessage", "langchain.schema.messages.HumanMessage", "langchain.chains.query_constructor.base.VirtualColumnName", "langchain.schema.messages.SystemMessage", "langchain.vectorstores.MyScaleSettings", "langchain.retrievers.self_query.myscale.MyScaleTranslator" ]
[((3163, 3322), 'langchain.prompts.ChatPromptTemplate.from_strings', 'ChatPromptTemplate.from_strings', ([], {'string_messages': "[(SystemMessagePromptTemplate, combine_prompt_template), (\n HumanMessagePromptTemplate, '{question}')]"}), "(string_messages=[(\n SystemMessagePromptTemplate, combine_prompt_template), (\n HumanMessagePromptTemplate, '{question}')])\n", (3194, 3322), False, 'from langchain.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((3590, 4394), 'streamlit.info', 'st.info', (['"""We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\nFor example: \n\n*If you want to search papers with complex filters*:\n\n- What is a Bayesian network? Please use articles published later than Feb 2018 and with more than 2 categories and whose title like `computer` and must have `cs.CV` in its category.\n\n*If you want to ask questions based on papers in database*:\n\n- What is PageRank?\n- Did Geoffrey Hinton wrote paper about Capsule Neural Networks?\n- Introduce some applications of GANs published around 2019.\n- 请根据 2019 年左右的文章介绍一下 GAN 的应用都有哪些\n- Veuillez présenter les applications du GAN sur la base des articles autour de 2019 ?\n- Is it possible to synthesize room temperature super conductive material?"""'], {}), '(\n """We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\nFor example: \n\n*If you want to search papers with complex filters*:\n\n- What is a Bayesian network? Please use articles published later than Feb 2018 and with more than 2 categories and whose title like `computer` and must have `cs.CV` in its category.\n\n*If you want to ask questions based on papers in database*:\n\n- What is PageRank?\n- Did Geoffrey Hinton wrote paper about Capsule Neural Networks?\n- Introduce some applications of GANs published around 2019.\n- 请根据 2019 年左右的文章介绍一下 GAN 的应用都有哪些\n- Veuillez présenter les applications du GAN sur la base des articles autour de 2019 ?\n- Is it possible to synthesize room temperature super conductive material?"""\n )\n', (3597, 4394), True, 'import streamlit as st\n'), ((4574, 4710), 'streamlit.info', 'st.info', (['"""You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`."""'], {'icon': '"""💡"""'}), "(\n 'You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.'\n , icon='💡')\n", (4581, 4710), True, 'import streamlit as st\n'), ((4705, 5231), 'streamlit.markdown', 'st.markdown', (['"""```sql\nCREATE TABLE default.ChatArXiv (\n `abstract` String, \n `id` String, \n `vector` Array(Float32), \n `metadata` Object(\'JSON\'), \n `pubdate` DateTime,\n `title` String,\n `categories` Array(String),\n `authors` Array(String), \n `comment` String,\n `primary_category` String,\n VECTOR INDEX vec_idx vector TYPE MSTG(\'fp16_storage=1\', \'metric_type=Cosine\', \'disk_mode=3\'), \n CONSTRAINT vec_len CHECK length(vector) = 768) \nENGINE = ReplacingMergeTree ORDER BY id\n```"""'], {}), '(\n """```sql\nCREATE TABLE default.ChatArXiv (\n `abstract` String, \n `id` String, \n `vector` Array(Float32), \n `metadata` Object(\'JSON\'), \n `pubdate` DateTime,\n `title` String,\n `categories` Array(String),\n `authors` Array(String), \n `comment` String,\n `primary_category` String,\n VECTOR INDEX vec_idx vector TYPE MSTG(\'fp16_storage=1\', \'metric_type=Cosine\', \'disk_mode=3\'), \n 
CONSTRAINT vec_len CHECK length(vector) = 768) \nENGINE = ReplacingMergeTree ORDER BY id\n```"""\n )\n', (4716, 5231), True, 'import streamlit as st\n'), ((5245, 5514), 'streamlit.info', 'st.info', (['"""We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\nFor example: \n\n- Which company did Elon Musk found?\n- What is Iron Gwazi?\n- What is a Ring in mathematics?\n- 苹果的发源地是那里?\n"""'], {}), '(\n """We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\nFor example: \n\n- Which company did Elon Musk found?\n- What is Iron Gwazi?\n- What is a Ring in mathematics?\n- 苹果的发源地是那里?\n"""\n )\n', (5252, 5514), True, 'import streamlit as st\n'), ((5611, 5747), 'streamlit.info', 'st.info', (['"""You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`."""'], {'icon': '"""💡"""'}), "(\n 'You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.'\n , icon='💡')\n", (5618, 5747), True, 'import streamlit as st\n'), ((5742, 6195), 'streamlit.markdown', 'st.markdown', (['"""```sql\nCREATE TABLE wiki.Wikipedia (\n `id` String, \n `title` String, \n `text` String, \n `url` String, \n `wiki_id` UInt64, \n `views` Float32, \n `paragraph_id` UInt64, \n `langs` UInt32, \n `emb` Array(Float32), \n VECTOR INDEX vec_idx emb TYPE MSTG(\'fp16_storage=1\', \'metric_type=Cosine\', \'disk_mode=3\'), \n CONSTRAINT emb_len CHECK length(emb) = 768) \nENGINE = ReplacingMergeTree ORDER BY id\n```"""'], {}), '(\n """```sql\nCREATE TABLE wiki.Wikipedia (\n `id` String, \n `title` String, \n `text` String, \n `url` String, \n `wiki_id` UInt64, \n `views` Float32, \n `paragraph_id` UInt64, \n `langs` UInt32, \n `emb` Array(Float32), \n VECTOR INDEX vec_idx emb TYPE MSTG(\'fp16_storage=1\', \'metric_type=Cosine\', \'disk_mode=3\'), \n CONSTRAINT emb_len CHECK length(emb) = 768) \nENGINE = ReplacingMergeTree ORDER BY id\n```"""\n )\n', (5753, 6195), True, 'import streamlit as st\n'), ((18531, 18587), 'langchain.agents.openai_functions_agent.agent_token_buffer_memory.AgentTokenBufferMemory', 'AgentTokenBufferMemory', ([], {'llm': 'llm', 'chat_memory': 'chat_memory'}), '(llm=llm, chat_memory=chat_memory)\n', (18553, 18587), False, 'from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory\n'), ((18611, 18647), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (18624, 18647), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((18847, 18904), 'langchain.agents.openai_functions_agent.base.OpenAIFunctionsAgent', 'OpenAIFunctionsAgent', ([], {'llm': 'llm', 'tools': 'tools', 'prompt': 'prompt'}), '(llm=llm, tools=tools, prompt=prompt)\n', (18867, 18904), False, 'from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent\n'), ((18916, 19030), 'langchain.agents.AgentExecutor', 'AgentExecutor', ([], {'agent': 'agent', 'tools': 'tools', 'memory': 'memory', 'verbose': '(True)', 'return_intermediate_steps': '(True)'}), '(agent=agent, tools=tools, memory=memory, verbose=True,\n return_intermediate_steps=True, **kwargs)\n', (18929, 19030), False, 'from langchain.agents import AgentExecutor\n'), ((19133, 19183), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""query to 
look up in retriever"""'}), "(description='query to look up in retriever')\n", (19138, 19183), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((21372, 21523), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'chat_model_name', 'temperature': 'temperature', 'openai_api_base': 'OPENAI_API_BASE', 'openai_api_key': 'OPENAI_API_KEY', 'streaming': '(True)'}), '(model_name=chat_model_name, temperature=temperature,\n openai_api_base=OPENAI_API_BASE, openai_api_key=OPENAI_API_KEY,\n streaming=True)\n', (21382, 21523), False, 'from langchain.chat_models import ChatOpenAI\n'), ((6361, 6562), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['page_content', 'url', 'title', 'ref_id', 'views']", 'template': '"""Title for Doc #{ref_id}: {title}\n\tviews: {views}\n\tcontent: {page_content}\nSOURCE: {url}"""'}), '(input_variables=[\'page_content\', \'url\', \'title\', \'ref_id\',\n \'views\'], template=\n """Title for Doc #{ref_id}: {title}\n\tviews: {views}\n\tcontent: {page_content}\nSOURCE: {url}"""\n )\n', (6375, 6562), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((7708, 7997), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['page_content', 'id', 'title', 'ref_id', 'authors', 'pubdate', 'categories']", 'template': '"""Title for Doc #{ref_id}: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}"""'}), '(input_variables=[\'page_content\', \'id\', \'title\', \'ref_id\',\n \'authors\', \'pubdate\', \'categories\'], template=\n """Title for Doc #{ref_id}: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}"""\n )\n', (7722, 7997), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((9574, 9604), 'streamlit.spinner', 'st.spinner', (['"""Loading Model..."""'], {}), "('Loading Model...')\n", (9584, 9604), True, 'import streamlit as st\n'), ((10739, 10781), 'streamlit.spinner', 'st.spinner', (['f"""Connecting DB for {_sel}..."""'], {}), "(f'Connecting DB for {_sel}...')\n", (10749, 10781), True, 'import streamlit as st\n'), ((10989, 11247), 'langchain.vectorstores.MyScaleSettings', 'MyScaleSettings', ([], {'database': "sel_map[_sel]['database']", 'table': "sel_map[_sel]['table']", 'column_map': "{'id': 'id', 'text': sel_map[_sel]['text_col'], 'vector': sel_map[_sel][\n 'vector_col'], 'metadata': sel_map[_sel]['metadata_col']}"}), "(**myscale_connection, database=sel_map[_sel]['database'],\n table=sel_map[_sel]['table'], column_map={'id': 'id', 'text': sel_map[\n _sel]['text_col'], 'vector': sel_map[_sel]['vector_col'], 'metadata':\n sel_map[_sel]['metadata_col']})\n", (11004, 11247), False, 'from langchain.vectorstores import MyScaleSettings\n'), ((11538, 11663), 'chains.arxiv_chains.MyScaleWithoutMetadataJson', 'MyScaleWithoutMetadataJson', (["st.session_state[f'emb_model_{_sel}']", 'config'], {'must_have_cols': "sel_map[_sel]['must_have_cols']"}), "(st.session_state[f'emb_model_{_sel}'], config,\n must_have_cols=sel_map[_sel]['must_have_cols'])\n", (11564, 11663), False, 'from chains.arxiv_chains import MyScaleWithoutMetadataJson\n'), ((11718, 11776), 'streamlit.spinner', 'st.spinner', (['f"""Building Self Query Retriever for {_sel}..."""'], {}), "(f'Building Self Query Retriever for {_sel}...')\n", (11728, 11776), True, 'import streamlit as st\n'), ((12490, 12557), 'streamlit.spinner', 'st.spinner', 
(['f"""Building Vector SQL Database Retriever for {_sel}..."""'], {}), "(f'Building Vector SQL Database Retriever for {_sel}...')\n", (12500, 12557), True, 'import streamlit as st\n'), ((12576, 12723), 'sqlalchemy.create_engine', 'create_engine', (['f"""clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/{sel_map[_sel][\'database\']}?protocol=https"""'], {}), '(\n f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/{sel_map[_sel][\'database\']}?protocol=https"\n )\n', (12589, 12723), False, 'from sqlalchemy import create_engine, MetaData\n'), ((12746, 12767), 'sqlalchemy.MetaData', 'MetaData', ([], {'bind': 'engine'}), '(bind=engine)\n', (12754, 12767), False, 'from sqlalchemy import create_engine, MetaData\n'), ((12785, 12880), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'table_info', 'top_k']", 'template': '_myscale_prompt'}), "(input_variables=['input', 'table_info', 'top_k'], template=\n _myscale_prompt)\n", (12799, 12880), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((12935, 13087), 'chains.arxiv_chains.VectorSQLRetrieveCustomOutputParser.from_embeddings', 'VectorSQLRetrieveCustomOutputParser.from_embeddings', ([], {'model': "st.session_state[f'emb_model_{_sel}']", 'must_have_columns': "sel_map[_sel]['must_have_cols']"}), "(model=st.session_state[\n f'emb_model_{_sel}'], must_have_columns=sel_map[_sel]['must_have_cols'])\n", (12986, 13087), False, 'from chains.arxiv_chains import VectorSQLRetrieveCustomOutputParser\n'), ((13541, 13650), 'langchain_experimental.retrievers.vector_sql_database.VectorSQLDatabaseChainRetriever', 'VectorSQLDatabaseChainRetriever', ([], {'sql_db_chain': 'sql_query_chain', 'page_content_key': "sel_map[_sel]['text_col']"}), "(sql_db_chain=sql_query_chain,\n page_content_key=sel_map[_sel]['text_col'])\n", (13572, 13650), False, 'from langchain_experimental.retrievers.vector_sql_database import VectorSQLDatabaseChainRetriever\n'), ((14139, 14197), 'streamlit.spinner', 'st.spinner', (['f"""Building QA Chain with {name} for {_sel}..."""'], {}), "(f'Building QA Chain with {name} for {_sel}...')\n", (14149, 14197), True, 'import streamlit as st\n'), ((15683, 15704), 'sqlalchemy.Column', 'Column', (['types.Float64'], {}), '(types.Float64)\n', (15689, 15704), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15726, 15738), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (15732, 15738), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15757, 15769), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (15763, 15769), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15787, 15817), 'sqlalchemy.Column', 'Column', (['Text'], {'primary_key': '(True)'}), '(Text, primary_key=True)\n', (15793, 15817), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15833, 15845), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (15839, 15845), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15867, 15879), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (15873, 15879), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((15898, 15910), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (15904, 15910), False, 'from sqlalchemy import Column, Text, create_engine, MetaData\n'), ((16265, 16296), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {}), 
"(**message['data'])\n", (16277, 16296), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((17209, 17220), 'time.time', 'time.time', ([], {}), '()\n', (17218, 17220), False, 'import time\n'), ((17878, 17909), 'json.loads', 'json.loads', (['sql_message.message'], {}), '(sql_message.message)\n', (17888, 17909), False, 'import json\n'), ((22189, 22429), 'streamlit.write', 'st.write', (['"""Sorry 😵 we didn\'t find any articles related to your query.\n\nMaybe the LLM is too naughty that does not follow our instruction... \n\nPlease try again and use verbs that may match the datatype."""'], {'unsafe_allow_html': '(True)'}), '(\n """Sorry 😵 we didn\'t find any articles related to your query.\n\nMaybe the LLM is too naughty that does not follow our instruction... \n\nPlease try again and use verbs that may match the datatype."""\n , unsafe_allow_html=True)\n', (22197, 22429), True, 'import streamlit as st\n'), ((6644, 6734), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""title"""', 'description': '"""title of the wikipedia page"""', 'type': '"""string"""'}), "(name='title', description='title of the wikipedia page', type\n ='string')\n", (6657, 6734), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((6806, 6896), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""text"""', 'description': '"""paragraph from this wiki page"""', 'type': '"""string"""'}), "(name='text', description='paragraph from this wiki page',\n type='string')\n", (6819, 6896), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((6969, 7041), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""views"""', 'description': '"""number of views"""', 'type': '"""float"""'}), "(name='views', description='number of views', type='float')\n", (6982, 7041), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((7305, 7413), 'langchain.embeddings.SentenceTransformerEmbeddings', 'SentenceTransformerEmbeddings', ([], {'model_name': '"""sentence-transformers/paraphrase-multilingual-mpnet-base-v2"""'}), "(model_name=\n 'sentence-transformers/paraphrase-multilingual-mpnet-base-v2')\n", (7334, 7413), False, 'from langchain.embeddings import HuggingFaceInstructEmbeddings, SentenceTransformerEmbeddings\n'), ((8278, 8369), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""authors"""', 'description': '"""List of author names"""', 'type': '"""list[string]"""'}), "(name='authors', description='List of author names', type=\n 'list[string]')\n", (8291, 8369), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((8441, 8517), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""title"""', 'description': '"""Title of the paper"""', 'type': '"""string"""'}), "(name='title', description='Title of the paper', type='string')\n", (8454, 8517), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((8594, 8698), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""categories"""', 'description': '"""arxiv categories to this paper"""', 'type': '"""list[string]"""'}), "(name='categories', description=\n 
'arxiv categories to this paper', type='list[string]')\n", (8607, 8698), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((8769, 8882), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""length(categories)"""', 'description': '"""length of arxiv categories to this paper"""', 'type': '"""int"""'}), "(name='length(categories)', description=\n 'length of arxiv categories to this paper', type='int')\n", (8782, 8882), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((9172, 9335), 'langchain.embeddings.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': '"""hkunlp/instructor-xl"""', 'embed_instruction': '"""Represent the question for retrieving supporting scientific papers: """'}), "(model_name='hkunlp/instructor-xl',\n embed_instruction=\n 'Represent the question for retrieving supporting scientific papers: ')\n", (9201, 9335), False, 'from langchain.embeddings import HuggingFaceInstructEmbeddings, SentenceTransformerEmbeddings\n'), ((11900, 11985), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'query_model_name', 'openai_api_key': 'OPENAI_API_KEY', 'temperature': '(0)'}), '(model_name=query_model_name, openai_api_key=OPENAI_API_KEY,\n temperature=0)\n', (11906, 11985), False, 'from langchain import OpenAI\n'), ((15950, 16035), 'clickhouse_sqlalchemy.engines.ReplacingMergeTree', 'engines.ReplacingMergeTree', ([], {'partition_by': '"""session_id"""', 'order_by': "('id', 'msg_id')"}), "(partition_by='session_id', order_by=('id', 'msg_id')\n )\n", (15976, 16035), False, 'from clickhouse_sqlalchemy import types, engines\n'), ((16336, 16364), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {}), "(**message['data'])\n", (16345, 16364), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((17097, 17115), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (17113, 17115), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((22087, 22120), 'streamlit.dataframe', 'st.dataframe', (['dataframe[columns_]'], {}), '(dataframe[columns_])\n', (22099, 22120), True, 'import streamlit as st\n'), ((22147, 22170), 'streamlit.dataframe', 'st.dataframe', (['dataframe'], {}), '(dataframe)\n', (22159, 22170), True, 'import streamlit as st\n'), ((12174, 12193), 'langchain.retrievers.self_query.myscale.MyScaleTranslator', 'MyScaleTranslator', ([], {}), '()\n', (12191, 12193), False, 'from langchain.retrievers.self_query.myscale import MyScaleTranslator\n'), ((13171, 13256), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'query_model_name', 'openai_api_key': 'OPENAI_API_KEY', 'temperature': '(0)'}), '(model_name=query_model_name, openai_api_key=OPENAI_API_KEY,\n temperature=0)\n', (13177, 13256), False, 'from langchain import OpenAI\n'), ((13373, 13432), 'langchain.utilities.sql_database.SQLDatabase', 'SQLDatabase', (['engine', 'None', 'metadata'], {'max_string_length': '(1024)'}), '(engine, None, metadata, max_string_length=1024)\n', (13384, 13432), False, 'from langchain.utilities.sql_database import SQLDatabase\n'), ((16408, 16440), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {}), "(**message['data'])\n", (16421, 16440), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), 
((17574, 17611), 'json.dumps', 'json.dumps', (['message.additional_kwargs'], {}), '(message.additional_kwargs)\n', (17584, 17611), False, 'import json\n'), ((18782, 18826), 'langchain.prompts.chat.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (18801, 18826), False, 'from langchain.prompts.chat import MessagesPlaceholder\n'), ((8119, 8152), 'langchain.chains.query_constructor.base.VirtualColumnName', 'VirtualColumnName', ([], {'name': '"""pubdate"""'}), "(name='pubdate')\n", (8136, 8152), False, 'from langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName\n'), ((16482, 16512), 'langchain.schema.messages.ChatMessage', 'ChatMessage', ([], {}), "(**message['data'])\n", (16493, 16512), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((16558, 16592), 'langchain.schema.messages.FunctionMessage', 'FunctionMessage', ([], {}), "(**message['data'])\n", (16573, 16592), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((16634, 16664), 'langchain.schema.messages.ToolMessage', 'ToolMessage', ([], {}), "(**message['data'])\n", (16645, 16664), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n'), ((14437, 14527), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'chat_model_name', 'openai_api_key': 'OPENAI_API_KEY', 'temperature': '(0.6)'}), '(model_name=chat_model_name, openai_api_key=OPENAI_API_KEY,\n temperature=0.6)\n', (14447, 14527), False, 'from langchain.chat_models import ChatOpenAI\n'), ((16755, 16783), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {}), "(**message['data'])\n", (16764, 16783), False, 'from langchain.schema.messages import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage, ChatMessage, ToolMessage\n')]
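The record above only defines helpers; a hypothetical way to wire them into a Streamlit page is sketched below (function names follow the record, while the session id, the question, and the secrets are assumed). build_agents() reads the tool map from st.session_state.tools, and the "user?conversation" session-id format matches to_sql_model(), which splits on "?" to recover the user id.

import streamlit as st

st.session_state["tools"] = build_tools()
agent = build_agents(
    session_id="alice?demo-session",
    tool_names=list(st.session_state["tools"].keys()),
)
reply = agent({"input": "Introduce some applications of GANs published around 2019."})
st.write(reply["output"])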
"""Wrapper around Replicate API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class Replicate(LLM): """Wrapper around Replicate models. To use, you should have the ``replicate`` python package installed, and the environment variable ``REPLICATE_API_TOKEN`` set with your API token. You can find your token here: https://replicate.com/account The model param is required, but any other model parameters can also be passed in with the format input={model_param: value, ...} Example: .. code-block:: python from langchain.llms import Replicate replicate = Replicate(model="stability-ai/stable-diffusion: \ 27b93a2413e7f36cd83da926f365628\ 0b2931564ff050bf9575f1fdf9bcd7478", input={"image_dimensions": "512x512"}) """ model: str input: Dict[str, Any] = Field(default_factory=dict) model_kwargs: Dict[str, Any] = Field(default_factory=dict) replicate_api_token: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env( values, "REPLICATE_API_TOKEN", "REPLICATE_API_TOKEN" ) values["replicate_api_token"] = replicate_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of model.""" return "replicate" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to replicate endpoint.""" try: import replicate as replicate_python except ImportError: raise ImportError( "Could not import replicate python package. " "Please install it with `pip install replicate`." ) # get the model and version model_str, version_str = self.model.split(":") model = replicate_python.models.get(model_str) version = model.versions.get(version_str) # sort through the openapi schema to get the name of the first input input_properties = sorted( version.openapi_schema["components"]["schemas"]["Input"][ "properties" ].items(), key=lambda item: item[1].get("x-order", 0), ) first_input_name = input_properties[0][0] inputs = {first_input_name: prompt, **self.input} iterator = replicate_python.run(self.model, input={**inputs}) return "".join([output for output in iterator])
[ "langchain.utils.get_from_dict_or_env" ]
[((317, 344), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (334, 344), False, 'import logging\n'), ((1212, 1239), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1217, 1239), False, 'from pydantic import Extra, Field, root_validator\n'), ((1275, 1302), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1280, 1302), False, 'from pydantic import Extra, Field, root_validator\n'), ((1458, 1482), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1472, 1482), False, 'from pydantic import Extra, Field, root_validator\n'), ((2300, 2316), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2314, 2316), False, 'from pydantic import Extra, Field, root_validator\n'), ((2482, 2556), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""REPLICATE_API_TOKEN"""', '"""REPLICATE_API_TOKEN"""'], {}), "(values, 'REPLICATE_API_TOKEN', 'REPLICATE_API_TOKEN')\n", (2502, 2556), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3550, 3588), 'replicate.models.get', 'replicate_python.models.get', (['model_str'], {}), '(model_str)\n', (3577, 3588), True, 'import replicate as replicate_python\n'), ((4068, 4118), 'replicate.run', 'replicate_python.run', (['self.model'], {'input': '{**inputs}'}), '(self.model, input={**inputs})\n', (4088, 4118), True, 'import replicate as replicate_python\n')]
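A brief, hedged example of calling the Replicate wrapper defined above: the model string is a placeholder ("owner/model:version"), not a real identifier, and REPLICATE_API_TOKEN (or the replicate_api_token field) must be supplied for the call to succeed.

llm = Replicate(
    model="owner/model:version",   # placeholder, replace with a real Replicate model id
    input={"temperature": 0.75},   # extra inputs are merged into the request alongside the prompt
)
text = llm("Describe a llama in one sentence.")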
import datetime import difflib import logging import os from functools import wraps from queue import Queue from threading import Thread from typing import Any, Callable, Dict, List import numpy as np import openai import pandas as pd import sqlalchemy from google.api_core.exceptions import GoogleAPIError from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.chains.llm import LLMChain from langchain.tools.base import BaseTool from langchain_community.callbacks import get_openai_callback from langchain_openai import OpenAIEmbeddings from overrides import override from pydantic import BaseModel, Field from sqlalchemy import MetaData from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.sql import func from dataherald.context_store import ContextStore from dataherald.db import DB from dataherald.db_scanner.models.types import TableDescription, TableDescriptionStatus from dataherald.db_scanner.repository.base import TableDescriptionRepository from dataherald.repositories.sql_generations import ( SQLGenerationRepository, ) from dataherald.sql_database.base import SQLDatabase, SQLInjectionError from dataherald.sql_database.models.types import ( DatabaseConnection, ) from dataherald.sql_generator import EngineTimeOutORItemLimitError, SQLGenerator from dataherald.types import Prompt, SQLGeneration from dataherald.utils.agent_prompts import ( AGENT_PREFIX, ERROR_PARSING_MESSAGE, FORMAT_INSTRUCTIONS, PLAN_BASE, PLAN_WITH_FEWSHOT_EXAMPLES, PLAN_WITH_FEWSHOT_EXAMPLES_AND_INSTRUCTIONS, PLAN_WITH_INSTRUCTIONS, SUFFIX_WITH_FEW_SHOT_SAMPLES, SUFFIX_WITHOUT_FEW_SHOT_SAMPLES, ) from dataherald.utils.timeout_utils import run_with_timeout logger = logging.getLogger(__name__) TOP_K = SQLGenerator.get_upper_bound_limit() EMBEDDING_MODEL = "text-embedding-3-large" TOP_TABLES = 20 def catch_exceptions(): # noqa: C901 def decorator(fn: Callable[[str], str]) -> Callable[[str], str]: # noqa: C901 @wraps(fn) def wrapper(*args: Any, **kwargs: Any) -> Any: # noqa: PLR0911 try: return fn(*args, **kwargs) except openai.AuthenticationError as e: # Handle authentication error here return f"OpenAI API authentication error: {e}" except openai.RateLimitError as e: # Handle API error here, e.g. 
retry or log return f"OpenAI API request exceeded rate limit: {e}" except openai.BadRequestError as e: # Handle connection error here return f"OpenAI API request timed out: {e}" except openai.APIResponseValidationError as e: # Handle rate limit error (we recommend using exponential backoff) return f"OpenAI API response is invalid: {e}" except openai.OpenAIError as e: # Handle timeout error (we recommend using exponential backoff) return f"OpenAI API returned an error: {e}" except GoogleAPIError as e: return f"Google API returned an error: {e}" except SQLAlchemyError as e: return f"Error: {e}" return wrapper return decorator def replace_unprocessable_characters(text: str) -> str: """Replace unprocessable characters with a space.""" text = text.strip() return text.replace(r"\_", "_") # Classes needed for tools class BaseSQLDatabaseTool(BaseModel): """Base tool for interacting with the SQL database and the context information.""" db: SQLDatabase = Field(exclude=True) context: List[dict] | None = Field(exclude=True, default=None) class Config(BaseTool.Config): """Configuration for this pydantic object.""" arbitrary_types_allowed = True extra = "allow" class SystemTime(BaseSQLDatabaseTool, BaseTool): """Tool for finding the current data and time.""" name = "SystemTime" description = """ Input is an empty string, output is the current data and time. Always use this tool before generating a query if there is any time or date in the given question. """ @catch_exceptions() def _run( self, tool_input: str = "", # noqa: ARG002 run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Execute the query, return the results or an error message.""" current_datetime = datetime.datetime.now() return f"Current Date and Time: {str(current_datetime)}" async def _arun( self, tool_input: str = "", run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("GetCurrentTimeTool does not support async") class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool): """Tool for querying a SQL database.""" name = "SqlDbQuery" description = """ Input: SQL query. Output: Result from the database or an error message if the query is incorrect. If an error occurs, rewrite the query and retry. Use this tool to execute SQL queries. """ @catch_exceptions() def _run( self, query: str, top_k: int = TOP_K, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Execute the query, return the results or an error message.""" query = replace_unprocessable_characters(query) if "```sql" in query: query = query.replace("```sql", "").replace("```", "") try: return run_with_timeout( self.db.run_sql, args=(query,), kwargs={"top_k": top_k}, timeout_duration=int(os.getenv("SQL_EXECUTION_TIMEOUT", "60")), )[0] except TimeoutError: return "SQL query execution time exceeded, proceed without query execution" async def _arun( self, query: str, run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("QuerySQLDataBaseTool does not support async") class GetUserInstructions(BaseSQLDatabaseTool, BaseTool): """Tool for retrieving the instructions from the user""" name = "GetAdminInstructions" description = """ Input: is an empty string. Output: Database admin instructions before generating the SQL query. The generated SQL query MUST follow the admin instructions even it contradicts with the given question. 
""" instructions: List[dict] @catch_exceptions() def _run( self, tool_input: str = "", # noqa: ARG002 run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: response = "Admin: All of the generated SQL queries must follow the below instructions:\n" for instruction in self.instructions: response += f"{instruction['instruction']}\n" return response async def _arun( self, tool_input: str = "", # noqa: ARG002 run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("GetUserInstructions does not support async") class TablesSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool): """Tool which takes in the given question and returns a list of tables with their relevance score to the question""" name = "DbTablesWithRelevanceScores" description = """ Input: Given question. Output: Comma-separated list of tables with their relevance scores, indicating their relevance to the question. Use this tool to identify the relevant tables for the given question. """ db_scan: List[TableDescription] embedding: OpenAIEmbeddings def get_embedding( self, text: str, ) -> List[float]: text = text.replace("\n", " ") return self.embedding.embed_query(text) def get_docs_embedding( self, docs: List[str], ) -> List[List[float]]: return self.embedding.embed_documents(docs) def cosine_similarity(self, a: List[float], b: List[float]) -> float: return round(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), 4) @catch_exceptions() def _run( self, user_question: str, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Use the concatenation of table name, columns names, and the description of the table as the table representation""" question_embedding = self.get_embedding(user_question) table_representations = [] for table in self.db_scan: col_rep = "" for column in table.columns: if column.description is not None: col_rep += f"{column.name}: {column.description}, " else: col_rep += f"{column.name}, " if table.description is not None: table_rep = f"Table {table.table_name} contain columns: [{col_rep}], this tables has: {table.description}" else: table_rep = f"Table {table.table_name} contain columns: [{col_rep}]" table_representations.append([table.table_name, table_rep]) df = pd.DataFrame( table_representations, columns=["table_name", "table_representation"] ) df["table_embedding"] = self.get_docs_embedding(df.table_representation) df["similarities"] = df.table_embedding.apply( lambda x: self.cosine_similarity(x, question_embedding) ) df = df.sort_values(by="similarities", ascending=True) df = df.tail(TOP_TABLES) table_relevance = "" for _, row in df.iterrows(): table_relevance += ( f'Table: {row["table_name"]}, relevance score: {row["similarities"]}\n' ) return table_relevance async def _arun( self, user_question: str = "", run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("TablesSQLDatabaseTool does not support async") class ColumnEntityChecker(BaseSQLDatabaseTool, BaseTool): """Tool for checking the existance of an entity inside a column.""" name = "DbColumnEntityChecker" description = """ Input: Column name and its corresponding table, and an entity. Output: cell-values found in the column similar to the given entity. Use this tool to get cell values similar to the given entity in the given column. 
Example Input: table1 -> column2, entity """ def find_similar_strings( self, input_list: List[tuple], target_string: str, threshold=0.4 ): similar_strings = [] for item in input_list: similarity = difflib.SequenceMatcher( None, str(item[0]).strip().lower(), target_string.lower() ).ratio() if similarity >= threshold: similar_strings.append((str(item[0]).strip(), similarity)) similar_strings.sort(key=lambda x: x[1], reverse=True) return similar_strings[:25] @catch_exceptions() def _run( self, tool_input: str, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: try: schema, entity = tool_input.split(",") table_name, column_name = schema.split("->") except ValueError: return "Invalid input format, use following format: table_name -> column_name, entity (entity should be a string without ',')" search_pattern = f"%{entity.strip().lower()}%" meta = MetaData(bind=self.db.engine) table = sqlalchemy.Table(table_name.strip(), meta, autoload=True) try: search_query = sqlalchemy.select( [func.distinct(table.c[column_name.strip()])] ).where(func.lower(table.c[column_name.strip()]).like(search_pattern)) search_results = self.db.engine.execute(search_query).fetchall() search_results = search_results[:25] except SQLAlchemyError: search_results = [] distinct_query = sqlalchemy.select( [func.distinct(table.c[column_name.strip()])] ) results = self.db.engine.execute(distinct_query).fetchall() results = self.find_similar_strings(results, entity) similar_items = "Similar items:\n" already_added = {} for item in results: similar_items += f"{item[0]}\n" already_added[item[0]] = True if len(search_results) > 0: for item in search_results: if item[0] not in already_added: similar_items += f"{item[0]}\n" return similar_items async def _arun( self, tool_input: str, run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("ColumnEntityChecker does not support async") class SchemaSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool): """Tool for getting schema of relevant tables.""" name = "DbRelevantTablesSchema" description = """ Input: Comma-separated list of tables. Output: Schema of the specified tables. Use this tool to discover all columns of the relevant tables and identify potentially relevant columns. 
Example Input: table1, table2, table3 """ db_scan: List[TableDescription] @catch_exceptions() def _run( self, table_names: str, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Get the schema for tables in a comma-separated list.""" table_names_list = table_names.split(", ") table_names_list = [ replace_unprocessable_characters(table_name) for table_name in table_names_list ] tables_schema = "" for table in self.db_scan: if table.table_name in table_names_list: tables_schema += table.table_schema + "\n" descriptions = [] if table.description is not None: descriptions.append( f"Table `{table.table_name}`: {table.description}\n" ) for column in table.columns: if column.description is not None: descriptions.append( f"Column `{column.name}`: {column.description}\n" ) if len(descriptions) > 0: tables_schema += f"/*\n{''.join(descriptions)}*/\n" if tables_schema == "": tables_schema += "Tables not found in the database" return tables_schema async def _arun( self, table_name: str, run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("SchemaSQLDatabaseTool does not support async") class InfoRelevantColumns(BaseSQLDatabaseTool, BaseTool): """Tool for getting more information for potentially relevant columns""" name = "DbRelevantColumnsInfo" description = """ Input: Comma-separated list of potentially relevant columns with their corresponding table. Output: Information about the values inside the columns and their descriptions. Use this tool to gather details about potentially relevant columns. then, filter them, and identify the relevant ones. Example Input: table1 -> column1, table1 -> column2, table2 -> column1 """ db_scan: List[TableDescription] @catch_exceptions() def _run( # noqa: C901 self, column_names: str, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Get the column level information.""" items_list = column_names.split(", ") column_full_info = "" for item in items_list: if " -> " in item: table_name, column_name = item.split(" -> ") table_name = replace_unprocessable_characters(table_name) column_name = replace_unprocessable_characters(column_name) found = False for table in self.db_scan: if table_name == table.table_name: col_info = "" for column in table.columns: if column_name == column.name: found = True col_info += f"Description: {column.description}," if column.low_cardinality: col_info += f" categories = {column.categories}," col_info += " Sample rows: " if found: for row in table.examples: col_info += row[column_name] + ", " col_info = col_info[:-2] column_full_info += f"Table: {table_name}, column: {column_name}, additional info: {col_info}\n" else: return "Malformed input, input should be in the following format Example Input: table1 -> column1, table1 -> column2, table2 -> column1" # noqa: E501 if not found: column_full_info += f"Table: {table_name}, column: {column_name} not found in database\n" return column_full_info async def _arun( self, table_name: str, run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("InfoRelevantColumnsTool does not support async") class GetFewShotExamples(BaseSQLDatabaseTool, BaseTool): """Tool to obtain few-shot examples from the pool of samples""" name = "FewshotExamplesRetriever" description = """ Input: Number of required Question/SQL pairs. Output: List of similar Question/SQL pairs related to the given question. 
Use this tool to fetch previously asked Question/SQL pairs as examples for improving SQL query generation. For complex questions, request more examples to gain a better understanding of tables and columns and the SQL keywords to use. If the given question is very similar to one of the retrieved examples, it is recommended to use the same SQL query and modify it slightly to fit the given question. Always use this tool first and before any other tool! """ # noqa: E501 few_shot_examples: List[dict] @catch_exceptions() def _run( self, number_of_samples: str, run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002 ) -> str: """Get the schema for tables in a comma-separated list.""" if number_of_samples.strip().isdigit(): number_of_samples = int(number_of_samples.strip()) else: return "Action input for the fewshot_examples_retriever tool should be an integer" returned_output = "" for example in self.few_shot_examples[:number_of_samples]: returned_output += ( f"Question: {example['prompt_text']} -> SQL: {example['sql']}\n" ) if returned_output == "": returned_output = "No previously asked Question/SQL pairs are available" return returned_output async def _arun( self, number_of_samples: str, run_manager: AsyncCallbackManagerForToolRun | None = None, ) -> str: raise NotImplementedError("GetFewShotExamplesTool does not support async") class SQLDatabaseToolkit(BaseToolkit): """Dataherald toolkit""" db: SQLDatabase = Field(exclude=True) context: List[dict] | None = Field(exclude=True, default=None) few_shot_examples: List[dict] | None = Field(exclude=True, default=None) instructions: List[dict] | None = Field(exclude=True, default=None) db_scan: List[TableDescription] = Field(exclude=True) embedding: OpenAIEmbeddings = Field(exclude=True) @property def dialect(self) -> str: """Return string representation of SQL dialect to use.""" return self.db.dialect class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools = [] query_sql_db_tool = QuerySQLDataBaseTool(db=self.db, context=self.context) tools.append(query_sql_db_tool) if self.instructions is not None: tools.append( GetUserInstructions( db=self.db, context=self.context, instructions=self.instructions ) ) get_current_datetime = SystemTime(db=self.db, context=self.context) tools.append(get_current_datetime) tables_sql_db_tool = TablesSQLDatabaseTool( db=self.db, context=self.context, db_scan=self.db_scan, embedding=self.embedding, ) tools.append(tables_sql_db_tool) schema_sql_db_tool = SchemaSQLDatabaseTool( db=self.db, context=self.context, db_scan=self.db_scan ) tools.append(schema_sql_db_tool) info_relevant_tool = InfoRelevantColumns( db=self.db, context=self.context, db_scan=self.db_scan ) tools.append(info_relevant_tool) column_sample_tool = ColumnEntityChecker(db=self.db, context=self.context) tools.append(column_sample_tool) if self.few_shot_examples is not None: get_fewshot_examples_tool = GetFewShotExamples( db=self.db, context=self.context, few_shot_examples=self.few_shot_examples, ) tools.append(get_fewshot_examples_tool) return tools class DataheraldSQLAgent(SQLGenerator): """Dataherald SQL agent""" max_number_of_examples: int = 5 # maximum number of question/SQL pairs llm: Any = None def remove_duplicate_examples(self, fewshot_exmaples: List[dict]) -> List[dict]: returned_result = [] seen_list = [] for example in fewshot_exmaples: if example["prompt_text"] not in seen_list: seen_list.append(example["prompt_text"]) 
returned_result.append(example) return returned_result def create_sql_agent( self, toolkit: SQLDatabaseToolkit, callback_manager: BaseCallbackManager | None = None, prefix: str = AGENT_PREFIX, suffix: str | None = None, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: List[str] | None = None, max_examples: int = 20, number_of_instructions: int = 1, max_iterations: int | None = int(os.getenv("AGENT_MAX_ITERATIONS", "15")), # noqa: B008 max_execution_time: float | None = None, early_stopping_method: str = "generate", verbose: bool = False, agent_executor_kwargs: Dict[str, Any] | None = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct an SQL agent from an LLM and tools.""" tools = toolkit.get_tools() if max_examples > 0 and number_of_instructions > 0: plan = PLAN_WITH_FEWSHOT_EXAMPLES_AND_INSTRUCTIONS suffix = SUFFIX_WITH_FEW_SHOT_SAMPLES elif max_examples > 0: plan = PLAN_WITH_FEWSHOT_EXAMPLES suffix = SUFFIX_WITH_FEW_SHOT_SAMPLES elif number_of_instructions > 0: plan = PLAN_WITH_INSTRUCTIONS suffix = SUFFIX_WITHOUT_FEW_SHOT_SAMPLES else: plan = PLAN_BASE suffix = SUFFIX_WITHOUT_FEW_SHOT_SAMPLES plan = plan.format( dialect=toolkit.dialect, max_examples=max_examples, ) prefix = prefix.format( dialect=toolkit.dialect, max_examples=max_examples, agent_plan=plan ) prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=self.llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), ) @override def generate_response( self, user_prompt: Prompt, database_connection: DatabaseConnection, context: List[dict] = None, ) -> SQLGeneration: context_store = self.system.instance(ContextStore) storage = self.system.instance(DB) response = SQLGeneration( prompt_id=user_prompt.id, llm_config=self.llm_config, created_at=datetime.datetime.now(), ) self.llm = self.model.get_model( database_connection=database_connection, temperature=0, model_name=self.llm_config.llm_name, api_base=self.llm_config.api_base, ) repository = TableDescriptionRepository(storage) db_scan = repository.get_all_tables_by_db( { "db_connection_id": str(database_connection.id), "status": TableDescriptionStatus.SCANNED.value, } ) if not db_scan: raise ValueError("No scanned tables found for database") few_shot_examples, instructions = context_store.retrieve_context_for_question( user_prompt, number_of_samples=self.max_number_of_examples ) if few_shot_examples is not None: new_fewshot_examples = self.remove_duplicate_examples(few_shot_examples) number_of_samples = len(new_fewshot_examples) else: new_fewshot_examples = None number_of_samples = 0 logger.info(f"Generating SQL response to question: {str(user_prompt.dict())}") self.database = SQLDatabase.get_sql_engine(database_connection) toolkit = SQLDatabaseToolkit( db=self.database, context=context, few_shot_examples=new_fewshot_examples, instructions=instructions, db_scan=db_scan, embedding=OpenAIEmbeddings( openai_api_key=database_connection.decrypt_api_key(), model=EMBEDDING_MODEL, ), ) agent_executor = self.create_sql_agent( toolkit=toolkit, verbose=True, 
max_examples=number_of_samples, number_of_instructions=len(instructions) if instructions is not None else 0, max_execution_time=int(os.environ.get("DH_ENGINE_TIMEOUT", 150)), ) agent_executor.return_intermediate_steps = True agent_executor.handle_parsing_errors = ERROR_PARSING_MESSAGE with get_openai_callback() as cb: try: result = agent_executor.invoke({"input": user_prompt.text}) result = self.check_for_time_out_or_tool_limit(result) except SQLInjectionError as e: raise SQLInjectionError(e) from e except EngineTimeOutORItemLimitError as e: raise EngineTimeOutORItemLimitError(e) from e except Exception as e: return SQLGeneration( prompt_id=user_prompt.id, tokens_used=cb.total_tokens, completed_at=datetime.datetime.now(), sql="", status="INVALID", error=str(e), ) sql_query = "" if "```sql" in result["output"]: sql_query = self.remove_markdown(result["output"]) else: sql_query = self.extract_query_from_intermediate_steps( result["intermediate_steps"] ) logger.info(f"cost: {str(cb.total_cost)} tokens: {str(cb.total_tokens)}") response.sql = replace_unprocessable_characters(sql_query) response.tokens_used = cb.total_tokens response.completed_at = datetime.datetime.now() return self.create_sql_query_status( self.database, response.sql, response, ) @override def stream_response( self, user_prompt: Prompt, database_connection: DatabaseConnection, response: SQLGeneration, queue: Queue, ): context_store = self.system.instance(ContextStore) storage = self.system.instance(DB) sql_generation_repository = SQLGenerationRepository(storage) self.llm = self.model.get_model( database_connection=database_connection, temperature=0, model_name=self.llm_config.llm_name, api_base=self.llm_config.api_base, streaming=True, ) repository = TableDescriptionRepository(storage) db_scan = repository.get_all_tables_by_db( { "db_connection_id": str(database_connection.id), "status": TableDescriptionStatus.SCANNED.value, } ) if not db_scan: raise ValueError("No scanned tables found for database") few_shot_examples, instructions = context_store.retrieve_context_for_question( user_prompt, number_of_samples=self.max_number_of_examples ) if few_shot_examples is not None: new_fewshot_examples = self.remove_duplicate_examples(few_shot_examples) number_of_samples = len(new_fewshot_examples) else: new_fewshot_examples = None number_of_samples = 0 self.database = SQLDatabase.get_sql_engine(database_connection) toolkit = SQLDatabaseToolkit( queuer=queue, db=self.database, context=[{}], few_shot_examples=new_fewshot_examples, instructions=instructions, db_scan=db_scan, embedding=OpenAIEmbeddings( openai_api_key=database_connection.decrypt_api_key(), model=EMBEDDING_MODEL, ), ) agent_executor = self.create_sql_agent( toolkit=toolkit, verbose=True, max_examples=number_of_samples, number_of_instructions=len(instructions) if instructions is not None else 0, max_execution_time=int(os.environ.get("DH_ENGINE_TIMEOUT", 150)), ) agent_executor.return_intermediate_steps = True agent_executor.handle_parsing_errors = ERROR_PARSING_MESSAGE thread = Thread( target=self.stream_agent_steps, args=( user_prompt.text, agent_executor, response, sql_generation_repository, queue, ), ) thread.start()
[ "langchain.agents.mrkl.base.ZeroShotAgent.create_prompt", "langchain.agents.mrkl.base.ZeroShotAgent", "langchain.agents.agent.AgentExecutor.from_agent_and_tools", "langchain.chains.llm.LLMChain", "langchain_community.callbacks.get_openai_callback" ]
[((2000, 2027), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2017, 2027), False, 'import logging\n'), ((2038, 2074), 'dataherald.sql_generator.SQLGenerator.get_upper_bound_limit', 'SQLGenerator.get_upper_bound_limit', ([], {}), '()\n', (2072, 2074), False, 'from dataherald.sql_generator import EngineTimeOutORItemLimitError, SQLGenerator\n'), ((3869, 3888), 'pydantic.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (3874, 3888), False, 'from pydantic import BaseModel, Field\n'), ((3922, 3955), 'pydantic.Field', 'Field', ([], {'exclude': '(True)', 'default': 'None'}), '(exclude=True, default=None)\n', (3927, 3955), False, 'from pydantic import BaseModel, Field\n'), ((20062, 20081), 'pydantic.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (20067, 20081), False, 'from pydantic import BaseModel, Field\n'), ((20115, 20148), 'pydantic.Field', 'Field', ([], {'exclude': '(True)', 'default': 'None'}), '(exclude=True, default=None)\n', (20120, 20148), False, 'from pydantic import BaseModel, Field\n'), ((20192, 20225), 'pydantic.Field', 'Field', ([], {'exclude': '(True)', 'default': 'None'}), '(exclude=True, default=None)\n', (20197, 20225), False, 'from pydantic import BaseModel, Field\n'), ((20264, 20297), 'pydantic.Field', 'Field', ([], {'exclude': '(True)', 'default': 'None'}), '(exclude=True, default=None)\n', (20269, 20297), False, 'from pydantic import BaseModel, Field\n'), ((20336, 20355), 'pydantic.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (20341, 20355), False, 'from pydantic import BaseModel, Field\n'), ((20390, 20409), 'pydantic.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (20395, 20409), False, 'from pydantic import BaseModel, Field\n'), ((2266, 2275), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (2271, 2275), False, 'from functools import wraps\n'), ((4731, 4754), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4752, 4754), False, 'import datetime\n'), ((9562, 9649), 'pandas.DataFrame', 'pd.DataFrame', (['table_representations'], {'columns': "['table_name', 'table_representation']"}), "(table_representations, columns=['table_name',\n 'table_representation'])\n", (9574, 9649), True, 'import pandas as pd\n'), ((11971, 12000), 'sqlalchemy.MetaData', 'MetaData', ([], {'bind': 'self.db.engine'}), '(bind=self.db.engine)\n', (11979, 12000), False, 'from sqlalchemy import MetaData\n'), ((24413, 24555), 'langchain.agents.mrkl.base.ZeroShotAgent.create_prompt', 'ZeroShotAgent.create_prompt', (['tools'], {'prefix': 'prefix', 'suffix': 'suffix', 'format_instructions': 'format_instructions', 'input_variables': 'input_variables'}), '(tools, prefix=prefix, suffix=suffix,\n format_instructions=format_instructions, input_variables=input_variables)\n', (24440, 24555), False, 'from langchain.agents.mrkl.base import ZeroShotAgent\n'), ((24643, 24715), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=self.llm, prompt=prompt, callback_manager=callback_manager)\n', (24651, 24715), False, 'from langchain.chains.llm import LLMChain\n'), ((24830, 24900), 'langchain.agents.mrkl.base.ZeroShotAgent', 'ZeroShotAgent', ([], {'llm_chain': 'llm_chain', 'allowed_tools': 'tool_names'}), '(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)\n', (24843, 24900), False, 'from langchain.agents.mrkl.base import ZeroShotAgent\n'), ((24916, 25187), 
'langchain.agents.agent.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'callback_manager': 'callback_manager', 'verbose': 'verbose', 'max_iterations': 'max_iterations', 'max_execution_time': 'max_execution_time', 'early_stopping_method': 'early_stopping_method'}), '(agent=agent, tools=tools,\n callback_manager=callback_manager, verbose=verbose, max_iterations=\n max_iterations, max_execution_time=max_execution_time,\n early_stopping_method=early_stopping_method, **agent_executor_kwargs or {})\n', (24950, 25187), False, 'from langchain.agents.agent import AgentExecutor\n'), ((25998, 26033), 'dataherald.db_scanner.repository.base.TableDescriptionRepository', 'TableDescriptionRepository', (['storage'], {}), '(storage)\n', (26024, 26033), False, 'from dataherald.db_scanner.repository.base import TableDescriptionRepository\n'), ((26897, 26944), 'dataherald.sql_database.base.SQLDatabase.get_sql_engine', 'SQLDatabase.get_sql_engine', (['database_connection'], {}), '(database_connection)\n', (26923, 26944), False, 'from dataherald.sql_database.base import SQLDatabase, SQLInjectionError\n'), ((29041, 29064), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29062, 29064), False, 'import datetime\n'), ((29527, 29559), 'dataherald.repositories.sql_generations.SQLGenerationRepository', 'SQLGenerationRepository', (['storage'], {}), '(storage)\n', (29550, 29559), False, 'from dataherald.repositories.sql_generations import SQLGenerationRepository\n'), ((29836, 29871), 'dataherald.db_scanner.repository.base.TableDescriptionRepository', 'TableDescriptionRepository', (['storage'], {}), '(storage)\n', (29862, 29871), False, 'from dataherald.db_scanner.repository.base import TableDescriptionRepository\n'), ((30648, 30695), 'dataherald.sql_database.base.SQLDatabase.get_sql_engine', 'SQLDatabase.get_sql_engine', (['database_connection'], {}), '(database_connection)\n', (30674, 30695), False, 'from dataherald.sql_database.base import SQLDatabase, SQLInjectionError\n'), ((31576, 31703), 'threading.Thread', 'Thread', ([], {'target': 'self.stream_agent_steps', 'args': '(user_prompt.text, agent_executor, response, sql_generation_repository, queue)'}), '(target=self.stream_agent_steps, args=(user_prompt.text,\n agent_executor, response, sql_generation_repository, queue))\n', (31582, 31703), False, 'from threading import Thread\n'), ((23228, 23267), 'os.getenv', 'os.getenv', (['"""AGENT_MAX_ITERATIONS"""', '"""15"""'], {}), "('AGENT_MAX_ITERATIONS', '15')\n", (23237, 23267), False, 'import os\n'), ((27798, 27819), 'langchain_community.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (27817, 27819), False, 'from langchain_community.callbacks import get_openai_callback\n'), ((8452, 8464), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (8458, 8464), True, 'import numpy as np\n'), ((25715, 25738), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25736, 25738), False, 'import datetime\n'), ((8468, 8485), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (8482, 8485), True, 'import numpy as np\n'), ((8488, 8505), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (8502, 8505), True, 'import numpy as np\n'), ((27607, 27647), 'os.environ.get', 'os.environ.get', (['"""DH_ENGINE_TIMEOUT"""', '(150)'], {}), "('DH_ENGINE_TIMEOUT', 150)\n", (27621, 27647), False, 'import os\n'), ((28056, 28076), 'dataherald.sql_database.base.SQLInjectionError', 'SQLInjectionError', (['e'], 
{}), '(e)\n', (28073, 28076), False, 'from dataherald.sql_database.base import SQLDatabase, SQLInjectionError\n'), ((28161, 28193), 'dataherald.sql_generator.EngineTimeOutORItemLimitError', 'EngineTimeOutORItemLimitError', (['e'], {}), '(e)\n', (28190, 28193), False, 'from dataherald.sql_generator import EngineTimeOutORItemLimitError, SQLGenerator\n'), ((31381, 31421), 'os.environ.get', 'os.environ.get', (['"""DH_ENGINE_TIMEOUT"""', '(150)'], {}), "('DH_ENGINE_TIMEOUT', 150)\n", (31395, 31421), False, 'import os\n'), ((6019, 6059), 'os.getenv', 'os.getenv', (['"""SQL_EXECUTION_TIMEOUT"""', '"""60"""'], {}), "('SQL_EXECUTION_TIMEOUT', '60')\n", (6028, 6059), False, 'import os\n'), ((28402, 28425), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28423, 28425), False, 'import datetime\n')]
from marqo import Client import pandas as pd import numpy as np from langchain_openai import OpenAI from langchain.docstore.document import Document from langchain.chains import LLMChain from dotenv import load_dotenv from utilities import ( load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds ) load_dotenv() if __name__ == "__main__": ############################################################# # STEP 0: Install Marqo ############################################################# # run the following docker commands from the terminal to start marqo # docker rm -f marqo # docker pull marqoai/marqo:2.0.0 # docker run --name marqo -it -p 8882:8882 --add-host host.docker.internal:host-gateway marqoai/marqo:2.0.0 ############################################################# # STEP 1: Setup Marqo ############################################################# mq = Client() index_name = "iron-docs" # (optinally) delete if it already exists try: mq.index(index_name).delete() except: pass # we can set some specific settings for the index. if they are not provided, sensible defaults are used index_settings = { "model": "flax-sentence-embeddings/all_datasets_v4_MiniLM-L6", "normalizeEmbeddings": True, "textPreprocessing": { "splitLength": 3, "splitOverlap": 1, "splitMethod": "sentence" }, } # create the index with custom settings mq.create_index(index_name, settings_dict=index_settings) ############################################################# # STEP 2: Load the data ############################################################# df = load_data() # turn the data into a dict for indexing documents = df.to_dict(orient='records') ############################################################# # STEP 3: Index the data ############################################################# # index the documents indexing = mq.index(index_name).add_documents(documents, tensor_fields=["cleaned_text"], client_batch_size=64) ############################################################# # STEP 4: Search the data ############################################################# # try a generic search q = "what is the rated voltage" results = mq.index(index_name).search(q) print(results['hits'][0]) ############################################################# # STEP 5: Make it chatty ############################################################# highlights, texts = extract_text_from_highlights(results, token_limit=150) docs = [Document(page_content=f"Source [{ind}]:" + t) for ind, t in enumerate(texts)] llm = OpenAI(temperature=0.9) chain_qa = LLMChain(llm=llm, prompt=qna_prompt()) llm_results = chain_qa.invoke({"summaries": docs, "question": results['query']}, return_only_outputs=True) print(llm_results['text']) ############################################################# # STEP 6: Score the references ############################################################# score_threshold = 0.20 top_k = 3 scores = predict_ce(llm_results['text'], texts) inds = get_sorted_inds(scores) scores = scores.cpu().numpy() scores = [np.round(s[0], 2) for s in scores] references = [(str(np.round(scores[i], 2)), texts[i]) for i in inds[:top_k] if scores[i] > score_threshold] df_ref = pd.DataFrame(references, columns=['score', 'sources']) print(df_ref)
[ "langchain_openai.OpenAI", "langchain.docstore.document.Document" ]
[((349, 362), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (360, 362), False, 'from dotenv import load_dotenv\n'), ((984, 992), 'marqo.Client', 'Client', ([], {}), '()\n', (990, 992), False, 'from marqo import Client\n'), ((1812, 1823), 'utilities.load_data', 'load_data', ([], {}), '()\n', (1821, 1823), False, 'from utilities import load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds\n'), ((2727, 2781), 'utilities.extract_text_from_highlights', 'extract_text_from_highlights', (['results'], {'token_limit': '(150)'}), '(results, token_limit=150)\n', (2755, 2781), False, 'from utilities import load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds\n'), ((2882, 2905), 'langchain_openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (2888, 2905), False, 'from langchain_openai import OpenAI\n'), ((3331, 3369), 'utilities.predict_ce', 'predict_ce', (["llm_results['text']", 'texts'], {}), "(llm_results['text'], texts)\n", (3341, 3369), False, 'from utilities import load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds\n'), ((3381, 3404), 'utilities.get_sorted_inds', 'get_sorted_inds', (['scores'], {}), '(scores)\n', (3396, 3404), False, 'from utilities import load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds\n'), ((3613, 3667), 'pandas.DataFrame', 'pd.DataFrame', (['references'], {'columns': "['score', 'sources']"}), "(references, columns=['score', 'sources'])\n", (3625, 3667), True, 'import pandas as pd\n'), ((2794, 2839), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "(f'Source [{ind}]:' + t)"}), "(page_content=f'Source [{ind}]:' + t)\n", (2802, 2839), False, 'from langchain.docstore.document import Document\n'), ((3453, 3470), 'numpy.round', 'np.round', (['s[0]', '(2)'], {}), '(s[0], 2)\n', (3461, 3470), True, 'import numpy as np\n'), ((2946, 2958), 'utilities.qna_prompt', 'qna_prompt', ([], {}), '()\n', (2956, 2958), False, 'from utilities import load_data, extract_text_from_highlights, qna_prompt, predict_ce, get_sorted_inds\n'), ((3511, 3533), 'numpy.round', 'np.round', (['scores[i]', '(2)'], {}), '(scores[i], 2)\n', (3519, 3533), True, 'import numpy as np\n')]
from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status from fastapi.responses import RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates from fastapi.encoders import jsonable_encoder from langchain.llms import CTransformers from langchain.chains import QAGenerationChain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.docstore.document import Document from langchain.document_loaders import PyPDFLoader from langchain.prompts import PromptTemplate from langchain.embeddings import HuggingFaceBgeEmbeddings from langchain.vectorstores import FAISS from langchain.chains.summarize import load_summarize_chain from langchain.chains import RetrievalQA import os import json import time import uvicorn import aiofiles from PyPDF2 import PdfReader import csv app = FastAPI() app.mount("/static", StaticFiles(directory="static"), name="static") templates = Jinja2Templates(directory="templates") def load_llm(): # Load the locally downloaded model here llm = CTransformers( model = "mistral-7b-instruct-v0.1.Q4_K_S.gguf", model_type="mistral", max_new_tokens = 1048, temperature = 0.3 ) return llm def file_processing(file_path): # Load data from PDF loader = PyPDFLoader(file_path) data = loader.load() question_gen = '' for page in data: question_gen += page.page_content splitter_ques_gen = RecursiveCharacterTextSplitter( chunk_size = 1000, chunk_overlap = 100 ) chunks_ques_gen = splitter_ques_gen.split_text(question_gen) document_ques_gen = [Document(page_content=t) for t in chunks_ques_gen] splitter_ans_gen = RecursiveCharacterTextSplitter( chunk_size = 300, chunk_overlap = 30 ) document_answer_gen = splitter_ans_gen.split_documents( document_ques_gen ) return document_ques_gen, document_answer_gen def llm_pipeline(file_path): document_ques_gen, document_answer_gen = file_processing(file_path) llm_ques_gen_pipeline = load_llm() prompt_template = """ You are an expert at creating questions based on coding materials and documentation. Your goal is to prepare a coder or programmer for their exam and coding tests. You do this by asking questions about the text below: ------------ {text} ------------ Create questions that will prepare the coders or programmers for their tests. Make sure not to lose any important information. QUESTIONS: """ PROMPT_QUESTIONS = PromptTemplate(template=prompt_template, input_variables=["text"]) refine_template = (""" You are an expert at creating practice questions based on coding material and documentation. Your goal is to help a coder or programmer prepare for a coding test. We have received some practice questions to a certain extent: {existing_answer}. We have the option to refine the existing questions or add new ones. (only if necessary) with some more context below. ------------ {text} ------------ Given the new context, refine the original questions in English. If the context is not helpful, please provide the original questions. 
QUESTIONS: """ ) REFINE_PROMPT_QUESTIONS = PromptTemplate( input_variables=["existing_answer", "text"], template=refine_template, ) ques_gen_chain = load_summarize_chain(llm = llm_ques_gen_pipeline, chain_type = "refine", verbose = True, question_prompt=PROMPT_QUESTIONS, refine_prompt=REFINE_PROMPT_QUESTIONS) ques = ques_gen_chain.run(document_ques_gen) embeddings = HuggingFaceBgeEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") vector_store = FAISS.from_documents(document_answer_gen, embeddings) llm_answer_gen = load_llm() ques_list = ques.split("\n") filtered_ques_list = [element for element in ques_list if element.endswith('?') or element.endswith('.')] answer_generation_chain = RetrievalQA.from_chain_type(llm=llm_answer_gen, chain_type="stuff", retriever=vector_store.as_retriever()) return answer_generation_chain, filtered_ques_list def get_csv (file_path): answer_generation_chain, ques_list = llm_pipeline(file_path) base_folder = 'static/output/' if not os.path.isdir(base_folder): os.mkdir(base_folder) output_file = base_folder+"QA.csv" with open(output_file, "w", newline="", encoding="utf-8") as csvfile: csv_writer = csv.writer(csvfile) csv_writer.writerow(["Question", "Answer"]) # Writing the header row for question in ques_list: print("Question: ", question) answer = answer_generation_chain.run(question) print("Answer: ", answer) print("--------------------------------------------------\n\n") # Save answer to CSV file csv_writer.writerow([question, answer]) return output_file @app.get("/") async def index(request: Request): return templates.TemplateResponse("index.html", {"request": request}) @app.post("/upload") async def chat(request: Request, pdf_file: bytes = File(), filename: str = Form(...)): base_folder = 'static/docs/' if not os.path.isdir(base_folder): os.mkdir(base_folder) pdf_filename = os.path.join(base_folder, filename) async with aiofiles.open(pdf_filename, 'wb') as f: await f.write(pdf_file) response_data = jsonable_encoder(json.dumps({"msg": 'success',"pdf_filename": pdf_filename})) res = Response(response_data) return res @app.post("/analyze") async def chat(request: Request, pdf_filename: str = Form(...)): output_file = get_csv(pdf_filename) response_data = jsonable_encoder(json.dumps({"output_file": output_file})) res = Response(response_data) return res if __name__ == "__main__": uvicorn.run("app:app", host='0.0.0.0', port=8000, reload=True)
[ "langchain.chains.summarize.load_summarize_chain", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.embeddings.HuggingFaceBgeEmbeddings", "langchain.docstore.document.Document", "langchain.llms.CTransformers", "langchain.vectorstores.FAISS.from_documents", "langchain.document_loaders.PyPDFLoader", "langchain.prompts.PromptTemplate" ]
[((911, 920), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (918, 920), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status\n'), ((1008, 1046), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (1023, 1046), False, 'from fastapi.templating import Jinja2Templates\n'), ((945, 976), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': '"""static"""'}), "(directory='static')\n", (956, 976), False, 'from fastapi.staticfiles import StaticFiles\n'), ((1123, 1247), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': '"""mistral-7b-instruct-v0.1.Q4_K_S.gguf"""', 'model_type': '"""mistral"""', 'max_new_tokens': '(1048)', 'temperature': '(0.3)'}), "(model='mistral-7b-instruct-v0.1.Q4_K_S.gguf', model_type=\n 'mistral', max_new_tokens=1048, temperature=0.3)\n", (1136, 1247), False, 'from langchain.llms import CTransformers\n'), ((1385, 1407), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['file_path'], {}), '(file_path)\n', (1396, 1407), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((1562, 1628), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (1592, 1628), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1831, 1895), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(30)'}), '(chunk_size=300, chunk_overlap=30)\n', (1861, 1895), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2734, 2800), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['text']"}), "(template=prompt_template, input_variables=['text'])\n", (2748, 2800), False, 'from langchain.prompts import PromptTemplate\n'), ((3479, 3569), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['existing_answer', 'text']", 'template': 'refine_template'}), "(input_variables=['existing_answer', 'text'], template=\n refine_template)\n", (3493, 3569), False, 'from langchain.prompts import PromptTemplate\n'), ((3615, 3779), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm_ques_gen_pipeline', 'chain_type': '"""refine"""', 'verbose': '(True)', 'question_prompt': 'PROMPT_QUESTIONS', 'refine_prompt': 'REFINE_PROMPT_QUESTIONS'}), "(llm=llm_ques_gen_pipeline, chain_type='refine',\n verbose=True, question_prompt=PROMPT_QUESTIONS, refine_prompt=\n REFINE_PROMPT_QUESTIONS)\n", (3635, 3779), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((4033, 4111), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (4057, 4111), False, 'from langchain.embeddings import HuggingFaceBgeEmbeddings\n'), ((4134, 4187), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['document_answer_gen', 'embeddings'], {}), '(document_answer_gen, embeddings)\n', (4154, 4187), False, 'from langchain.vectorstores import FAISS\n'), ((5686, 5692), 'fastapi.File', 'File', ([], {}), '()\n', (5690, 5692), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, 
status\n'), ((5710, 5719), 'fastapi.Form', 'Form', (['...'], {}), '(...)\n', (5714, 5719), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status\n'), ((5847, 5882), 'os.path.join', 'os.path.join', (['base_folder', 'filename'], {}), '(base_folder, filename)\n', (5859, 5882), False, 'import os\n'), ((6084, 6107), 'fastapi.Response', 'Response', (['response_data'], {}), '(response_data)\n', (6092, 6107), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status\n'), ((6205, 6214), 'fastapi.Form', 'Form', (['...'], {}), '(...)\n', (6209, 6214), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status\n'), ((6349, 6372), 'fastapi.Response', 'Response', (['response_data'], {}), '(response_data)\n', (6357, 6372), False, 'from fastapi import FastAPI, Form, Request, Response, File, Depends, HTTPException, status\n'), ((6424, 6486), 'uvicorn.run', 'uvicorn.run', (['"""app:app"""'], {'host': '"""0.0.0.0"""', 'port': '(8000)', 'reload': '(True)'}), "('app:app', host='0.0.0.0', port=8000, reload=True)\n", (6435, 6486), False, 'import uvicorn\n'), ((1754, 1778), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (1762, 1778), False, 'from langchain.docstore.document import Document\n'), ((4812, 4838), 'os.path.isdir', 'os.path.isdir', (['base_folder'], {}), '(base_folder)\n', (4825, 4838), False, 'import os\n'), ((4849, 4870), 'os.mkdir', 'os.mkdir', (['base_folder'], {}), '(base_folder)\n', (4857, 4870), False, 'import os\n'), ((5008, 5027), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (5018, 5027), False, 'import csv\n'), ((5768, 5794), 'os.path.isdir', 'os.path.isdir', (['base_folder'], {}), '(base_folder)\n', (5781, 5794), False, 'import os\n'), ((5805, 5826), 'os.mkdir', 'os.mkdir', (['base_folder'], {}), '(base_folder)\n', (5813, 5826), False, 'import os\n'), ((5901, 5934), 'aiofiles.open', 'aiofiles.open', (['pdf_filename', '"""wb"""'], {}), "(pdf_filename, 'wb')\n", (5914, 5934), False, 'import aiofiles\n'), ((6012, 6072), 'json.dumps', 'json.dumps', (["{'msg': 'success', 'pdf_filename': pdf_filename}"], {}), "({'msg': 'success', 'pdf_filename': pdf_filename})\n", (6022, 6072), False, 'import json\n'), ((6296, 6336), 'json.dumps', 'json.dumps', (["{'output_file': output_file}"], {}), "({'output_file': output_file})\n", (6306, 6336), False, 'import json\n')]
#!/usr/bin/env python """Example LangChain server exposes a retriever.""" from fastapi import FastAPI from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from langserve import add_routes vectorstore = FAISS.from_texts( ["cats like fish", "dogs like sticks"], embedding=OpenAIEmbeddings() ) retriever = vectorstore.as_retriever() app = FastAPI( title="LangChain Server", version="1.0", description="Spin up a simple api server using Langchain's Runnable interfaces", ) # Adds routes to the app for using the retriever under: # /invoke # /batch # /stream add_routes(app, retriever) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="localhost", port=8000)
[ "langchain.embeddings.OpenAIEmbeddings" ]
[((381, 515), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""LangChain Server"""', 'version': '"""1.0"""', 'description': '"""Spin up a simple api server using Langchain\'s Runnable interfaces"""'}), '(title=\'LangChain Server\', version=\'1.0\', description=\n "Spin up a simple api server using Langchain\'s Runnable interfaces")\n', (388, 515), False, 'from fastapi import FastAPI\n'), ((611, 637), 'langserve.add_routes', 'add_routes', (['app', 'retriever'], {}), '(app, retriever)\n', (621, 637), False, 'from langserve import add_routes\n'), ((690, 735), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""localhost"""', 'port': '(8000)'}), "(app, host='localhost', port=8000)\n", (701, 735), False, 'import uvicorn\n'), ((314, 332), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (330, 332), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
"""Wrapper around Google's PaLM Chat API.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import google.generativeai as genai logger = logging.getLogger(__name__) class ChatGooglePalmError(Exception): pass def _truncate_at_stop_tokens( text: str, stop: Optional[List[str]], ) -> str: """Truncates text at the earliest stop token found.""" if stop is None: return text for stop_token in stop: stop_token_idx = text.find(stop_token) if stop_token_idx != -1: text = text[:stop_token_idx] return text def _response_to_result( response: genai.types.ChatResponse, stop: Optional[List[str]], ) -> ChatResult: """Converts a PaLM API response into a LangChain ChatResult.""" if not response.candidates: raise ChatGooglePalmError("ChatResponse must have at least one candidate.") generations: List[ChatGeneration] = [] for candidate in response.candidates: author = candidate.get("author") if author is None: raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}") content = _truncate_at_stop_tokens(candidate.get("content", ""), stop) if content is None: raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}") if author == "ai": generations.append( ChatGeneration(text=content, message=AIMessage(content=content)) ) elif author == "human": generations.append( ChatGeneration( text=content, message=HumanMessage(content=content), ) ) else: generations.append( ChatGeneration( text=content, message=ChatMessage(role=author, content=content), ) ) return ChatResult(generations=generations) def _messages_to_prompt_dict( input_messages: List[BaseMessage], ) -> genai.types.MessagePromptDict: """Converts a list of LangChain messages into a PaLM API MessagePrompt structure.""" import google.generativeai as genai context: str = "" examples: List[genai.types.MessageDict] = [] messages: List[genai.types.MessageDict] = [] remaining = list(enumerate(input_messages)) while remaining: index, input_message = remaining.pop(0) if isinstance(input_message, SystemMessage): if index != 0: raise ChatGooglePalmError("System message must be first input message.") context = input_message.content elif isinstance(input_message, HumanMessage) and input_message.example: if messages: raise ChatGooglePalmError( "Message examples must come before other messages." ) _, next_input_message = remaining.pop(0) if isinstance(next_input_message, AIMessage) and next_input_message.example: examples.extend( [ genai.types.MessageDict( author="human", content=input_message.content ), genai.types.MessageDict( author="ai", content=next_input_message.content ), ] ) else: raise ChatGooglePalmError( "Human example message must be immediately followed by an " " AI example response." ) elif isinstance(input_message, AIMessage) and input_message.example: raise ChatGooglePalmError( "AI example message must be immediately preceded by a Human " "example message." 
) elif isinstance(input_message, AIMessage): messages.append( genai.types.MessageDict(author="ai", content=input_message.content) ) elif isinstance(input_message, HumanMessage): messages.append( genai.types.MessageDict(author="human", content=input_message.content) ) elif isinstance(input_message, ChatMessage): messages.append( genai.types.MessageDict( author=input_message.role, content=input_message.content ) ) else: raise ChatGooglePalmError( "Messages without an explicit role not supported by PaLM API." ) return genai.types.MessagePromptDict( context=context, examples=examples, messages=messages, ) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" import google.api_core.exceptions multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _chat_with_retry(**kwargs: Any) -> Any: return llm.client.chat(**kwargs) return _chat_with_retry(**kwargs) async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator async def _achat_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.chat_async(**kwargs) return await _achat_with_retry(**kwargs) class ChatGooglePalm(BaseChatModel, BaseModel): """Wrapper around Google's PaLM Chat API. To use you must have the google.generativeai Python package installed and either: 1. The ``GOOGLE_API_KEY``` environment varaible set with your API key, or 2. Pass your API key using the google_api_key kwarg to the ChatGoogle constructor. Example: .. code-block:: python from langchain.chat_models import ChatGooglePalm chat = ChatGooglePalm() """ client: Any #: :meta private: model_name: str = "models/chat-bison-001" """Model name to use.""" google_api_key: Optional[str] = None temperature: Optional[float] = None """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ChatGooglePalmError( "Could not import google.generativeai python package. 
" "Please install it with `pip install google-generativeai`" ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = chat_with_retry( self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, ) return _response_to_result(response, stop) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = await achat_with_retry( self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, ) return _response_to_result(response, stop) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "n": self.n, } @property def _llm_type(self) -> str: return "google-palm-chat"
[ "langchain.schema.ChatMessage", "langchain.utils.get_from_dict_or_env", "langchain.schema.ChatResult", "langchain.schema.HumanMessage", "langchain.schema.AIMessage" ]
[((792, 819), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (809, 819), False, 'import logging\n'), ((2563, 2598), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (2573, 2598), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage\n'), ((5264, 5353), 'google.generativeai.types.MessagePromptDict', 'genai.types.MessagePromptDict', ([], {'context': 'context', 'examples': 'examples', 'messages': 'messages'}), '(context=context, examples=examples, messages=\n messages)\n', (5293, 5353), True, 'import google.generativeai as genai\n'), ((8257, 8273), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (8271, 8273), False, 'from pydantic import BaseModel, root_validator\n'), ((8442, 8506), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""google_api_key"""', '"""GOOGLE_API_KEY"""'], {}), "(values, 'google_api_key', 'GOOGLE_API_KEY')\n", (8462, 8506), False, 'from langchain.utils import get_from_dict_or_env\n'), ((5697, 5728), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['max_retries'], {}), '(max_retries)\n', (5715, 5728), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((5743, 5816), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': 'multiplier', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=multiplier, min=min_seconds, max=max_seconds)\n', (5759, 5816), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((6114, 6155), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (6130, 6155), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((8603, 8642), 'google.generativeai.configure', 'genai.configure', ([], {'api_key': 'google_api_key'}), '(api_key=google_api_key)\n', (8618, 8642), True, 'import google.generativeai as genai\n'), ((6015, 6081), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['google.api_core.exceptions.GoogleAPIError'], {}), '(google.api_core.exceptions.GoogleAPIError)\n', (6038, 6081), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((5846, 5915), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['google.api_core.exceptions.ResourceExhausted'], {}), '(google.api_core.exceptions.ResourceExhausted)\n', (5869, 5915), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((5930, 6000), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['google.api_core.exceptions.ServiceUnavailable'], {}), '(google.api_core.exceptions.ServiceUnavailable)\n', (5953, 6000), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((2073, 2099), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2082, 2099), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage\n'), ((2273, 2302), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'content'}), '(content=content)\n', (2285, 2302), False, 'from 
langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage\n'), ((2476, 2517), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': 'author', 'content': 'content'}), '(role=author, content=content)\n', (2487, 2517), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage\n'), ((3748, 3818), 'google.generativeai.types.MessageDict', 'genai.types.MessageDict', ([], {'author': '"""human"""', 'content': 'input_message.content'}), "(author='human', content=input_message.content)\n", (3771, 3818), True, 'import google.generativeai as genai\n'), ((3898, 3970), 'google.generativeai.types.MessageDict', 'genai.types.MessageDict', ([], {'author': '"""ai"""', 'content': 'next_input_message.content'}), "(author='ai', content=next_input_message.content)\n", (3921, 3970), True, 'import google.generativeai as genai\n'), ((4608, 4675), 'google.generativeai.types.MessageDict', 'genai.types.MessageDict', ([], {'author': '"""ai"""', 'content': 'input_message.content'}), "(author='ai', content=input_message.content)\n", (4631, 4675), True, 'import google.generativeai as genai\n'), ((4789, 4859), 'google.generativeai.types.MessageDict', 'genai.types.MessageDict', ([], {'author': '"""human"""', 'content': 'input_message.content'}), "(author='human', content=input_message.content)\n", (4812, 4859), True, 'import google.generativeai as genai\n'), ((4972, 5058), 'google.generativeai.types.MessageDict', 'genai.types.MessageDict', ([], {'author': 'input_message.role', 'content': 'input_message.content'}), '(author=input_message.role, content=input_message.\n content)\n', (4995, 5058), True, 'import google.generativeai as genai\n')]
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @File : create_db.py @Time : 2023/12/14 10:56:31 @Author : Logan Zou @Version : 1.0 @Contact : [email protected] @License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA @Desc : 知识库搭建 ''' # 首先导入所需第三方库 from langchain.document_loaders import UnstructuredFileLoader from langchain.document_loaders import UnstructuredMarkdownLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma from langchain.embeddings.huggingface import HuggingFaceEmbeddings from tqdm import tqdm import os # 获取文件路径函数 def get_files(dir_path): # args:dir_path,目标文件夹路径 file_list = [] for filepath, dirnames, filenames in os.walk(dir_path): # os.walk 函数将递归遍历指定文件夹 for filename in filenames: # 通过后缀名判断文件类型是否满足要求 if filename.endswith(".md"): # 如果满足要求,将其绝对路径加入到结果列表 file_list.append(os.path.join(filepath, filename)) elif filename.endswith(".txt"): file_list.append(os.path.join(filepath, filename)) return file_list # 加载文件函数 def get_text(dir_path): # args:dir_path,目标文件夹路径 # 首先调用上文定义的函数得到目标文件路径列表 file_lst = get_files(dir_path) # docs 存放加载之后的纯文本对象 docs = [] # 遍历所有目标文件 for one_file in tqdm(file_lst): file_type = one_file.split('.')[-1] if file_type == 'md': loader = UnstructuredMarkdownLoader(one_file) elif file_type == 'txt': loader = UnstructuredFileLoader(one_file) else: # 如果是不符合条件的文件,直接跳过 continue docs.extend(loader.load()) return docs # 目标文件夹 tar_dir = [ "/root/autodl-tmp/self-llm", "/root/autodl-tmp/llm-universe", "/root/autodl-tmp/prompt-engineering-for-developers", "/root/autodl-tmp/so-large-lm", "/root/autodl-tmp/hugging-llm", ] # 加载目标文件 docs = [] for dir_path in tar_dir: docs.extend(get_text(dir_path)) # 对文本进行分块 text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=150) split_docs = text_splitter.split_documents(docs) # 加载开源词向量模型 embeddings = HuggingFaceEmbeddings(model_name="/root/autodl-tmp/sentence-transformer") # 构建向量数据库 # 定义持久化路径 persist_directory = 'data_base/vector_db/chroma' # 加载数据库 vectordb = Chroma.from_documents( documents=split_docs, embedding=embeddings, persist_directory=persist_directory # 允许我们将persist_directory目录保存到磁盘上 ) # 将加载的向量数据库持久化到磁盘上 vectordb.persist()
[ "langchain.document_loaders.UnstructuredFileLoader", "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.vectorstores.Chroma.from_documents", "langchain.document_loaders.UnstructuredMarkdownLoader" ]
[((2018, 2083), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(150)'}), '(chunk_size=500, chunk_overlap=150)\n', (2048, 2083), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2164, 2237), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""/root/autodl-tmp/sentence-transformer"""'}), "(model_name='/root/autodl-tmp/sentence-transformer')\n", (2185, 2237), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((2327, 2433), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'split_docs', 'embedding': 'embeddings', 'persist_directory': 'persist_directory'}), '(documents=split_docs, embedding=embeddings,\n persist_directory=persist_directory)\n', (2348, 2433), False, 'from langchain.vectorstores import Chroma\n'), ((741, 758), 'os.walk', 'os.walk', (['dir_path'], {}), '(dir_path)\n', (748, 758), False, 'import os\n'), ((1335, 1349), 'tqdm.tqdm', 'tqdm', (['file_lst'], {}), '(file_lst)\n', (1339, 1349), False, 'from tqdm import tqdm\n'), ((1446, 1482), 'langchain.document_loaders.UnstructuredMarkdownLoader', 'UnstructuredMarkdownLoader', (['one_file'], {}), '(one_file)\n', (1472, 1482), False, 'from langchain.document_loaders import UnstructuredMarkdownLoader\n'), ((1537, 1569), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['one_file'], {}), '(one_file)\n', (1559, 1569), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((971, 1003), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (983, 1003), False, 'import os\n'), ((1082, 1114), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (1094, 1114), False, 'import os\n')]
from flask import Flask, request from flask_restful import Resource, Api, reqparse, abort from werkzeug.utils import secure_filename ######################################################################## import tempfile import os from langchain.document_loaders import DirectoryLoader, PyMuPDFLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.chains import ConversationalRetrievalChain from langchain.chat_models import ChatOpenAI from langchain.vectorstores import Pinecone import pinecone from templates.qa_prompt import QA_PROMPT from templates.condense_prompt import CONDENSE_PROMPT from dotenv import load_dotenv load_dotenv() openai_api_key_env = os.environ.get('OPENAI_API_KEY') pinecone_api_key_env = os.environ.get('PINECONE_API_KEY') pinecone_environment_env = os.environ.get('PINECONE_ENVIRONMENT') pinecone_index_env = os.environ.get('PINECONE_INDEX') pinecone_namespace = 'testing-pdf-2389203901' app = Flask("L-ChatBot") UPLOAD_FOLDER = 'documents' ALLOWED_EXTENSIONS = {'pdf'} app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER api = Api(app) parser = reqparse.RequestParser() def get_answer(message, temperature=0.7, source_amount=4): chat_history = [] embeddings = OpenAIEmbeddings( model='text-embedding-ada-002', openai_api_key=openai_api_key_env) pinecone.init(api_key=pinecone_api_key_env, environment=pinecone_environment_env) vectorstore = Pinecone.from_existing_index( index_name=pinecone_index_env, embedding=embeddings, text_key='text', namespace=pinecone_namespace) model = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=temperature, openai_api_key=openai_api_key_env, streaming=False) # max temperature is 2 least is 0 retriever = vectorstore.as_retriever(search_kwargs={ "k": source_amount}, qa_template=QA_PROMPT, question_generator_template=CONDENSE_PROMPT) # 9 is the max sources qa = ConversationalRetrievalChain.from_llm( llm=model, retriever=retriever, return_source_documents=True) result = qa({"question": message, "chat_history": chat_history}) print("Cevap Geldi") answer = result["answer"] source_documents = result['source_documents'] parsed_documents = [] for doc in source_documents: parsed_doc = { "page_content": doc.page_content, "metadata": { "author": doc.metadata.get("author", ""), "creationDate": doc.metadata.get("creationDate", ""), "creator": doc.metadata.get("creator", ""), "file_path": doc.metadata.get("file_path", ""), "format": doc.metadata.get("format", ""), "keywords": doc.metadata.get("keywords", ""), "modDate": doc.metadata.get("modDate", ""), "page_number": doc.metadata.get("page_number", 0), "producer": doc.metadata.get("producer", ""), "source": doc.metadata.get("source", ""), "subject": doc.metadata.get("subject", ""), "title": doc.metadata.get("title", ""), "total_pages": doc.metadata.get("total_pages", 0), "trapped": doc.metadata.get("trapped", "") } } parsed_documents.append(parsed_doc) # Display the response in the Streamlit app return { "answer": answer, "meta": parsed_documents } ######################################################################## class Ask(Resource): def get(self): question = request.args.get("question") temp = request.args.get("temp", default=0.7) sources = request.args.get("sources", default=4) return get_answer(question, float(temp), int(sources)) class Ingest(Resource): def allowed_file(self, filename): return '.' 
in filename and \ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS def post(self): # Get Text type fields if 'file' not in request.files: return 'No file part' file = request.files.get("file") if file and self.allowed_file(file.filename): filename = secure_filename(file.filename) file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) loader = DirectoryLoader( app.config['UPLOAD_FOLDER'], glob="**/*.pdf", loader_cls=PyMuPDFLoader) documents = loader.load() text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=100) documents = text_splitter.split_documents(documents) pinecone.init( api_key=pinecone_api_key_env, # find at app.pinecone.io environment=pinecone_environment_env # next to api key in console ) embeddings = OpenAIEmbeddings( model='text-embedding-ada-002', openai_api_key=openai_api_key_env) Pinecone.from_documents( documents, embeddings, index_name=pinecone_index_env, namespace=pinecone_namespace) return 'File uploaded and ingested successfully' api.add_resource(Ask, "/ask") api.add_resource(Ingest, "/ingest") if __name__ == "__main__": app.run()
[ "langchain.document_loaders.DirectoryLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.chains.ConversationalRetrievalChain.from_llm", "langchain.vectorstores.Pinecone.from_documents", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.Pinecone.from_existing_index", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((718, 731), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (729, 731), False, 'from dotenv import load_dotenv\n'), ((753, 785), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (767, 785), False, 'import os\n'), ((809, 843), 'os.environ.get', 'os.environ.get', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (823, 843), False, 'import os\n'), ((871, 909), 'os.environ.get', 'os.environ.get', (['"""PINECONE_ENVIRONMENT"""'], {}), "('PINECONE_ENVIRONMENT')\n", (885, 909), False, 'import os\n'), ((931, 963), 'os.environ.get', 'os.environ.get', (['"""PINECONE_INDEX"""'], {}), "('PINECONE_INDEX')\n", (945, 963), False, 'import os\n'), ((1018, 1036), 'flask.Flask', 'Flask', (['"""L-ChatBot"""'], {}), "('L-ChatBot')\n", (1023, 1036), False, 'from flask import Flask, request\n'), ((1146, 1154), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (1149, 1154), False, 'from flask_restful import Resource, Api, reqparse, abort\n'), ((1165, 1189), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1187, 1189), False, 'from flask_restful import Resource, Api, reqparse, abort\n'), ((1290, 1378), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'openai_api_key': 'openai_api_key_env'}), "(model='text-embedding-ada-002', openai_api_key=\n openai_api_key_env)\n", (1306, 1378), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1388, 1474), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key_env', 'environment': 'pinecone_environment_env'}), '(api_key=pinecone_api_key_env, environment=\n pinecone_environment_env)\n', (1401, 1474), False, 'import pinecone\n'), ((1506, 1639), 'langchain.vectorstores.Pinecone.from_existing_index', 'Pinecone.from_existing_index', ([], {'index_name': 'pinecone_index_env', 'embedding': 'embeddings', 'text_key': '"""text"""', 'namespace': 'pinecone_namespace'}), "(index_name=pinecone_index_env, embedding=\n embeddings, text_key='text', namespace=pinecone_namespace)\n", (1534, 1639), False, 'from langchain.vectorstores import Pinecone\n'), ((1656, 1775), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': 'temperature', 'openai_api_key': 'openai_api_key_env', 'streaming': '(False)'}), "(model_name='gpt-3.5-turbo', temperature=temperature,\n openai_api_key=openai_api_key_env, streaming=False)\n", (1666, 1775), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2018, 2121), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'model', 'retriever': 'retriever', 'return_source_documents': '(True)'}), '(llm=model, retriever=retriever,\n return_source_documents=True)\n', (2055, 2121), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((3647, 3675), 'flask.request.args.get', 'request.args.get', (['"""question"""'], {}), "('question')\n", (3663, 3675), False, 'from flask import Flask, request\n'), ((3691, 3728), 'flask.request.args.get', 'request.args.get', (['"""temp"""'], {'default': '(0.7)'}), "('temp', default=0.7)\n", (3707, 3728), False, 'from flask import Flask, request\n'), ((3747, 3785), 'flask.request.args.get', 'request.args.get', (['"""sources"""'], {'default': '(4)'}), "('sources', default=4)\n", (3763, 3785), False, 'from flask import Flask, request\n'), ((4162, 4187), 'flask.request.files.get', 'request.files.get', (['"""file"""'], 
{}), "('file')\n", (4179, 4187), False, 'from flask import Flask, request\n'), ((4265, 4295), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (4280, 4295), False, 'from werkzeug.utils import secure_filename\n'), ((4392, 4484), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (["app.config['UPLOAD_FOLDER']"], {'glob': '"""**/*.pdf"""', 'loader_cls': 'PyMuPDFLoader'}), "(app.config['UPLOAD_FOLDER'], glob='**/*.pdf', loader_cls=\n PyMuPDFLoader)\n", (4407, 4484), False, 'from langchain.document_loaders import DirectoryLoader, PyMuPDFLoader\n'), ((4563, 4629), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (4593, 4629), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4725, 4811), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key_env', 'environment': 'pinecone_environment_env'}), '(api_key=pinecone_api_key_env, environment=\n pinecone_environment_env)\n', (4738, 4811), False, 'import pinecone\n'), ((4935, 5023), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'openai_api_key': 'openai_api_key_env'}), "(model='text-embedding-ada-002', openai_api_key=\n openai_api_key_env)\n", (4951, 5023), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5048, 5160), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['documents', 'embeddings'], {'index_name': 'pinecone_index_env', 'namespace': 'pinecone_namespace'}), '(documents, embeddings, index_name=\n pinecone_index_env, namespace=pinecone_namespace)\n', (5071, 5160), False, 'from langchain.vectorstores import Pinecone\n'), ((4318, 4369), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (4330, 4369), False, 'import os\n')]
from langchain.llms import LlamaCpp from langchain.embeddings import HuggingFaceEmbeddings from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler def hf_embeddings(): return HuggingFaceEmbeddings( model_name = "sentence-transformers/all-mpnet-base-v2", ) def code_llama(): callbackmanager = CallbackManager([StreamingStdOutCallbackHandler()]) llm = LlamaCpp( model_path="./models/codellama-7b.Q4_K_M.gguf", n_ctx=2048, max_tokens=200, n_gpu_layers=1, f16_kv=True, callback_manager=callbackmanager, verbose=True, use_mlock=True ) return llm
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.llms.LlamaCpp", "langchain.embeddings.HuggingFaceEmbeddings" ]
[((260, 335), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (281, 335), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((456, 642), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': '"""./models/codellama-7b.Q4_K_M.gguf"""', 'n_ctx': '(2048)', 'max_tokens': '(200)', 'n_gpu_layers': '(1)', 'f16_kv': '(True)', 'callback_manager': 'callbackmanager', 'verbose': '(True)', 'use_mlock': '(True)'}), "(model_path='./models/codellama-7b.Q4_K_M.gguf', n_ctx=2048,\n max_tokens=200, n_gpu_layers=1, f16_kv=True, callback_manager=\n callbackmanager, verbose=True, use_mlock=True)\n", (464, 642), False, 'from langchain.llms import LlamaCpp\n'), ((411, 443), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (441, 443), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
import os import yaml from types import SimpleNamespace import openai import numpy as np from sklearn.metrics.pairwise import cosine_similarity from langchain.vectorstores import FAISS from langchain.embeddings import HuggingFaceEmbeddings with open("config.yml") as f: config = yaml.safe_load(f) config = SimpleNamespace(**config) os.environ["TOKENIZERS_PARALLELISM"] = "false" def semantic_search(query_embedding, embeddings): """Manual similarity search (deprecated in favor of langchain).""" similarities = cosine_similarity([query_embedding], embeddings)[0] ranked_indices = np.argsort(-similarities) return ranked_indices def answer_question(context, query, model="gpt-3.5-turbo", max_tokens=None, temperature=config.temperature): system_prompt = """ You are a truthful and accurate scientific research assistant. You can write equations in LaTeX. You can fix any unknown LaTeX syntax elements. Do not use the \enumerate. \itemize, \cite, \ref LaTex environments. You are an expert and helpful programmer and write correct code. If parts of the context are not relevant to the question, ignore them. Only answer if you are absolutely confident in the answer. Do not make up any facts. Do not make up what acronyms stand for. """ if context is not None and len(context) > 0: prompt = f"Use the following context to answer the question at the end. If parts of the context are not relevant to the question, ignore them. Context: {context}. Question: {query}" else: prompt = f"Question: {query}" try: response = openai.ChatCompletion.create( model=model, messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}], max_tokens=max_tokens, n=1, temperature=temperature, ) return response["choices"][0]["message"]["content"] except (openai.error.AuthenticationError, openai.error.APIError) as e: return "Authentication error." except (openai.error.APIError, openai.error.Timeout, openai.error.ServiceUnavailableError) as e: return "There was an error with the OpenAI API, or the request timed out." except openai.error.APIConnectionError as e: return "Issue connecting to the OpenAI API." except Exception as e: return "An error occurred: {}".format(e) def run(query, model="gpt-3.5-turbo", api_key=None, query_papers=True, k=config.top_k, max_len_query=300): if api_key is None: openai.api_key = os.getenv("OPENAI_API_KEY") else: openai.api_key = api_key db_path = "./data/db/faiss_index" embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1") files = [db_path] is_missing = False for file in files: if not os.path.exists(file): print(f"{file} does not exist") is_missing = True else: # Load FAISS index db = FAISS.load_local(db_path, embeddings) # If set, don't query papers; pretend they don't exist if not query_papers: is_missing = True if not query: return "Please enter your question above, and I'll do my best to help you." if len(query) > max_len_query: return "Please ask a shorter question!" else: # Do a similarity query, combine the most relevant chunks, and answer the question if not is_missing: similarity_results = db.similarity_search(query, k=k) most_relevant_chunk = ". ".join([results.page_content for results in similarity_results]) answer = answer_question(context=most_relevant_chunk, query=query, model=model) answer.strip("\n") return answer else: answer = answer_question(context=None, query=query, model=model) answer.strip("\n") return answer
[ "langchain.embeddings.HuggingFaceEmbeddings", "langchain.vectorstores.FAISS.load_local" ]
[((313, 338), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**config)\n', (328, 338), False, 'from types import SimpleNamespace\n'), ((286, 303), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (300, 303), False, 'import yaml\n'), ((602, 627), 'numpy.argsort', 'np.argsort', (['(-similarities)'], {}), '(-similarities)\n', (612, 627), True, 'import numpy as np\n'), ((2699, 2788), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/multi-qa-mpnet-base-dot-v1"""'}), "(model_name=\n 'sentence-transformers/multi-qa-mpnet-base-dot-v1')\n", (2720, 2788), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((529, 577), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[query_embedding]', 'embeddings'], {}), '([query_embedding], embeddings)\n', (546, 577), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((1617, 1813), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'model', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n prompt}]", 'max_tokens': 'max_tokens', 'n': '(1)', 'temperature': 'temperature'}), "(model=model, messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': prompt}],\n max_tokens=max_tokens, n=1, temperature=temperature)\n", (1645, 1813), False, 'import openai\n'), ((2571, 2598), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2580, 2598), False, 'import os\n'), ((2869, 2889), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2883, 2889), False, 'import os\n'), ((3027, 3064), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_path', 'embeddings'], {}), '(db_path, embeddings)\n', (3043, 3064), False, 'from langchain.vectorstores import FAISS\n')]
from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.agents import AgentType from langchain_app.models.vicuna_request_llm import VicunaLLM # First, let's load the language model we're going to use to control the agent. llm = VicunaLLM() # Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in. tools = load_tools(["python_repl"], llm=llm) # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) # Now let's test it out! agent.run("""Write a Python script that prints 'Hello, world!""")
[ "langchain_app.models.vicuna_request_llm.VicunaLLM", "langchain.agents.initialize_agent", "langchain.agents.load_tools" ]
[((275, 286), 'langchain_app.models.vicuna_request_llm.VicunaLLM', 'VicunaLLM', ([], {}), '()\n', (284, 286), False, 'from langchain_app.models.vicuna_request_llm import VicunaLLM\n'), ((405, 441), 'langchain.agents.load_tools', 'load_tools', (["['python_repl']"], {'llm': 'llm'}), "(['python_repl'], llm=llm)\n", (415, 441), False, 'from langchain.agents import load_tools\n'), ((562, 653), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True)\n', (578, 653), False, 'from langchain.agents import initialize_agent\n')]
import logging import sys from typing import Callable from langchain.prompts import MessagesPlaceholder from langchain.agents import AgentType, AgentExecutor from langchain.agents import initialize_agent as initialize_agent_base from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.chains.base import Chain logger = logging.getLogger(__name__) def initialize_agent(agent: AgentType, **kwargs) -> Chain: """ Extended version of the initialize_agent function from ix.chains.agents. Modifications: - unpacks agent_kwargs: allows agent_kwargs to be flattened into the ChainNode config A flattened config simplifies the UX integration such that it works with TypeAutoFields """ # Inject placeholders into prompt for memory if provided placeholders = [] if memories := kwargs.get("memory", None): if not isinstance(memories, list): memories = [memories] placeholders = [] for component in memories: if not getattr(component, "return_messages", False): raise ValueError( f"Memory component {component} has return_messages=False. Agents require " f"return_messages=True." ) for memory_key in component.memory_variables: placeholders.append(MessagesPlaceholder(variable_name=memory_key)) # Re-pack agent_kwargs__* arguments into agent_kwargs agent_kwargs = { "extra_prompt_messages": placeholders, } for key, value in kwargs.items(): if key.startswith("agent_kwargs__"): agent_kwargs[key[15:]] = value del kwargs[key] kwargs["agent_kwargs"] = agent_kwargs # unpack Toolkits into Tools if "tools" in kwargs: tools = kwargs["tools"] unpacked_tools = [] for i, value in enumerate(tools): if isinstance(value, BaseToolkit): unpacked_tools.extend(value.get_tools()) else: unpacked_tools.append(value) kwargs["tools"] = unpacked_tools return initialize_agent_base(agent=agent, **kwargs) def create_init_func(agent_type: AgentType) -> Callable: """ This function creates a new initialization function for a given agent type. The initialization function is a proxy to the initialize_agent function, but it has a distinct name and can be imported directly from this module. Agent initialization functions are used so there is a distinct class_path for each agent type. This allows class_path to be used as an identifier for the agent type. Args: agent_type (str): The type of the agent to create an initialization function for. Returns: function: The newly created initialization function. """ def init_func(**kwargs) -> AgentExecutor: return initialize_agent(agent=agent_type, **kwargs) return init_func # list of function names that are created, used for debugging FUNCTION_NAMES = [] def create_functions() -> None: """ Generate initialization functions for each agent type and add them to this module. This will automatically create a new function for each agent type as LangChain creates them. """ for agent_type in AgentType: # create an initialization function for this agent type init_func = create_init_func(agent_type) func_name = "initialize_" + agent_type.value.replace("-", "_") FUNCTION_NAMES.append(func_name) # add the function to the current module setattr(sys.modules[__name__], func_name, init_func) # auto-run the function that creates the initialization functions create_functions()
[ "langchain.agents.initialize_agent", "langchain.prompts.MessagesPlaceholder" ]
[((343, 370), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (360, 370), False, 'import logging\n'), ((2107, 2151), 'langchain.agents.initialize_agent', 'initialize_agent_base', ([], {'agent': 'agent'}), '(agent=agent, **kwargs)\n', (2128, 2151), True, 'from langchain.agents import initialize_agent as initialize_agent_base\n'), ((1348, 1393), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': 'memory_key'}), '(variable_name=memory_key)\n', (1367, 1393), False, 'from langchain.prompts import MessagesPlaceholder\n')]
import os os.environ["LANGCHAIN_TRACING"] = "true" from langchain import OpenAI from langchain.agents import initialize_agent, AgentType from langchain.llms import OpenAI from langchain.agents import initialize_agent, Tool from langchain.agents import AgentType def multiplier(a, b): return a / b def parsing_multiplier(string): a, b = string.split(",") return multiplier(int(a), int(b)) llm = OpenAI(temperature=0) tools = [ Tool( name="Multiplier", func=parsing_multiplier, description="useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.", ) ] agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent.run("3 times four?")
[ "langchain.agents.initialize_agent", "langchain.llms.OpenAI", "langchain.agents.Tool" ]
[((412, 433), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (418, 433), False, 'from langchain.llms import OpenAI\n'), ((826, 917), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True)\n', (842, 917), False, 'from langchain.agents import initialize_agent, Tool\n'), ((448, 794), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Multiplier"""', 'func': 'parsing_multiplier', 'description': '"""useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2."""'}), "(name='Multiplier', func=parsing_multiplier, description=\n 'useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.'\n )\n", (452, 794), False, 'from langchain.agents import initialize_agent, Tool\n')]
# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from typing import List from langchain.chains import RetrievalQA from langchain.chains.conversational_retrieval.base import ( BaseConversationalRetrievalChain, ) from langchain.llms.vertexai import VertexAI from langchain.memory import ConversationBufferMemory from langchain.tools import BaseTool current_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(current_dir) from MyVertexAIEmbedding import MyVertexAIEmbedding # noqa: E402 from VertexMatchingEngine import MatchingEngine, MatchingEngineUtils # noqa: E402 # https://cdn.cloudflare.steamstatic.com/steam/apps/597180/manuals/Old_World-Official_User_Manual.pdf?t=1653279974 """ Matching Engine As Retriever """ ME_REGION = os.getenv("GOOGLE_CLOUD_REGIN") PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") ME_INDEX_NAME = f"{PROJECT_ID}-chatbot-vme" ME_DIMENSIONS = 768 ME_EMBEDDING_DIR = f"gs://{PROJECT_ID}-chatbot-embeddings" REQUESTS_PER_MINUTE = 15 mengine = MatchingEngineUtils( project_id=PROJECT_ID, region=ME_REGION, index_name=ME_INDEX_NAME ) embedding = MyVertexAIEmbedding() llm = VertexAI() memory = ConversationBufferMemory() def create_PDFQA_chain_me_RetrievalQA() -> BaseConversationalRetrievalChain: mengine = MatchingEngineUtils( project_id=PROJECT_ID, region=ME_REGION, index_name=ME_INDEX_NAME ) ME_INDEX_ID, ME_INDEX_ENDPOINT_ID = mengine.get_index_and_endpoint() me = MatchingEngine.from_components( project_id=PROJECT_ID, region=ME_REGION, gcs_bucket_name=f'gs://{ME_EMBEDDING_DIR.split("/")[2]}', embedding=embedding, index_id=ME_INDEX_ID, endpoint_id=ME_INDEX_ENDPOINT_ID, ) retriever = me.as_retriever() doc_chain = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=False, verbose=True, ) return doc_chain class VIAI_INFO_ME(BaseTool): name = "VIAI_INFO_ME" description = """ Use this tool to get information regarding the solution "Visual Inspection AI Edge", or "VIAI Edge". The Tool Input is the user's question, the user may reference to previous convsation, add context to the question when needed. The Output is the result """ def _run(self, query: str) -> str: if query == "": query = "summarize" chat_history: List[str] = [] print("Running tool:{}".format(query)) qa = create_PDFQA_chain_me_RetrievalQA() result = qa( {"query": query, "chat_history": chat_history}, return_only_outputs=False ) return result async def _arun(self, query: str) -> str: """Use the tool asynchronously.""" print(f"*** Invoking MockTool with query '{query}'") return f"Answer of '{query}' is 'Michael Chi'"
[ "langchain.llms.vertexai.VertexAI", "langchain.memory.ConversationBufferMemory", "langchain.chains.RetrievalQA.from_chain_type" ]
[((957, 985), 'sys.path.append', 'sys.path.append', (['current_dir'], {}), '(current_dir)\n', (972, 985), False, 'import sys\n'), ((1302, 1333), 'os.getenv', 'os.getenv', (['"""GOOGLE_CLOUD_REGIN"""'], {}), "('GOOGLE_CLOUD_REGIN')\n", (1311, 1333), False, 'import os\n'), ((1347, 1380), 'os.getenv', 'os.getenv', (['"""GOOGLE_CLOUD_PROJECT"""'], {}), "('GOOGLE_CLOUD_PROJECT')\n", (1356, 1380), False, 'import os\n'), ((1540, 1631), 'VertexMatchingEngine.MatchingEngineUtils', 'MatchingEngineUtils', ([], {'project_id': 'PROJECT_ID', 'region': 'ME_REGION', 'index_name': 'ME_INDEX_NAME'}), '(project_id=PROJECT_ID, region=ME_REGION, index_name=\n ME_INDEX_NAME)\n', (1559, 1631), False, 'from VertexMatchingEngine import MatchingEngine, MatchingEngineUtils\n'), ((1645, 1666), 'MyVertexAIEmbedding.MyVertexAIEmbedding', 'MyVertexAIEmbedding', ([], {}), '()\n', (1664, 1666), False, 'from MyVertexAIEmbedding import MyVertexAIEmbedding\n'), ((1674, 1684), 'langchain.llms.vertexai.VertexAI', 'VertexAI', ([], {}), '()\n', (1682, 1684), False, 'from langchain.llms.vertexai import VertexAI\n'), ((1694, 1720), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (1718, 1720), False, 'from langchain.memory import ConversationBufferMemory\n'), ((930, 955), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (945, 955), False, 'import os\n'), ((1814, 1905), 'VertexMatchingEngine.MatchingEngineUtils', 'MatchingEngineUtils', ([], {'project_id': 'PROJECT_ID', 'region': 'ME_REGION', 'index_name': 'ME_INDEX_NAME'}), '(project_id=PROJECT_ID, region=ME_REGION, index_name=\n ME_INDEX_NAME)\n', (1833, 1905), False, 'from VertexMatchingEngine import MatchingEngine, MatchingEngineUtils\n'), ((2311, 2438), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'return_source_documents': '(False)', 'verbose': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, return_source_documents=False, verbose=True)\n", (2338, 2438), False, 'from langchain.chains import RetrievalQA\n')]
import boto3 from botocore.exceptions import ClientError import json import langchain from importlib import reload from langchain.agents.structured_chat import output_parser from typing import List import logging import os import sqlalchemy from sqlalchemy import create_engine from langchain.docstore.document import Document from langchain import PromptTemplate,SQLDatabase, LLMChain from langchain_experimental.sql.base import SQLDatabaseChain from langchain.prompts.prompt import PromptTemplate import streamlit as st import pandas as pd import datetime from langchain.tools import tool from typing import List, Optional import json from langchain.prompts import ( ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.llms.bedrock import Bedrock from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner from langchain.agents.tools import Tool import time import uuid from utility import get_cfn_details,custom_logga, upload_amz_file from langchain.tools.python.tool import PythonREPLTool from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory from streamlit.web.server.websocket_headers import _get_websocket_headers import sys st.set_page_config(layout="wide") # logger = logging.getLogger('sagemaker') # logger.setLevel(logging.DEBUG) # logger.addHandler(logging.StreamHandler()) sys.stdout = custom_logga.Logger() #Session states to hold sateful variables if 'generated' not in st.session_state: st.session_state['generated'] = [] if 'past' not in st.session_state: st.session_state['past'] = [] if 'messages' not in st.session_state: st.session_state['messages'] = [] if 'ant_key' not in st.session_state: st.session_state['ant_key'] = '' if 'chat_id' not in st.session_state: st.session_state['chat_id'] = 1 if 'client_id' not in st.session_state: st.session_state['client_id'] = '' if 'prompt' not in st.session_state: st.session_state['prompt'] = '' if 'memory' not in st.session_state: st.session_state['memory'] = "" # Global Variables STACK_NAME="mmfsi" #change to the name of the cloudformation stack REGION='us-east-1' #change to the name of the region you are working in if len(st.session_state['messages'])<1: ## browser client info headers = _get_websocket_headers() st.session_state['client_id'] = str(headers.get("Sec-Websocket-Key")) #print(f"Client KEY {st.session_state['client_id']}") #st.session_state['ant_key']= get_secret()(REGION, "Secrete Name") ## PASS the AWS SECRETES secrete name st.session_state['chat_id']= st.session_state['chat_id']+1 #print(f"Session Chat ID {st.session_state['chat_id']}") # get cfn parameters glue_db_name,kendra_index_id,audio_transcripts_source_bucket,textract_source_bucket,query_staging_bucket,multimodal_output_bucket=get_cfn_details.stack_info(STACK_NAME,REGION) param={} param['db']=glue_db_name param['query_bucket']=query_staging_bucket param['region']=REGION param['kendra_id']=kendra_index_id#'45739a4f-c80f-4201-b183-20389d0febc7' #Store parameters in json file with open('param.json', 'w', encoding='utf-8') as f: json.dump(param, f, ensure_ascii=False, indent=4) # upload files to s3 #from utility.upload_amz_file import upload_file_amz upload_amz_file.upload_file_amz('files/Amazon-10K-2022-EarningsReport.pdf', textract_source_bucket) upload_amz_file.upload_file_amz('files/Amazon-10Q-Q1-2023-QuaterlyEarningsReport.pdf', textract_source_bucket) 
upload_amz_file.upload_file_amz('files/Amazon-Quarterly-Earnings-Report-Q1-2023-Full-Call-v1.mp3', audio_transcripts_source_bucket) #Athena connection config connathena=f"athena.{REGION}.amazonaws.com" portathena='443' #Update, if port is different schemaathena=glue_db_name #from user defined params s3stagingathena=f's3://{query_staging_bucket}/athenaresults/'#from cfn params wkgrpathena='primary'#Update, if workgroup is different ## Create the athena connection string connection_string = f"awsathena+rest://@{connathena}:{portathena}/{schemaathena}?s3_staging_dir={s3stagingathena}&work_group={wkgrpathena}" ## Create the athena SQLAlchemy engine engine_athena = create_engine(connection_string, echo=False) dbathena = SQLDatabase(engine_athena) from botocore.config import Config config = Config( retries = dict( max_attempts = 10 ) ) from utility import stock_query_mm, kendra_tool_mm, aws_tools, portfolio_tool inference_modifier = { 'max_tokens_to_sample':512, "temperature":0.01, "stop_sequences":["\n\nQuestion:","\n\nHuman:","\nHuman:"]#"\n\nAssistant:","\nAssistant:"]#,"\nHuman:"]#,"\n\nAssistant:","\nAssistant:"], # "top_k": 50, # "top_p": 1, } llm = llm = Bedrock(model_id='anthropic.claude-v2',model_kwargs =inference_modifier ) table = 'stock_prices' session_id=st.session_state['client_id'] chat_id= st.session_state['chat_id'] #persist dynamodb table id for chat history for each session and browser client @st.cache_data def db_table_id(session_id, chat_id): chat_sess_id=str(uuid.uuid4()) return chat_sess_id chat_session_id=db_table_id(session_id, chat_id) #print(f"Chat SESSION ID {chat_session_id}") def run_query(query): PROMPT_sql = PromptTemplate( input_variables=["input", "table_info", "dialect"], template=_DEFAULT_TEMPLATE ) db_chain = SQLDatabaseChain.from_llm(llm, dbathena, prompt=PROMPT_sql, verbose=True, return_intermediate_steps=False) response=db_chain.run(query) return response def SentimentAnalysis(inputString): print(inputString) lambda_client = boto3.client('lambda', region_name=REGION) lambda_payload = {"inputString:"+inputString} response=lambda_client.invoke(FunctionName='FSI-SentimentDetecttion', InvocationType='RequestResponse', Payload=json.dumps(inputString)) #print(response['Payload'].read()) output=json.loads(response['Payload'].read().decode()) return output['body'] def DetectKeyPhrases(inputString): #print(inputString) lambda_client = boto3.client('lambda', region_name=REGION) lambda_payload = {"inputString:"+inputString} response=lambda_client.invoke(FunctionName='FSI-KeyPhrasesDetection', InvocationType='RequestResponse', Payload=json.dumps(inputString)) #print(response['Payload'].read()) output=json.loads(response['Payload'].read().decode()) return output['body'] tools = [ Tool( name="Stock Querying Tool", func=stock_query_mm.run_query, description=""" Useful for when you need to answer questions about stocks. It only has information about stocks. """ ), portfolio_tool.OptimizePortfolio(), Tool( name="Financial Information Lookup Tool", func=kendra_tool_mm.run_chain, description=""" Useful for when you need to look up financial information like revenues, sales, loss, risks etc. """ ), PythonREPLTool(), Tool( name="Sentiment Analysis Tool", func=SentimentAnalysis, description=""" Useful for when you need to analyze the sentiment of an excerpt from a financial report. """ ), Tool( name="Detect Phrases Tool", func=DetectKeyPhrases, description=""" Useful for when you need to detect key phrases in financial reports. 
""" ), Tool( name="Text Extraction Tool", func=aws_tools.IntiateTextExtractProcessing, description=""" Useful for when you need to trigger conversion of pdf version of quaterly reports to text files using amazon textextract """ ), Tool( name="Transcribe Audio Tool", func=aws_tools.TranscribeAudio, description=""" Useful for when you need to convert audio recordings of earnings calls from audio to text format using Amazon Transcribe """ ) ] combo_template = """\n\nHuman: You are a Minimization Solutionist with a set of tools at your disposal. You would be presented with a problem. First understand the problem and devise a plan to solve the problem. Please output the plan starting with the header 'Plan:' and then followed by a numbered list of steps. Ensure the plan has the minimum amount of steps needed to solve the problem. Do not include unnecessary steps. <instructions> These are guidance on when to use a tool to solve a task, follow them strictly: 1. For the tool that specifically focuses on stock price data, use "Stock Query Tool". 2. For financial information lookup that covers various financial data like company's finance, performance or any other information pertaining a company beyond stocks, use the "Financial Data Explorer Tool". Ask specific questions using this tool as it is your knowledge database. Refrain from asking question like "look up 10K filings" instead a more specific question like "what is the revenue for this company". 3. When you need to find key phrases in a report, use the "Detect Phrases Tool" to get the information about all key phrases and respond with key phrases relavent to the question. 4. When you need to provide an optimized stock portfolio based on stock names, use Portfolio Optimization Tool. The output is the percent of fund you should spend on each stock. This tool only takes stock ticker as input and not stock prices, for example ["EWR","JHT"]. 5. Please use the PythonREPLTool exclusively for calculations, refrain from utilizing 'print' statements for output. Use this too only when needed, most times its unnecessary. 6. When you need to analyze sentiment of a topic, use "Sentiment Analysis Tool". 
</instructions>\n\nAssistant:""" combo_template=combo_template if st.session_state['prompt']=="" else st.session_state['prompt'] chat_history_table = 'DYNAMODB table name' ### SPECIFY THE DYNAMODB TABLE chat_history_memory = DynamoDBChatMessageHistory(table_name=chat_history_table, session_id=chat_session_id) model = llm planner = load_chat_planner(model) system_message_prompt = SystemMessagePromptTemplate.from_template(combo_template) human_message_prompt = planner.llm_chain.prompt.messages[1] planner.llm_chain.prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) executor = load_agent_executor(model, tools, verbose=True) if st.session_state['memory']: memory = ConversationBufferMemory(memory_key="chat_history", chat_memory=chat_history_memory, return_messages=True) agent = PlanAndExecute(planner=planner, executor=executor, verbose=True, max_iterations=2, memory=memory) else: agent = PlanAndExecute(planner=planner, executor=executor, verbose=True, max_iterations=2)#, memory=memory) def query(request, agent, chat_history_memory): output=agent(request) chat_history_memory.add_ai_message(str(output)) try: return output['output'] except: return output def action_doc(agent, chat_history_memory): st.title('Multi-Modal Agent to assist Financial Analyst') # Display chat messages from history on app rerun for message in st.session_state.messages: if "role" in message.keys(): with st.chat_message(message["role"]): st.markdown(message['content'].replace("$","USD ").replace("%", " percent")) else: with st.expander(label="**Intermediate Steps**"): st.write(message["steps"]) if prompt := st.chat_input("Hello?"): st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() output_answer=query(prompt, agent, chat_history_memory) message_placeholder.markdown(output_answer.replace("$","USD ").replace("%", " percent")) st.session_state.messages.append({"role": "assistant", "content": output_answer}) # Saving the intermediate steps in a logf file to be shown in the UI. This is a hack due to the inability to capture these steps with the agent planner and executor library being used with st.expander(label="**Intermediate Steps**"): with open('logfile.txt','r')as f: steps=f.readlines() st.write(steps) os.remove('logfile.txt') st.session_state.messages.append({"steps": steps}) def app_sidebar(): with st.sidebar: st.write('## How to use:') description = """This app lets you query multi-modal documents and get relevant answers. Documents inculde DB Tables, audio files and pdf files. Type your query in the chat box to get appropiate answers. If you need to refresh session, click on the `Clear Session` button. Happy QnA :) """ st.markdown(description) st.write('---') st.write('## Sample Questions') st.markdown(""" - What are the closing prices of stocks AAAA, WWW, DDD in year 2018? Can you build an optimized portfolio using these three stocks? Please provide answers to both questions. - What is the net sales for Amazon in 2021 and 2022? What is the percent difference? - What are the biggest risks facing Amazon Inc? 
""") st.markdown(""" **Datasets** - [Quterly Earnings recordings](https://github.com/revdotcom/speech-datasets) - [Annual Reports (FinTabNet)](https://developer.ibm.com/exchanges/data/all/fintabnet/) - [S&P 500 stock data](https://www.kaggle.com/camnugent/sandp500) """) st.write('---') #st.write('Pass your custom prompt') user_input = st.text_area("Custom prompt goes here", "") if user_input: st.session_state['prompt']=user_input print(user_input) use_memory='' mem = st.checkbox('Conversation Memory') if mem: use_memory='yes' st.session_state['memory']=use_memory if st.button('Clear Session'): ''' The Clear context helps to refresh the UI and also create a new session for the chat. This creates a new Dynamo DB table to hold the chat history. ''' # Delete all the items in Session state for key in st.session_state.keys(): del st.session_state[key] # create new session state items if 'generated' not in st.session_state: st.session_state['generated'] = [] if 'past' not in st.session_state: st.session_state['past'] = [] if 'messages' not in st.session_state: st.session_state['messages'] = [] if 'ant_key' not in st.session_state: st.session_state['ant_key'] = '' if 'chat_id' not in st.session_state: st.session_state['chat_id'] = 1 if 'client_id' not in st.session_state: st.session_state['client_id'] = '' if 'prompt' not in st.session_state: st.session_state['prompt'] = "" if 'memory' not in st.session_state: st.session_state['memory'] = "" def main(agent,chat_history_memory): params=app_sidebar() action_doc(agent, chat_history_memory) if __name__ == '__main__': main(agent, chat_history_memory)
[ "langchain.memory.ConversationBufferMemory", "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.tools.python.tool.PythonREPLTool", "langchain.llms.bedrock.Bedrock", "langchain.prompts.PromptTemplate", "langchain_experimental.plan_and_execute.load_chat_planner", "langchain_experimental.plan_and_execute.PlanAndExecute", "langchain.memory.chat_message_histories.DynamoDBChatMessageHistory", "langchain.SQLDatabase", "langchain.agents.tools.Tool", "langchain.prompts.SystemMessagePromptTemplate.from_template", "langchain_experimental.plan_and_execute.load_agent_executor", "langchain_experimental.sql.base.SQLDatabaseChain.from_llm" ]
[((1364, 1397), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (1382, 1397), True, 'import streamlit as st\n'), ((1532, 1553), 'utility.custom_logga.Logger', 'custom_logga.Logger', ([], {}), '()\n', (1551, 1553), False, 'from utility import get_cfn_details, custom_logga, upload_amz_file\n'), ((5121, 5193), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': '"""anthropic.claude-v2"""', 'model_kwargs': 'inference_modifier'}), "(model_id='anthropic.claude-v2', model_kwargs=inference_modifier)\n", (5128, 5193), False, 'from langchain.llms.bedrock import Bedrock\n'), ((10370, 10460), 'langchain.memory.chat_message_histories.DynamoDBChatMessageHistory', 'DynamoDBChatMessageHistory', ([], {'table_name': 'chat_history_table', 'session_id': 'chat_session_id'}), '(table_name=chat_history_table, session_id=\n chat_session_id)\n', (10396, 10460), False, 'from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory\n'), ((10478, 10502), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['model'], {}), '(model)\n', (10495, 10502), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((10528, 10585), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['combo_template'], {}), '(combo_template)\n', (10569, 10585), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((10673, 10752), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (10705, 10752), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((10765, 10812), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['model', 'tools'], {'verbose': '(True)'}), '(model, tools, verbose=True)\n', (10784, 10812), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((2449, 2473), 'streamlit.web.server.websocket_headers._get_websocket_headers', '_get_websocket_headers', ([], {}), '()\n', (2471, 2473), False, 'from streamlit.web.server.websocket_headers import _get_websocket_headers\n'), ((3016, 3062), 'utility.get_cfn_details.stack_info', 'get_cfn_details.stack_info', (['STACK_NAME', 'REGION'], {}), '(STACK_NAME, REGION)\n', (3042, 3062), False, 'from utility import get_cfn_details, custom_logga, upload_amz_file\n'), ((3511, 3614), 'utility.upload_amz_file.upload_file_amz', 'upload_amz_file.upload_file_amz', (['"""files/Amazon-10K-2022-EarningsReport.pdf"""', 'textract_source_bucket'], {}), "('files/Amazon-10K-2022-EarningsReport.pdf',\n textract_source_bucket)\n", (3542, 3614), False, 'from utility import get_cfn_details, custom_logga, upload_amz_file\n'), ((3615, 3734), 'utility.upload_amz_file.upload_file_amz', 'upload_amz_file.upload_file_amz', (['"""files/Amazon-10Q-Q1-2023-QuaterlyEarningsReport.pdf"""', 'textract_source_bucket'], {}), "(\n 'files/Amazon-10Q-Q1-2023-QuaterlyEarningsReport.pdf',\n textract_source_bucket)\n", (3646, 3734), False, 'from utility import get_cfn_details, custom_logga, upload_amz_file\n'), ((3730, 3870), 
'utility.upload_amz_file.upload_file_amz', 'upload_amz_file.upload_file_amz', (['"""files/Amazon-Quarterly-Earnings-Report-Q1-2023-Full-Call-v1.mp3"""', 'audio_transcripts_source_bucket'], {}), "(\n 'files/Amazon-Quarterly-Earnings-Report-Q1-2023-Full-Call-v1.mp3',\n audio_transcripts_source_bucket)\n", (3761, 3870), False, 'from utility import get_cfn_details, custom_logga, upload_amz_file\n'), ((4454, 4498), 'sqlalchemy.create_engine', 'create_engine', (['connection_string'], {'echo': '(False)'}), '(connection_string, echo=False)\n', (4467, 4498), False, 'from sqlalchemy import create_engine\n'), ((4514, 4540), 'langchain.SQLDatabase', 'SQLDatabase', (['engine_athena'], {}), '(engine_athena)\n', (4525, 4540), False, 'from langchain import PromptTemplate, SQLDatabase, LLMChain\n'), ((5625, 5724), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'table_info', 'dialect']", 'template': '_DEFAULT_TEMPLATE'}), "(input_variables=['input', 'table_info', 'dialect'], template\n =_DEFAULT_TEMPLATE)\n", (5639, 5724), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((5754, 5864), 'langchain_experimental.sql.base.SQLDatabaseChain.from_llm', 'SQLDatabaseChain.from_llm', (['llm', 'dbathena'], {'prompt': 'PROMPT_sql', 'verbose': '(True)', 'return_intermediate_steps': '(False)'}), '(llm, dbathena, prompt=PROMPT_sql, verbose=True,\n return_intermediate_steps=False)\n', (5779, 5864), False, 'from langchain_experimental.sql.base import SQLDatabaseChain\n'), ((5999, 6041), 'boto3.client', 'boto3.client', (['"""lambda"""'], {'region_name': 'REGION'}), "('lambda', region_name=REGION)\n", (6011, 6041), False, 'import boto3\n'), ((6482, 6524), 'boto3.client', 'boto3.client', (['"""lambda"""'], {'region_name': 'REGION'}), "('lambda', region_name=REGION)\n", (6494, 6524), False, 'import boto3\n'), ((6901, 7113), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Stock Querying Tool"""', 'func': 'stock_query_mm.run_query', 'description': '"""\n Useful for when you need to answer questions about stocks. It only has information about stocks.\n """'}), '(name=\'Stock Querying Tool\', func=stock_query_mm.run_query, description\n =\n """\n Useful for when you need to answer questions about stocks. It only has information about stocks.\n """\n )\n', (6905, 7113), False, 'from langchain.agents.tools import Tool\n'), ((7134, 7168), 'utility.portfolio_tool.OptimizePortfolio', 'portfolio_tool.OptimizePortfolio', ([], {}), '()\n', (7166, 7168), False, 'from utility import stock_query_mm, kendra_tool_mm, aws_tools, portfolio_tool\n'), ((7174, 7401), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Financial Information Lookup Tool"""', 'func': 'kendra_tool_mm.run_chain', 'description': '"""\n Useful for when you need to look up financial information like revenues, sales, loss, risks etc. \n """'}), '(name=\'Financial Information Lookup Tool\', func=kendra_tool_mm.\n run_chain, description=\n """\n Useful for when you need to look up financial information like revenues, sales, loss, risks etc. 
\n """\n )\n', (7178, 7401), False, 'from langchain.agents.tools import Tool\n'), ((7422, 7438), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (7436, 7438), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((7444, 7640), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sentiment Analysis Tool"""', 'func': 'SentimentAnalysis', 'description': '"""\n Useful for when you need to analyze the sentiment of an excerpt from a financial report.\n """'}), '(name=\'Sentiment Analysis Tool\', func=SentimentAnalysis, description=\n """\n Useful for when you need to analyze the sentiment of an excerpt from a financial report.\n """\n )\n', (7448, 7640), False, 'from langchain.agents.tools import Tool\n'), ((7667, 7838), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Detect Phrases Tool"""', 'func': 'DetectKeyPhrases', 'description': '"""\n Useful for when you need to detect key phrases in financial reports.\n """'}), '(name=\'Detect Phrases Tool\', func=DetectKeyPhrases, description=\n """\n Useful for when you need to detect key phrases in financial reports.\n """\n )\n', (7671, 7838), False, 'from langchain.agents.tools import Tool\n'), ((7865, 8117), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Text Extraction Tool"""', 'func': 'aws_tools.IntiateTextExtractProcessing', 'description': '"""\n Useful for when you need to trigger conversion of pdf version of quaterly reports to text files using amazon textextract\n """'}), '(name=\'Text Extraction Tool\', func=aws_tools.\n IntiateTextExtractProcessing, description=\n """\n Useful for when you need to trigger conversion of pdf version of quaterly reports to text files using amazon textextract\n """\n )\n', (7869, 8117), False, 'from langchain.agents.tools import Tool\n'), ((8139, 8377), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Transcribe Audio Tool"""', 'func': 'aws_tools.TranscribeAudio', 'description': '"""\n Useful for when you need to convert audio recordings of earnings calls from audio to text format using Amazon Transcribe\n """'}), '(name=\'Transcribe Audio Tool\', func=aws_tools.TranscribeAudio,\n description=\n """\n Useful for when you need to convert audio recordings of earnings calls from audio to text format using Amazon Transcribe\n """\n )\n', (8143, 8377), False, 'from langchain.agents.tools import Tool\n'), ((10867, 10978), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'chat_memory': 'chat_history_memory', 'return_messages': '(True)'}), "(memory_key='chat_history', chat_memory=\n chat_history_memory, return_messages=True)\n", (10891, 10978), False, 'from langchain.memory import ConversationBufferMemory\n'), ((10986, 11087), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)', 'max_iterations': '(2)', 'memory': 'memory'}), '(planner=planner, executor=executor, verbose=True,\n max_iterations=2, memory=memory)\n', (11000, 11087), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((11102, 11188), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)', 'max_iterations': '(2)'}), '(planner=planner, executor=executor, verbose=True,\n max_iterations=2)\n', (11116, 11188), False, 'from langchain_experimental.plan_and_execute 
import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((11461, 11518), 'streamlit.title', 'st.title', (['"""Multi-Modal Agent to assist Financial Analyst"""'], {}), "('Multi-Modal Agent to assist Financial Analyst')\n", (11469, 11518), True, 'import streamlit as st\n'), ((3361, 3410), 'json.dump', 'json.dump', (['param', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(param, f, ensure_ascii=False, indent=4)\n', (3370, 3410), False, 'import json\n'), ((5451, 5463), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5461, 5463), False, 'import uuid\n'), ((11979, 12002), 'streamlit.chat_input', 'st.chat_input', (['"""Hello?"""'], {}), "('Hello?')\n", (11992, 12002), True, 'import streamlit as st\n'), ((12012, 12081), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (12044, 12081), True, 'import streamlit as st\n'), ((12444, 12529), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_answer}"], {}), "({'role': 'assistant', 'content':\n output_answer})\n", (12476, 12529), True, 'import streamlit as st\n'), ((12945, 12995), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'steps': steps}"], {}), "({'steps': steps})\n", (12977, 12995), True, 'import streamlit as st\n'), ((13107, 13133), 'streamlit.write', 'st.write', (['"""## How to use:"""'], {}), "('## How to use:')\n", (13115, 13133), True, 'import streamlit as st\n'), ((13561, 13585), 'streamlit.markdown', 'st.markdown', (['description'], {}), '(description)\n', (13572, 13585), True, 'import streamlit as st\n'), ((13594, 13609), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (13602, 13609), True, 'import streamlit as st\n'), ((13618, 13649), 'streamlit.write', 'st.write', (['"""## Sample Questions"""'], {}), "('## Sample Questions')\n", (13626, 13649), True, 'import streamlit as st\n'), ((13658, 14109), 'streamlit.markdown', 'st.markdown', (['"""\n - What are the closing prices of stocks AAAA, WWW, DDD in year 2018? Can you build an optimized portfolio using these three stocks? Please provide answers to both questions.\n - What is the net sales for Amazon in 2021 and 2022? What is the percent difference?\n - What are the biggest risks facing Amazon Inc? \n """'], {}), '(\n """\n - What are the closing prices of stocks AAAA, WWW, DDD in year 2018? Can you build an optimized portfolio using these three stocks? Please provide answers to both questions.\n - What is the net sales for Amazon in 2021 and 2022? What is the percent difference?\n - What are the biggest risks facing Amazon Inc? 
\n """\n )\n', (13669, 14109), True, 'import streamlit as st\n'), ((14108, 14504), 'streamlit.markdown', 'st.markdown', (['"""\n **Datasets**\n \n - [Quterly Earnings recordings](https://github.com/revdotcom/speech-datasets)\n - [Annual Reports (FinTabNet)](https://developer.ibm.com/exchanges/data/all/fintabnet/)\n - [S&P 500 stock data](https://www.kaggle.com/camnugent/sandp500)\n """'], {}), '(\n """\n **Datasets**\n \n - [Quterly Earnings recordings](https://github.com/revdotcom/speech-datasets)\n - [Annual Reports (FinTabNet)](https://developer.ibm.com/exchanges/data/all/fintabnet/)\n - [S&P 500 stock data](https://www.kaggle.com/camnugent/sandp500)\n """\n )\n', (14119, 14504), True, 'import streamlit as st\n'), ((14503, 14518), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (14511, 14518), True, 'import streamlit as st\n'), ((14585, 14628), 'streamlit.text_area', 'st.text_area', (['"""Custom prompt goes here"""', '""""""'], {}), "('Custom prompt goes here', '')\n", (14597, 14628), True, 'import streamlit as st\n'), ((14773, 14807), 'streamlit.checkbox', 'st.checkbox', (['"""Conversation Memory"""'], {}), "('Conversation Memory')\n", (14784, 14807), True, 'import streamlit as st\n'), ((14924, 14950), 'streamlit.button', 'st.button', (['"""Clear Session"""'], {}), "('Clear Session')\n", (14933, 14950), True, 'import streamlit as st\n'), ((6253, 6276), 'json.dumps', 'json.dumps', (['inputString'], {}), '(inputString)\n', (6263, 6276), False, 'import json\n'), ((6736, 6759), 'json.dumps', 'json.dumps', (['inputString'], {}), '(inputString)\n', (6746, 6759), False, 'import json\n'), ((12095, 12118), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (12110, 12118), True, 'import streamlit as st\n'), ((12132, 12151), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (12143, 12151), True, 'import streamlit as st\n'), ((12191, 12219), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (12206, 12219), True, 'import streamlit as st\n'), ((12255, 12265), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12263, 12265), True, 'import streamlit as st\n'), ((12740, 12783), 'streamlit.expander', 'st.expander', ([], {'label': '"""**Intermediate Steps**"""'}), "(label='**Intermediate Steps**')\n", (12751, 12783), True, 'import streamlit as st\n'), ((12912, 12936), 'os.remove', 'os.remove', (['"""logfile.txt"""'], {}), "('logfile.txt')\n", (12921, 12936), False, 'import os\n'), ((15241, 15264), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (15262, 15264), True, 'import streamlit as st\n'), ((11674, 11706), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (11689, 11706), True, 'import streamlit as st\n'), ((11861, 11904), 'streamlit.expander', 'st.expander', ([], {'label': '"""**Intermediate Steps**"""'}), "(label='**Intermediate Steps**')\n", (11872, 11904), True, 'import streamlit as st\n'), ((11922, 11948), 'streamlit.write', 'st.write', (["message['steps']"], {}), "(message['steps'])\n", (11930, 11948), True, 'import streamlit as st\n'), ((12884, 12899), 'streamlit.write', 'st.write', (['steps'], {}), '(steps)\n', (12892, 12899), True, 'import streamlit as st\n')]
from langchain.agents.agent_toolkits import create_python_agent from langchain.tools.python.tool import PythonREPLTool from langchain.python import PythonREPL from langchain.llms.openai import OpenAI from langchain.agents.agent_types import AgentType from langchain.chat_models import ChatOpenAI import os agent_executor = create_python_agent( llm=OpenAI(temperature=0.5, max_tokens=2000), tool=PythonREPLTool(), verbose=True, agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) agent_executor.run("What is the 10th fibonacci number?")
[ "langchain.llms.openai.OpenAI", "langchain.tools.python.tool.PythonREPLTool" ]
[((354, 394), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'max_tokens': '(2000)'}), '(temperature=0.5, max_tokens=2000)\n', (360, 394), False, 'from langchain.llms.openai import OpenAI\n'), ((405, 421), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (419, 421), False, 'from langchain.tools.python.tool import PythonREPLTool\n')]
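For reference, the PythonREPLTool that the agent in the listing above wraps can also be called on its own; a minimal sketch (the arithmetic prompt is only an illustration):

from langchain.tools.python.tool import PythonREPLTool

# Standalone check of the REPL tool used by the agent above; run() executes the
# snippet in a Python REPL and returns whatever it prints.
repl = PythonREPLTool()
print(repl.run("print(sum(range(10)))"))  # expected to print 45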
"""Loaders for Prefect.""" import asyncio import httpx import os import shutil import tempfile from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain_prefect.types import GitHubComment, GitHubIssue from prefect.utilities.asyncutils import sync_compatible class GithubIssueLoader(BaseLoader): """Loader for GitHub issues for a given repository.""" def __init__(self, repo: str, n_issues: int): """ Initialize the loader with the given repository. Args: repo: The name of the repository, in the format "<owner>/<repo>" """ self.repo = repo self.n_issues = n_issues self.request_headers = { "Accept": "application/vnd.github.v3+json", } # If a GitHub token is available, use it to increase the rate limit if token := os.environ.get("GITHUB_TOKEN"): self.request_headers["Authorization"] = f"Bearer {token}" def _get_issue_comments( self, issue_number: int, per_page: int = 100 ) -> List[GitHubComment]: """ Get a list of all comments for the given issue. Returns: A list of dictionaries, each representing a comment. """ url = f"https://api.github.com/repos/{self.repo}/issues/{issue_number}/comments" comments = [] page = 1 while True: response = httpx.get( url=url, headers=self.request_headers, params={"per_page": per_page, "page": page}, ) response.raise_for_status() if not (new_comments := response.json()): break comments.extend([GitHubComment(**comment) for comment in new_comments]) page += 1 return comments def _get_issues(self, per_page: int = 100) -> List[GitHubIssue]: """ Get a list of all issues for the given repository. Returns: A list of `GitHubIssue` objects, each representing an issue. """ url = f"https://api.github.com/repos/{self.repo}/issues" issues = [] page = 1 while True: if len(issues) >= self.n_issues: break remaining = self.n_issues - len(issues) response = httpx.get( url=url, headers=self.request_headers, params={ "per_page": remaining if remaining < per_page else per_page, "page": page, "include": "comments", }, ) response.raise_for_status() if not (new_issues := response.json()): break issues.extend([GitHubIssue(**issue) for issue in new_issues]) page += 1 return issues def load(self) -> List[Document]: """ Load all issues for the given repository. Returns: A list of `Document` objects, each representing an issue. """ issues = self._get_issues() documents = [] for issue in issues: text = f"{issue.title}\n{issue.body}" if issue.comments: for comment in self._get_issue_comments(issue.number): text += f"\n\n{comment.user.login}: {comment.body}\n\n" metadata = { "source": issue.html_url, "title": issue.title, "labels": ",".join([label.name for label in issue.labels]), } documents.append(Document(page_content=text, metadata=metadata)) return documents class GitHubRepoLoader(BaseLoader): """Loader for files on GitHub that match a glob pattern.""" def __init__(self, repo: str, glob: str): """Initialize with the GitHub repository and glob pattern. Attrs: repo: The organization and repository name, e.g. "prefecthq/prefect" glob: The glob pattern to match files, e.g. 
"**/*.md" """ self.repo = f"https://github.com/{repo}.git" self.glob = glob @sync_compatible async def load(self) -> List[Document]: """Load files from GitHub that match the glob pattern.""" tmp_dir = tempfile.mkdtemp() try: process = await asyncio.create_subprocess_exec( *["git", "clone", "--depth", "1", self.repo, tmp_dir] ) if (await process.wait()) != 0: raise OSError( f"Failed to clone repository:\n {process.stderr.decode()}" ) # Read the contents of each file that matches the glob pattern documents = [] for file in Path(tmp_dir).glob(self.glob): with open(file, "r") as f: text = f.read() metadata = { "source": os.path.join(self.repo, file.relative_to(tmp_dir)) } documents.append(Document(page_content=text, metadata=metadata)) return documents finally: shutil.rmtree(tmp_dir)
[ "langchain_prefect.types.GitHubIssue", "langchain.docstore.document.Document", "langchain_prefect.types.GitHubComment" ]
[((4368, 4386), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4384, 4386), False, 'import tempfile\n'), ((944, 974), 'os.environ.get', 'os.environ.get', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (958, 974), False, 'import os\n'), ((1493, 1590), 'httpx.get', 'httpx.get', ([], {'url': 'url', 'headers': 'self.request_headers', 'params': "{'per_page': per_page, 'page': page}"}), "(url=url, headers=self.request_headers, params={'per_page':\n per_page, 'page': page})\n", (1502, 1590), False, 'import httpx\n'), ((2404, 2568), 'httpx.get', 'httpx.get', ([], {'url': 'url', 'headers': 'self.request_headers', 'params': "{'per_page': remaining if remaining < per_page else per_page, 'page': page,\n 'include': 'comments'}"}), "(url=url, headers=self.request_headers, params={'per_page': \n remaining if remaining < per_page else per_page, 'page': page,\n 'include': 'comments'})\n", (2413, 2568), False, 'import httpx\n'), ((5222, 5244), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (5235, 5244), False, 'import shutil\n'), ((3675, 3721), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (3683, 3721), False, 'from langchain.docstore.document import Document\n'), ((4428, 4517), 'asyncio.create_subprocess_exec', 'asyncio.create_subprocess_exec', (["*['git', 'clone', '--depth', '1', self.repo, tmp_dir]"], {}), "(*['git', 'clone', '--depth', '1', self.repo,\n tmp_dir])\n", (4458, 4517), False, 'import asyncio\n'), ((1795, 1819), 'langchain_prefect.types.GitHubComment', 'GitHubComment', ([], {}), '(**comment)\n', (1808, 1819), False, 'from langchain_prefect.types import GitHubComment, GitHubIssue\n'), ((2843, 2863), 'langchain_prefect.types.GitHubIssue', 'GitHubIssue', ([], {}), '(**issue)\n', (2854, 2863), False, 'from langchain_prefect.types import GitHubComment, GitHubIssue\n'), ((4843, 4856), 'pathlib.Path', 'Path', (['tmp_dir'], {}), '(tmp_dir)\n', (4847, 4856), False, 'from pathlib import Path\n'), ((5115, 5161), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (5123, 5161), False, 'from langchain.docstore.document import Document\n')]
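A minimal usage sketch for the GithubIssueLoader and GitHubRepoLoader classes defined in the listing above; the repository name, issue count and glob are placeholders:

# Hypothetical invocation of the loaders defined above.
issue_loader = GithubIssueLoader(repo="hwchase17/langchain", n_issues=5)
issue_docs = issue_loader.load()

repo_loader = GitHubRepoLoader(repo="hwchase17/langchain", glob="**/*.md")
# load() is wrapped with @sync_compatible, so it can be called without an event loop.
repo_docs = repo_loader.load()

for doc in issue_docs + repo_docs:
    print(doc.metadata["source"])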
from langchain.agents import AgentType, initialize_agent, load_tools from langchain.llms import OpenAI from benchllm import SemanticEvaluator, Test, Tester tools = load_tools(["serpapi", "llm-math"], llm=OpenAI(temperature=0)) agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) tests = [Test(input="How many people live in canada as of 2023?", expected=["approximately 38,625,801"])] tester = Tester(lambda input: agent(input)["output"]) tester.add_tests(tests) predictions = tester.run() evaluator = SemanticEvaluator() evaluator.load(predictions) report = evaluator.run() print(report)
[ "langchain.llms.OpenAI" ]
[((569, 588), 'benchllm.SemanticEvaluator', 'SemanticEvaluator', ([], {}), '()\n', (586, 588), False, 'from benchllm import SemanticEvaluator, Test, Tester\n'), ((261, 282), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (267, 282), False, 'from langchain.llms import OpenAI\n'), ((353, 453), 'benchllm.Test', 'Test', ([], {'input': '"""How many people live in canada as of 2023?"""', 'expected': "['approximately 38,625,801']"}), "(input='How many people live in canada as of 2023?', expected=[\n 'approximately 38,625,801'])\n", (357, 453), False, 'from benchllm import SemanticEvaluator, Test, Tester\n'), ((206, 227), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (212, 227), False, 'from langchain.llms import OpenAI\n')]
"""Wrapper around HuggingFace Pipeline APIs.""" import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation") logger = logging.getLogger() class HuggingFacePipeline(LLM, BaseModel): """Wrapper around HuggingFace Pipeline API. To use, you should have the ``transformers`` python package installed. Only supports `text-generation` and `text2text-generation` for now. Example using from_model_id: .. code-block:: python from langchain.llms import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation" ) Example passing pipeline in directly: .. code-block:: python from langchain.llms import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) hf = HuggingFacePipeline(pipeline=pipe) """ pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @classmethod def from_model_id( cls, model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, **kwargs: Any, ) -> LLM: """Construct the pipeline object from model_id and task.""" try: from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please it install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task == "text2text-generation": model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. 
deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: response = self.pipeline(prompt) if self.pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.pipeline.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens" ]
[((390, 409), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (407, 409), False, 'import logging\n'), ((2546, 2602), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2575, 2602), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((4077, 4180), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': 'device', 'model_kwargs': '_model_kwargs'}), '(task=task, model=model, tokenizer=tokenizer, device=device,\n model_kwargs=_model_kwargs)\n', (4088, 4180), True, 'from transformers import pipeline as hf_pipeline\n'), ((3351, 3376), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3374, 3376), False, 'import torch\n'), ((5708, 5739), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5727, 5739), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2683, 2746), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2719, 2746), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((2820, 2884), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2857, 2884), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n')]
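Besides the pipeline-passing example in the docstring above, the from_model_id constructor also accepts a device index; a short sketch (model choice and prompt are illustrative):

from langchain.llms import HuggingFacePipeline

# device=-1 keeps the model on CPU; a non-negative index selects a CUDA device
# (validated against torch.cuda.device_count() in the constructor above).
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    device=-1,
)
print(hf("Once upon a time"))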
from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.chains.query_constructor.base import AttributeInfo from datetime import datetime current_time_iso = datetime.utcnow().isoformat() + "Z" # example metadat """ { "type": "file_load_gcs", "attrs": "namespace:edmonbrain", "source": "gs://devoteam-mark-langchain-loader/edmonbrain/MarkWork/Running LLMs on Google Cloud Platform via Cloud Run, VertexAI and PubSub - LLMOps on GCP.md", "bucketId": "devoteam-mark-langchain-loader", "category": "NarrativeText", "filename": "Running LLMs on Google Cloud Platform via Cloud Run, VertexAI and PubSub - LLMOps on GCP.md", "filetype": "text/markdown", "objectId": "edmonbrain/MarkWork/Running LLMs on Google Cloud Platform via Cloud Run, VertexAI and PubSub - LLMOps on GCP.md", "eventTime": "2023-07-12T19:36:07.325740Z", "eventType": "OBJECT_FINALIZE", "bucket_name": "devoteam-mark-langchain-loader", "page_number": 1, "payloadFormat": "JSON_API_V1", "objectGeneration": "1689190567243818", "notificationConfig": "projects/_/buckets/devoteam-mark-langchain-loader/notificationConfigs/1" } """ metadata_field_info = [ AttributeInfo( name="source", description="The document source url or path to where the document is located", type="string", ), AttributeInfo( name="eventTime", description=f"When this content was put into the memory. The current datetime is {current_time_iso}", type="ISO 8601 formatted date and time string", ), AttributeInfo( name="type", description="How this content was added to the memory", type="string", ), ] document_content_description = "Documents stored in the bot long term memory" def get_self_query_retriever(llm, vectorstore): return SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, verbose=True )
[ "langchain.retrievers.self_query.base.SelfQueryRetriever.from_llm", "langchain.chains.query_constructor.base.AttributeInfo" ]
[((1179, 1311), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""source"""', 'description': '"""The document source url or path to where the document is located"""', 'type': '"""string"""'}), "(name='source', description=\n 'The document source url or path to where the document is located',\n type='string')\n", (1192, 1311), False, 'from langchain.chains.query_constructor.base import AttributeInfo\n'), ((1339, 1531), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""eventTime"""', 'description': 'f"""When this content was put into the memory. The current datetime is {current_time_iso}"""', 'type': '"""ISO 8601 formatted date and time string"""'}), "(name='eventTime', description=\n f'When this content was put into the memory. The current datetime is {current_time_iso}'\n , type='ISO 8601 formatted date and time string')\n", (1352, 1531), False, 'from langchain.chains.query_constructor.base import AttributeInfo\n'), ((1558, 1660), 'langchain.chains.query_constructor.base.AttributeInfo', 'AttributeInfo', ([], {'name': '"""type"""', 'description': '"""How this content was added to the memory"""', 'type': '"""string"""'}), "(name='type', description=\n 'How this content was added to the memory', type='string')\n", (1571, 1660), False, 'from langchain.chains.query_constructor.base import AttributeInfo\n'), ((1829, 1943), 'langchain.retrievers.self_query.base.SelfQueryRetriever.from_llm', 'SelfQueryRetriever.from_llm', (['llm', 'vectorstore', 'document_content_description', 'metadata_field_info'], {'verbose': '(True)'}), '(llm, vectorstore, document_content_description,\n metadata_field_info, verbose=True)\n', (1856, 1943), False, 'from langchain.retrievers.self_query.base import SelfQueryRetriever\n'), ((184, 201), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (199, 201), False, 'from datetime import datetime\n')]
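A sketch of wiring the get_self_query_retriever helper above into a query; the llm and vectorstore objects are assumed to be supplied by the caller (any model plus a metadata-filterable vector store that SelfQueryRetriever supports):

# Hypothetical call site for get_self_query_retriever defined above.
retriever = get_self_query_retriever(llm, vectorstore)
docs = retriever.get_relevant_documents(
    "What content was loaded from Google Cloud Storage in the last week?"
)
for doc in docs:
    print(doc.metadata.get("source"), doc.metadata.get("eventTime"))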
import os import re from typing import List, Optional, Any from langchain.schema import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from loguru import logger from tqdm import tqdm from src.config import local_embedding, retrieve_proxy, chunk_overlap, chunk_size, hf_emb_model_name from src import shared from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl pwd_path = os.path.abspath(os.path.dirname(__file__)) class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter): """Recursive text splitter for Chinese text. copy from: https://github.com/chatchat-space/Langchain-Chatchat/tree/master """ def __init__( self, separators: Optional[List[str]] = None, keep_separator: bool = True, is_separator_regex: bool = True, **kwargs: Any, ) -> None: """Create a new TextSplitter.""" super().__init__(keep_separator=keep_separator, **kwargs) self._separators = separators or [ "\n\n", "\n", "。|!|?", "\.\s|\!\s|\?\s", ";|;\s", ",|,\s" ] self._is_separator_regex = is_separator_regex @staticmethod def _split_text_with_regex_from_end( text: str, separator: str, keep_separator: bool ) -> List[str]: # Now that we have the separator, split the text if separator: if keep_separator: # The parentheses in the pattern keep the delimiters in the result. _splits = re.split(f"({separator})", text) splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])] if len(_splits) % 2 == 1: splits += _splits[-1:] else: splits = re.split(separator, text) else: splits = list(text) return [s for s in splits if s != ""] def _split_text(self, text: str, separators: List[str]) -> List[str]: """Split incoming text and return chunks.""" final_chunks = [] # Get appropriate separator to use separator = separators[-1] new_separators = [] for i, _s in enumerate(separators): _separator = _s if self._is_separator_regex else re.escape(_s) if _s == "": separator = _s break if re.search(_separator, text): separator = _s new_separators = separators[i + 1:] break _separator = separator if self._is_separator_regex else re.escape(separator) splits = self._split_text_with_regex_from_end(text, _separator, self._keep_separator) # Now go merging things, recursively splitting longer texts. 
_good_splits = [] _separator = "" if self._keep_separator else separator for s in splits: if self._length_function(s) < self._chunk_size: _good_splits.append(s) else: if _good_splits: merged_text = self._merge_splits(_good_splits, _separator) final_chunks.extend(merged_text) _good_splits = [] if not new_separators: final_chunks.append(s) else: other_info = self._split_text(s, new_separators) final_chunks.extend(other_info) if _good_splits: merged_text = self._merge_splits(_good_splits, _separator) final_chunks.extend(merged_text) return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""] def get_documents(file_paths): text_splitter = ChineseRecursiveTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) documents = [] logger.debug("Loading documents...") logger.debug(f"file_paths: {file_paths}") for file in file_paths: filepath = file.name filename = os.path.basename(filepath) file_type = os.path.splitext(filename)[1] logger.info(f"loading file: {filename}") texts = None try: if file_type == ".pdf": import PyPDF2 logger.debug("Loading PDF...") try: from src.pdf_func import parse_pdf from src.config import advance_docs two_column = advance_docs["pdf"].get("two_column", False) pdftext = parse_pdf(filepath, two_column).text except: pdftext = "" with open(filepath, "rb") as pdfFileObj: pdfReader = PyPDF2.PdfReader(pdfFileObj) for page in tqdm(pdfReader.pages): pdftext += page.extract_text() texts = [Document(page_content=pdftext, metadata={"source": filepath})] elif file_type == ".docx": logger.debug("Loading Word...") from langchain.document_loaders import UnstructuredWordDocumentLoader loader = UnstructuredWordDocumentLoader(filepath) texts = loader.load() elif file_type == ".pptx": logger.debug("Loading PowerPoint...") from langchain.document_loaders import UnstructuredPowerPointLoader loader = UnstructuredPowerPointLoader(filepath) texts = loader.load() elif file_type == ".epub": logger.debug("Loading EPUB...") from langchain.document_loaders import UnstructuredEPubLoader loader = UnstructuredEPubLoader(filepath) texts = loader.load() elif file_type == ".xlsx": logger.debug("Loading Excel...") text_list = excel_to_string(filepath) texts = [] for elem in text_list: texts.append(Document(page_content=elem, metadata={"source": filepath})) else: logger.debug("Loading text file...") from langchain_community.document_loaders import TextLoader loader = TextLoader(filepath, "utf8") texts = loader.load() logger.debug(f"text size: {len(texts)}, text top3: {texts[:3]}") except Exception as e: logger.error(f"Error loading file: {filename}, {e}") if texts is not None: texts = text_splitter.split_documents(texts) documents.extend(texts) logger.debug(f"Documents loaded. 
documents size: {len(documents)}, top3: {documents[:3]}") return documents def construct_index( api_key, files, load_from_cache_if_possible=True, ): from langchain_community.vectorstores import FAISS from langchain.embeddings.huggingface import HuggingFaceEmbeddings if api_key: os.environ["OPENAI_API_KEY"] = api_key else: os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx" index_name = get_files_hash(files) index_dir = os.path.join(pwd_path, '../index') index_path = f"{index_dir}/{index_name}" doc_file = f"{index_path}/docs.pkl" if local_embedding: embeddings = HuggingFaceEmbeddings(model_name=hf_emb_model_name) else: from langchain_community.embeddings import OpenAIEmbeddings if os.environ.get("OPENAI_API_TYPE", "openai") == "openai": embeddings = OpenAIEmbeddings( openai_api_base=shared.state.openai_api_base, openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key) ) else: embeddings = OpenAIEmbeddings( deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"], model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure" ) if os.path.exists(index_path) and load_from_cache_if_possible: logger.info("找到了缓存的索引文件,加载中……") index = FAISS.load_local(index_path, embeddings) documents = load_pkl(doc_file) return index, documents else: try: documents = get_documents(files) logger.info("构建索引中……") with retrieve_proxy(): index = FAISS.from_documents(documents, embeddings) logger.debug("索引构建完成!") os.makedirs(index_dir, exist_ok=True) index.save_local(index_path) logger.debug("索引已保存至本地!") save_pkl(documents, doc_file) logger.debug("索引文档已保存至本地!") return index, documents except Exception as e: logger.error(f"索引构建失败!error: {e}") return None
[ "langchain.document_loaders.UnstructuredWordDocumentLoader", "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain_community.vectorstores.FAISS.from_documents", "langchain.document_loaders.UnstructuredPowerPointLoader", "langchain.document_loaders.UnstructuredEPubLoader", "langchain.schema.Document", "langchain_community.vectorstores.FAISS.load_local", "langchain_community.document_loaders.TextLoader", "langchain_community.embeddings.OpenAIEmbeddings" ]
[((440, 465), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (455, 465), False, 'import os\n'), ((3874, 3910), 'loguru.logger.debug', 'logger.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (3886, 3910), False, 'from loguru import logger\n'), ((3915, 3956), 'loguru.logger.debug', 'logger.debug', (['f"""file_paths: {file_paths}"""'], {}), "(f'file_paths: {file_paths}')\n", (3927, 3956), False, 'from loguru import logger\n'), ((7187, 7208), 'src.utils.get_files_hash', 'get_files_hash', (['files'], {}), '(files)\n', (7201, 7208), False, 'from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl\n'), ((7225, 7259), 'os.path.join', 'os.path.join', (['pwd_path', '"""../index"""'], {}), "(pwd_path, '../index')\n", (7237, 7259), False, 'import os\n'), ((4033, 4059), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (4049, 4059), False, 'import os\n'), ((4118, 4158), 'loguru.logger.info', 'logger.info', (['f"""loading file: {filename}"""'], {}), "(f'loading file: {filename}')\n", (4129, 4158), False, 'from loguru import logger\n'), ((7390, 7441), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'hf_emb_model_name'}), '(model_name=hf_emb_model_name)\n', (7411, 7441), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((8186, 8212), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (8200, 8212), False, 'import os\n'), ((8254, 8285), 'loguru.logger.info', 'logger.info', (['"""找到了缓存的索引文件,加载中……"""'], {}), "('找到了缓存的索引文件,加载中……')\n", (8265, 8285), False, 'from loguru import logger\n'), ((8302, 8342), 'langchain_community.vectorstores.FAISS.load_local', 'FAISS.load_local', (['index_path', 'embeddings'], {}), '(index_path, embeddings)\n', (8318, 8342), False, 'from langchain_community.vectorstores import FAISS\n'), ((8363, 8381), 'src.utils.load_pkl', 'load_pkl', (['doc_file'], {}), '(doc_file)\n', (8371, 8381), False, 'from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl\n'), ((2427, 2454), 're.search', 're.search', (['_separator', 'text'], {}), '(_separator, text)\n', (2436, 2454), False, 'import re\n'), ((2626, 2646), 're.escape', 're.escape', (['separator'], {}), '(separator)\n', (2635, 2646), False, 'import re\n'), ((4080, 4106), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4096, 4106), False, 'import os\n'), ((7531, 7574), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_TYPE"""', '"""openai"""'], {}), "('OPENAI_API_TYPE', 'openai')\n", (7545, 7574), False, 'import os\n'), ((7829, 8098), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': "os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': "os.environ['AZURE_OPENAI_API_KEY']", 'model': "os.environ['AZURE_EMBEDDING_MODEL_NAME']", 'openai_api_base': "os.environ['AZURE_OPENAI_API_BASE_URL']", 'openai_api_type': '"""azure"""'}), "(deployment=os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME'],\n openai_api_key=os.environ['AZURE_OPENAI_API_KEY'], model=os.environ[\n 'AZURE_EMBEDDING_MODEL_NAME'], openai_api_base=os.environ[\n 'AZURE_OPENAI_API_BASE_URL'], openai_api_type='azure')\n", (7845, 8098), False, 'from langchain_community.embeddings import OpenAIEmbeddings\n'), ((8494, 8516), 'loguru.logger.info', 'logger.info', (['"""构建索引中……"""'], {}), "('构建索引中……')\n", (8505, 8516), False, 'from loguru import logger\n'), ((8632, 8655), 'loguru.logger.debug', 
'logger.debug', (['"""索引构建完成!"""'], {}), "('索引构建完成!')\n", (8644, 8655), False, 'from loguru import logger\n'), ((8668, 8705), 'os.makedirs', 'os.makedirs', (['index_dir'], {'exist_ok': '(True)'}), '(index_dir, exist_ok=True)\n', (8679, 8705), False, 'import os\n'), ((8759, 8784), 'loguru.logger.debug', 'logger.debug', (['"""索引已保存至本地!"""'], {}), "('索引已保存至本地!')\n", (8771, 8784), False, 'from loguru import logger\n'), ((8797, 8826), 'src.utils.save_pkl', 'save_pkl', (['documents', 'doc_file'], {}), '(documents, doc_file)\n', (8805, 8826), False, 'from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl\n'), ((8839, 8866), 'loguru.logger.debug', 'logger.debug', (['"""索引文档已保存至本地!"""'], {}), "('索引文档已保存至本地!')\n", (8851, 8866), False, 'from loguru import logger\n'), ((1595, 1627), 're.split', 're.split', (['f"""({separator})"""', 'text'], {}), "(f'({separator})', text)\n", (1603, 1627), False, 'import re\n'), ((1837, 1862), 're.split', 're.split', (['separator', 'text'], {}), '(separator, text)\n', (1845, 1862), False, 'import re\n'), ((2320, 2333), 're.escape', 're.escape', (['_s'], {}), '(_s)\n', (2329, 2333), False, 'import re\n'), ((4275, 4305), 'loguru.logger.debug', 'logger.debug', (['"""Loading PDF..."""'], {}), "('Loading PDF...')\n", (4287, 4305), False, 'from loguru import logger\n'), ((6526, 6578), 'loguru.logger.error', 'logger.error', (['f"""Error loading file: {filename}, {e}"""'], {}), "(f'Error loading file: {filename}, {e}')\n", (6538, 6578), False, 'from loguru import logger\n'), ((8534, 8550), 'src.config.retrieve_proxy', 'retrieve_proxy', ([], {}), '()\n', (8548, 8550), False, 'from src.config import local_embedding, retrieve_proxy, chunk_overlap, chunk_size, hf_emb_model_name\n'), ((8576, 8619), 'langchain_community.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['documents', 'embeddings'], {}), '(documents, embeddings)\n', (8596, 8619), False, 'from langchain_community.vectorstores import FAISS\n'), ((8946, 8980), 'loguru.logger.error', 'logger.error', (['f"""索引构建失败!error: {e}"""'], {}), "(f'索引构建失败!error: {e}')\n", (8958, 8980), False, 'from loguru import logger\n'), ((4910, 4971), 'langchain.schema.Document', 'Document', ([], {'page_content': 'pdftext', 'metadata': "{'source': filepath}"}), "(page_content=pdftext, metadata={'source': filepath})\n", (4918, 4971), False, 'from langchain.schema import Document\n'), ((5062, 5093), 'loguru.logger.debug', 'logger.debug', (['"""Loading Word..."""'], {}), "('Loading Word...')\n", (5074, 5093), False, 'from loguru import logger\n'), ((5205, 5245), 'langchain.document_loaders.UnstructuredWordDocumentLoader', 'UnstructuredWordDocumentLoader', (['filepath'], {}), '(filepath)\n', (5235, 5245), False, 'from langchain.document_loaders import UnstructuredWordDocumentLoader\n'), ((7724, 7775), 'os.environ.get', 'os.environ.get', (['"""OPENAI_EMBEDDING_API_KEY"""', 'api_key'], {}), "('OPENAI_EMBEDDING_API_KEY', api_key)\n", (7738, 7775), False, 'import os\n'), ((4547, 4578), 'src.pdf_func.parse_pdf', 'parse_pdf', (['filepath', 'two_column'], {}), '(filepath, two_column)\n', (4556, 4578), False, 'from src.pdf_func import parse_pdf\n'), ((5339, 5376), 'loguru.logger.debug', 'logger.debug', (['"""Loading PowerPoint..."""'], {}), "('Loading PowerPoint...')\n", (5351, 5376), False, 'from loguru import logger\n'), ((5486, 5524), 'langchain.document_loaders.UnstructuredPowerPointLoader', 'UnstructuredPowerPointLoader', (['filepath'], {}), '(filepath)\n', (5514, 5524), False, 'from langchain.document_loaders import 
UnstructuredPowerPointLoader\n'), ((4738, 4766), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (4754, 4766), False, 'import PyPDF2\n'), ((4803, 4824), 'tqdm.tqdm', 'tqdm', (['pdfReader.pages'], {}), '(pdfReader.pages)\n', (4807, 4824), False, 'from tqdm import tqdm\n'), ((5618, 5649), 'loguru.logger.debug', 'logger.debug', (['"""Loading EPUB..."""'], {}), "('Loading EPUB...')\n", (5630, 5649), False, 'from loguru import logger\n'), ((5753, 5785), 'langchain.document_loaders.UnstructuredEPubLoader', 'UnstructuredEPubLoader', (['filepath'], {}), '(filepath)\n', (5775, 5785), False, 'from langchain.document_loaders import UnstructuredEPubLoader\n'), ((5879, 5911), 'loguru.logger.debug', 'logger.debug', (['"""Loading Excel..."""'], {}), "('Loading Excel...')\n", (5891, 5911), False, 'from loguru import logger\n'), ((5940, 5965), 'src.utils.excel_to_string', 'excel_to_string', (['filepath'], {}), '(filepath)\n', (5955, 5965), False, 'from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl\n'), ((6201, 6237), 'loguru.logger.debug', 'logger.debug', (['"""Loading text file..."""'], {}), "('Loading text file...')\n", (6213, 6237), False, 'from loguru import logger\n'), ((6339, 6367), 'langchain_community.document_loaders.TextLoader', 'TextLoader', (['filepath', '"""utf8"""'], {}), "(filepath, 'utf8')\n", (6349, 6367), False, 'from langchain_community.document_loaders import TextLoader\n'), ((6065, 6123), 'langchain.schema.Document', 'Document', ([], {'page_content': 'elem', 'metadata': "{'source': filepath}"}), "(page_content=elem, metadata={'source': filepath})\n", (6073, 6123), False, 'from langchain.schema import Document\n')]
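The ChineseRecursiveTextSplitter above can also be exercised directly, outside the file-loading pipeline; a small sketch with illustrative sizes and sample text (three short Chinese sentences meaning "first/second/third sentence", followed by English ones):

# Hypothetical direct use of the splitter defined above.
splitter = ChineseRecursiveTextSplitter(chunk_size=50, chunk_overlap=0)
text = "第一句话。第二句话!第三句话?Then an English sentence. And another one."
for i, chunk in enumerate(splitter.split_text(text)):
    print(i, chunk)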
from fastapi import FastAPI from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import ElasticVectorSearch from config import openai_api_key embedding = OpenAIEmbeddings(openai_api_key=openai_api_key) db = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="elastic-index", embedding=embedding, ) qa = RetrievalQA.from_chain_type( llm=ChatOpenAI(temperature=0), chain_type="stuff", retriever=db.as_retriever(), ) app = FastAPI() @app.get("/") def index(): return { "message": "Make a post request to /ask to ask questions about Meditations by Marcus Aurelius" } @app.post("/ask") def ask(query: str): response = qa.run(query) return { "response": response, }
[ "langchain.vectorstores.ElasticVectorSearch", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((274, 321), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (290, 321), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((328, 444), 'langchain.vectorstores.ElasticVectorSearch', 'ElasticVectorSearch', ([], {'elasticsearch_url': '"""http://localhost:9200"""', 'index_name': '"""elastic-index"""', 'embedding': 'embedding'}), "(elasticsearch_url='http://localhost:9200', index_name=\n 'elastic-index', embedding=embedding)\n", (347, 444), False, 'from langchain.vectorstores import ElasticVectorSearch\n'), ((590, 599), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (597, 599), False, 'from fastapi import FastAPI\n'), ((497, 522), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (507, 522), False, 'from langchain.chat_models import ChatOpenAI\n')]
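Once the service above is running (for example via uvicorn, assuming the module is importable as main:app), the /ask route can be exercised with a plain HTTP client; since query is declared as a scalar parameter it travels as a query string:

import requests

# Hypothetical client for the FastAPI app defined above, assumed to be served on localhost:8000.
resp = requests.post(
    "http://localhost:8000/ask",
    params={"query": "What does Marcus Aurelius say about anger?"},
)
print(resp.json()["response"])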
from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List import pandas as pd import streamlit as st from langchain.chains import LLMChain from langchain.prompts.few_shot import FewShotPromptTemplate from doccano_mini.components import ( display_download_button, openai_model_form, task_instruction_editor, usage, ) from doccano_mini.utils import escape_markdown class BasePage(ABC): example_path: str = "" def __init__(self, title: str) -> None: self.title = title @property def columns(self) -> List[str]: return [] def load_examples(self, filename: str) -> pd.DataFrame: filepath = Path(__file__).parent.resolve().joinpath("examples", filename) return pd.read_json(filepath) def make_examples(self, columns: List[str]) -> List[Dict]: df = self.load_examples(self.example_path) edited_df = st.experimental_data_editor(df, num_rows="dynamic", width=1000) examples = edited_df.to_dict(orient="records") return examples @abstractmethod def make_prompt(self, examples: List[Dict]) -> FewShotPromptTemplate: raise NotImplementedError() @abstractmethod def prepare_inputs(self, columns: List[str]) -> Dict: raise NotImplementedError() def annotate(self, examples: List[Dict]) -> List[Dict]: return examples def render(self) -> None: st.title(self.title) st.header("Annotate your data") columns = self.columns examples = self.make_examples(columns) examples = self.annotate(examples) prompt = self.make_prompt(examples) prompt = task_instruction_editor(prompt) st.header("Test") col1, col2 = st.columns([3, 1]) with col1: inputs = self.prepare_inputs(columns) with col2: llm = openai_model_form() with st.expander("See your prompt"): st.markdown(f"```\n{prompt.format(**inputs)}\n```") if llm is None: st.error("Enter your API key.") if st.button("Predict", disabled=llm is None): chain = LLMChain(llm=llm, prompt=prompt) # type:ignore response = chain.run(**inputs) st.markdown(escape_markdown(response).replace("\n", " \n")) chain.save("config.yaml") display_download_button() usage()
[ "langchain.chains.LLMChain" ]
[((763, 785), 'pandas.read_json', 'pd.read_json', (['filepath'], {}), '(filepath)\n', (775, 785), True, 'import pandas as pd\n'), ((921, 984), 'streamlit.experimental_data_editor', 'st.experimental_data_editor', (['df'], {'num_rows': '"""dynamic"""', 'width': '(1000)'}), "(df, num_rows='dynamic', width=1000)\n", (948, 984), True, 'import streamlit as st\n'), ((1434, 1454), 'streamlit.title', 'st.title', (['self.title'], {}), '(self.title)\n', (1442, 1454), True, 'import streamlit as st\n'), ((1463, 1494), 'streamlit.header', 'st.header', (['"""Annotate your data"""'], {}), "('Annotate your data')\n", (1472, 1494), True, 'import streamlit as st\n'), ((1678, 1709), 'doccano_mini.components.task_instruction_editor', 'task_instruction_editor', (['prompt'], {}), '(prompt)\n', (1701, 1709), False, 'from doccano_mini.components import display_download_button, openai_model_form, task_instruction_editor, usage\n'), ((1719, 1736), 'streamlit.header', 'st.header', (['"""Test"""'], {}), "('Test')\n", (1728, 1736), True, 'import streamlit as st\n'), ((1758, 1776), 'streamlit.columns', 'st.columns', (['[3, 1]'], {}), '([3, 1])\n', (1768, 1776), True, 'import streamlit as st\n'), ((2096, 2138), 'streamlit.button', 'st.button', (['"""Predict"""'], {'disabled': '(llm is None)'}), "('Predict', disabled=llm is None)\n", (2105, 2138), True, 'import streamlit as st\n'), ((2409, 2416), 'doccano_mini.components.usage', 'usage', ([], {}), '()\n', (2414, 2416), False, 'from doccano_mini.components import display_download_button, openai_model_form, task_instruction_editor, usage\n'), ((1885, 1904), 'doccano_mini.components.openai_model_form', 'openai_model_form', ([], {}), '()\n', (1902, 1904), False, 'from doccano_mini.components import display_download_button, openai_model_form, task_instruction_editor, usage\n'), ((1919, 1949), 'streamlit.expander', 'st.expander', (['"""See your prompt"""'], {}), "('See your prompt')\n", (1930, 1949), True, 'import streamlit as st\n'), ((2052, 2083), 'streamlit.error', 'st.error', (['"""Enter your API key."""'], {}), "('Enter your API key.')\n", (2060, 2083), True, 'import streamlit as st\n'), ((2160, 2192), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2168, 2192), False, 'from langchain.chains import LLMChain\n'), ((2375, 2400), 'doccano_mini.components.display_download_button', 'display_download_button', ([], {}), '()\n', (2398, 2400), False, 'from doccano_mini.components import display_download_button, openai_model_form, task_instruction_editor, usage\n'), ((2275, 2300), 'doccano_mini.utils.escape_markdown', 'escape_markdown', (['response'], {}), '(response)\n', (2290, 2300), False, 'from doccano_mini.utils import escape_markdown\n'), ((685, 699), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (689, 699), False, 'from pathlib import Path\n')]
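The two abstract methods above are all a concrete page has to supply; a hedged sketch of a minimal subclass (the page name, example file and column names are invented for illustration, and BasePage is assumed importable from the module above):

from typing import Dict, List

import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate


class SummarizationPage(BasePage):
    example_path = "summarization.json"  # hypothetical file under examples/

    @property
    def columns(self) -> List[str]:
        return ["text", "summary"]

    def make_prompt(self, examples: List[Dict]) -> FewShotPromptTemplate:
        return FewShotPromptTemplate(
            examples=examples,
            example_prompt=PromptTemplate.from_template("Text: {text}\nSummary: {summary}"),
            prefix="Summarize the given text.",
            suffix="Text: {text}\nSummary:",
            input_variables=["text"],
        )

    def prepare_inputs(self, columns: List[str]) -> Dict:
        return {"text": st.text_area(label="Text to summarize", value="")}

Rendering it is then just SummarizationPage(title="Summarization").render().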
"""This module contains functions for loading and managing vector stores in the Wandbot ingestion system. The module includes the following functions: - `load`: Loads the vector store from the specified source artifact path and returns the name of the resulting artifact. Typical usage example: project = "wandbot-dev" entity = "wandbot" source_artifact_path = "wandbot/wandbot-dev/raw_dataset:latest" result_artifact_name = "wandbot_index" load(project, entity, source_artifact_path, result_artifact_name) """ import json import pathlib from typing import Any, Dict, List from langchain.schema import Document as LcDocument from llama_index.callbacks import WandbCallbackHandler import wandb from wandbot.ingestion import preprocess_data from wandbot.ingestion.config import VectorStoreConfig from wandbot.utils import ( get_logger, load_index, load_service_context, load_storage_context, ) logger = get_logger(__name__) def load( project: str, entity: str, source_artifact_path: str, result_artifact_name: str = "wandbot_index", ) -> str: """Load the vector store. Loads the vector store from the specified source artifact path and returns the name of the resulting artifact. Args: project: The name of the project. entity: The name of the entity. source_artifact_path: The path to the source artifact. result_artifact_name: The name of the resulting artifact. Defaults to "wandbot_index". Returns: The name of the resulting artifact. Raises: wandb.Error: An error occurred during the loading process. """ config: VectorStoreConfig = VectorStoreConfig() run: wandb.Run = wandb.init( project=project, entity=entity, job_type="create_vectorstore" ) artifact: wandb.Artifact = run.use_artifact( source_artifact_path, type="dataset" ) artifact_dir: str = artifact.download() storage_context = load_storage_context(config.embedding_dim) service_context = load_service_context( embeddings_cache=str(config.embeddings_cache), llm="gpt-3.5-turbo-16k-0613", temperature=config.temperature, max_retries=config.max_retries, ) document_files: List[pathlib.Path] = list( pathlib.Path(artifact_dir).rglob("documents.jsonl") ) transformed_documents: List[LcDocument] = [] for document_file in document_files: documents: List[LcDocument] = [] with document_file.open() as f: for line in f: doc_dict: Dict[str, Any] = json.loads(line) doc: LcDocument = LcDocument(**doc_dict) documents.append(doc) transformed_documents.extend(preprocess_data.load(documents)) unique_objects = {obj.hash: obj for obj in transformed_documents} transformed_documents = list(unique_objects.values()) index = load_index( transformed_documents, service_context, storage_context, persist_dir=str(config.persist_dir), ) wandb_callback: WandbCallbackHandler = WandbCallbackHandler() wandb_callback.persist_index(index, index_name=result_artifact_name) wandb_callback.finish() run.finish() return f"{entity}/{project}/{result_artifact_name}:latest"
[ "langchain.schema.Document" ]
[((944, 964), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (954, 964), False, 'from wandbot.utils import get_logger, load_index, load_service_context, load_storage_context\n'), ((1677, 1696), 'wandbot.ingestion.config.VectorStoreConfig', 'VectorStoreConfig', ([], {}), '()\n', (1694, 1696), False, 'from wandbot.ingestion.config import VectorStoreConfig\n'), ((1718, 1791), 'wandb.init', 'wandb.init', ([], {'project': 'project', 'entity': 'entity', 'job_type': '"""create_vectorstore"""'}), "(project=project, entity=entity, job_type='create_vectorstore')\n", (1728, 1791), False, 'import wandb\n'), ((1972, 2014), 'wandbot.utils.load_storage_context', 'load_storage_context', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1992, 2014), False, 'from wandbot.utils import get_logger, load_index, load_service_context, load_storage_context\n'), ((3103, 3125), 'llama_index.callbacks.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '()\n', (3123, 3125), False, 'from llama_index.callbacks import WandbCallbackHandler\n'), ((2743, 2774), 'wandbot.ingestion.preprocess_data.load', 'preprocess_data.load', (['documents'], {}), '(documents)\n', (2763, 2774), False, 'from wandbot.ingestion import preprocess_data\n'), ((2294, 2320), 'pathlib.Path', 'pathlib.Path', (['artifact_dir'], {}), '(artifact_dir)\n', (2306, 2320), False, 'import pathlib\n'), ((2594, 2610), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2604, 2610), False, 'import json\n'), ((2645, 2667), 'langchain.schema.Document', 'LcDocument', ([], {}), '(**doc_dict)\n', (2655, 2667), True, 'from langchain.schema import Document as LcDocument\n')]
from langchain.tools import BaseTool from langchain.tools.render import render_text_description from langchain_core.language_models.base import LanguageModelLike from langchain_core.messages import ( AIMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langgraph.checkpoint import BaseCheckpointSaver from langgraph.graph import END from langgraph.graph.message import MessageGraph from langgraph.prebuilt import ToolExecutor, ToolInvocation from app.agent_types.prompts import xml_template from app.message_types import LiberalFunctionMessage def _collapse_messages(messages): log = "" if isinstance(messages[-1], AIMessage): scratchpad = messages[:-1] final = messages[-1] else: scratchpad = messages final = None if len(scratchpad) % 2 != 0: raise ValueError("Unexpected") for i in range(0, len(scratchpad), 2): action = messages[i] observation = messages[i + 1] log += f"{action.content}<observation>{observation.content}</observation>" if final is not None: log += final.content return AIMessage(content=log) def construct_chat_history(messages): collapsed_messages = [] temp_messages = [] for message in messages: if isinstance(message, HumanMessage): if temp_messages: collapsed_messages.append(_collapse_messages(temp_messages)) temp_messages = [] collapsed_messages.append(message) elif isinstance(message, LiberalFunctionMessage): _dict = message.dict() _dict["content"] = str(_dict["content"]) m_c = FunctionMessage(**_dict) temp_messages.append(m_c) else: temp_messages.append(message) # Don't forget to add the last non-human message if it exists if temp_messages: collapsed_messages.append(_collapse_messages(temp_messages)) return collapsed_messages def get_xml_agent_executor( tools: list[BaseTool], llm: LanguageModelLike, system_message: str, interrupt_before_action: bool, checkpoint: BaseCheckpointSaver, ): formatted_system_message = xml_template.format( system_message=system_message, tools=render_text_description(tools), tool_names=", ".join([t.name for t in tools]), ) llm_with_stop = llm.bind(stop=["</tool_input>", "<observation>"]) def _get_messages(messages): return [ SystemMessage(content=formatted_system_message) ] + construct_chat_history(messages) agent = _get_messages | llm_with_stop tool_executor = ToolExecutor(tools) # Define the function that determines whether to continue or not def should_continue(messages): last_message = messages[-1] if "</tool>" in last_message.content: return "continue" else: return "end" # Define the function to execute tools async def call_tool(messages): # Based on the continue condition # we know the last message involves a function call last_message = messages[-1] # We construct an ToolInvocation from the function_call tool, tool_input = last_message.content.split("</tool>") _tool = tool.split("<tool>")[1] if "<tool_input>" not in tool_input: _tool_input = "" else: _tool_input = tool_input.split("<tool_input>")[1] if "</tool_input>" in _tool_input: _tool_input = _tool_input.split("</tool_input>")[0] action = ToolInvocation( tool=_tool, tool_input=_tool_input, ) # We call the tool_executor and get back a response response = await tool_executor.ainvoke(action) # We use the response to create a FunctionMessage function_message = LiberalFunctionMessage(content=response, name=action.tool) # We return a list, because this will get added to the existing list return function_message workflow = MessageGraph() # Define the two nodes we will cycle between workflow.add_node("agent", agent) workflow.add_node("action", call_tool) # Set the entrypoint as `agent` # 
This means that this node is the first one called workflow.set_entry_point("agent") # We now add a conditional edge workflow.add_conditional_edges( # First, we define the start node. We use `agent`. # This means these are the edges taken after the `agent` node is called. "agent", # Next, we pass in the function that will determine which node is called next. should_continue, # Finally we pass in a mapping. # The keys are strings, and the values are other nodes. # END is a special node marking that the graph should finish. # What will happen is we will call `should_continue`, and then the output of that # will be matched against the keys in this mapping. # Based on which one it matches, that node will then be called. { # If `tools`, then we call the tool node. "continue": "action", # Otherwise we finish. "end": END, }, ) # We now add a normal edge from `tools` to `agent`. # This means that after `tools` is called, `agent` node is called next. workflow.add_edge("action", "agent") # Finally, we compile it! # This compiles it into a LangChain Runnable, # meaning you can use it as you would any other runnable app = workflow.compile(checkpointer=checkpoint) if interrupt_before_action: app.interrupt = ["action:inbox"] return app
[ "langchain_core.messages.AIMessage", "langchain.tools.render.render_text_description", "langchain_core.messages.SystemMessage", "langchain_core.messages.FunctionMessage" ]
[((1121, 1143), 'langchain_core.messages.AIMessage', 'AIMessage', ([], {'content': 'log'}), '(content=log)\n', (1130, 1143), False, 'from langchain_core.messages import AIMessage, FunctionMessage, HumanMessage, SystemMessage\n'), ((2644, 2663), 'langgraph.prebuilt.ToolExecutor', 'ToolExecutor', (['tools'], {}), '(tools)\n', (2656, 2663), False, 'from langgraph.prebuilt import ToolExecutor, ToolInvocation\n'), ((4058, 4072), 'langgraph.graph.message.MessageGraph', 'MessageGraph', ([], {}), '()\n', (4070, 4072), False, 'from langgraph.graph.message import MessageGraph\n'), ((3588, 3638), 'langgraph.prebuilt.ToolInvocation', 'ToolInvocation', ([], {'tool': '_tool', 'tool_input': '_tool_input'}), '(tool=_tool, tool_input=_tool_input)\n', (3602, 3638), False, 'from langgraph.prebuilt import ToolExecutor, ToolInvocation\n'), ((3874, 3932), 'app.message_types.LiberalFunctionMessage', 'LiberalFunctionMessage', ([], {'content': 'response', 'name': 'action.tool'}), '(content=response, name=action.tool)\n', (3896, 3932), False, 'from app.message_types import LiberalFunctionMessage\n'), ((2261, 2291), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (2284, 2291), False, 'from langchain.tools.render import render_text_description\n'), ((1663, 1687), 'langchain_core.messages.FunctionMessage', 'FunctionMessage', ([], {}), '(**_dict)\n', (1678, 1687), False, 'from langchain_core.messages import AIMessage, FunctionMessage, HumanMessage, SystemMessage\n'), ((2488, 2535), 'langchain_core.messages.SystemMessage', 'SystemMessage', ([], {'content': 'formatted_system_message'}), '(content=formatted_system_message)\n', (2501, 2535), False, 'from langchain_core.messages import AIMessage, FunctionMessage, HumanMessage, SystemMessage\n')]
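A minimal sketch of the message-collapsing step in the row above, assuming _collapse_messages from that row is in scope; the tool name, observation text, and final answer are illustrative placeholders, not part of the original module.

from langchain_core.messages import AIMessage, FunctionMessage

# Alternating action/observation messages, ending with the final AI answer,
# mirroring the shape construct_chat_history feeds into _collapse_messages.
history = [
    AIMessage(content="<tool>search</tool><tool_input>weather in Paris"),
    FunctionMessage(name="search", content="18C and sunny"),  # placeholder observation
    AIMessage(content="<final_answer>It is 18C and sunny in Paris.</final_answer>"),
]
# Folds each action/observation pair into one AIMessage with <observation> tags,
# which is the format the XML prompt template expects as chat history.
print(_collapse_messages(history).content)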
from textwrap import dedent from langchain import OpenAI from langchain.schema import BaseModel from utils import format_prompt_components_without_tools def extract_first_message(message: str) -> str: """The LLM can continue the conversation from the recipient. So extract just the first line.""" return message.split("\n")[0].strip() def get_unsolicited_message_prompt(ai_prefix: str, human_prefix: str) -> str: """Get prompt for unsolicited message.""" inspirational_thought = f""" *{ai_prefix} then drew on their past experiences with {human_prefix} and continued the conversation*""" return dedent(inspirational_thought) def generate_unsolicited_message( prompt: str, model: BaseModel, ai_settings: dict, contact_settings: dict, temperature: int = 0, ) -> str: """Generate AI message without message from user.""" ai_prefix, _, prefix, suffix = format_prompt_components_without_tools( ai_settings, contact_settings ) chat_history = model.memory.load_memory_variables({})["chat_history"] prompt = "\n".join([prefix, suffix, prompt, "", f"{ai_prefix}:"]).format( chat_history=chat_history ) llm = OpenAI(temperature=temperature) message = llm(prompt) message = extract_first_message(message) model.memory.chat_memory.add_ai_message(message) return message
[ "langchain.OpenAI" ]
[((627, 656), 'textwrap.dedent', 'dedent', (['inspirational_thought'], {}), '(inspirational_thought)\n', (633, 656), False, 'from textwrap import dedent\n'), ((912, 981), 'utils.format_prompt_components_without_tools', 'format_prompt_components_without_tools', (['ai_settings', 'contact_settings'], {}), '(ai_settings, contact_settings)\n', (950, 981), False, 'from utils import format_prompt_components_without_tools\n'), ((1199, 1230), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature'}), '(temperature=temperature)\n', (1205, 1230), False, 'from langchain import OpenAI\n')]
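A self-contained sketch of the first-line extraction used in the row above; the completion text is a placeholder showing why only the first line is kept when the model keeps role-playing both sides of the conversation.

def extract_first_message(message: str) -> str:
    # Keep only the AI's first turn; drop any continued dialogue the LLM invented.
    return message.split("\n")[0].strip()

completion = "Hey, how was the trip?\nHuman: It was great!\nAI: Glad to hear it."
print(extract_first_message(completion))  # -> "Hey, how was the trip?"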
"""VectorStore wrapper around a Postgres/PGVector database.""" from __future__ import annotations import enum import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type import sqlalchemy from pgvector.sqlalchemy import Vector from sqlalchemy.dialects.postgresql import JSON, UUID from sqlalchemy.orm import Session, declarative_base, relationship from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore Base = declarative_base() # type: Any ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" class BaseModel(Base): __abstract__ = True uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) class CollectionStore(BaseModel): __tablename__ = "langchain_pg_collection" name = sqlalchemy.Column(sqlalchemy.String) cmetadata = sqlalchemy.Column(JSON) embeddings = relationship( "EmbeddingStore", back_populates="collection", passive_deletes=True, ) @classmethod def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]: return session.query(cls).filter(cls.name == name).first() # type: ignore @classmethod def get_or_create( cls, session: Session, name: str, cmetadata: Optional[dict] = None, ) -> Tuple["CollectionStore", bool]: """ Get or create a collection. Returns [Collection, bool] where the bool is True if the collection was created. """ created = False collection = cls.get_by_name(session, name) if collection: return collection, created collection = cls(name=name, cmetadata=cmetadata) session.add(collection) session.commit() created = True return collection, created class EmbeddingStore(BaseModel): __tablename__ = "langchain_pg_embedding" collection_id = sqlalchemy.Column( UUID(as_uuid=True), sqlalchemy.ForeignKey( f"{CollectionStore.__tablename__}.uuid", ondelete="CASCADE", ), ) collection = relationship(CollectionStore, back_populates="embeddings") embedding: Vector = sqlalchemy.Column(Vector(ADA_TOKEN_COUNT)) document = sqlalchemy.Column(sqlalchemy.String, nullable=True) cmetadata = sqlalchemy.Column(JSON, nullable=True) # custom_id : any user defined id custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True) class QueryResult: EmbeddingStore: EmbeddingStore distance: float class DistanceStrategy(str, enum.Enum): EUCLIDEAN = EmbeddingStore.embedding.l2_distance COSINE = EmbeddingStore.embedding.cosine_distance MAX_INNER_PRODUCT = EmbeddingStore.embedding.max_inner_product DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.EUCLIDEAN class PGVector(VectorStore): """ VectorStore implementation using Postgres and pgvector. - `connection_string` is a postgres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. 
""" def __init__( self, connection_string: str, embedding_function: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self.distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. """ self._conn = self.connect() # self.create_vector_extension() self.create_tables_if_not_exists() self.create_collection() def connect(self) -> sqlalchemy.engine.Connection: engine = sqlalchemy.create_engine(self.connection_string) conn = engine.connect() return conn def create_vector_extension(self) -> None: try: with Session(self._conn) as session: statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector") session.execute(statement) session.commit() except Exception as e: self.logger.exception(e) def create_tables_if_not_exists(self) -> None: with self._conn.begin(): Base.metadata.create_all(self._conn) def drop_tables(self) -> None: with self._conn.begin(): Base.metadata.drop_all(self._conn) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() with Session(self._conn) as session: CollectionStore.get_or_create( session, self.collection_name, cmetadata=self.collection_metadata ) def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") with Session(self._conn) as session: collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return session.delete(collection) session.commit() def get_collection(self, session: Session) -> Optional["CollectionStore"]: return CollectionStore.get_by_name(session, self.collection_name) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DistanceStrategy.COSINE, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: List[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) collection.embeddings.append(embedding_store) session.add(embedding_store) session.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) collection.embeddings.append(embedding_store) session.add(embedding_store) session.commit() return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with PGVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") filter_by = EmbeddingStore.collection_id == collection.uuid if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = "in" if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = { k.lower(): v for k, v in value.items() } filter_by_metadata = EmbeddingStore.cmetadata[key].astext.in_( value_case_insensitive[IN] ) filter_clauses.append(filter_by_metadata) else: filter_by_metadata = EmbeddingStore.cmetadata[ key ].astext == str(value) filter_clauses.append(filter_by_metadata) filter_by = sqlalchemy.and_(filter_by, *filter_clauses) results: List[QueryResult] = ( session.query( EmbeddingStore, self.distance_strategy(embedding).label("distance"), # type: ignore ) .filter(filter_by) .order_by(sqlalchemy.asc("distance")) .join( CollectionStore, EmbeddingStore.collection_id == CollectionStore.uuid, ) .limit(k) .all() ) docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DistanceStrategy.COSINE, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DistanceStrategy.COSINE, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. 
Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain import PGVector from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[PGVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DistanceStrategy.COSINE, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Get intsance of an existing PGVector store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PGVECTOR_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PGVECTOR_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[PGVector], documents: List[Document], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, distance_strategy=distance_strategy, metadatas=metadatas, ids=ids, collection_name=collection_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
[ "langchain.utils.get_from_dict_or_env", "langchain.docstore.document.Document" ]
[((593, 611), 'sqlalchemy.orm.declarative_base', 'declarative_base', ([], {}), '()\n', (609, 611), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((929, 965), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.String'], {}), '(sqlalchemy.String)\n', (946, 965), False, 'import sqlalchemy\n'), ((982, 1005), 'sqlalchemy.Column', 'sqlalchemy.Column', (['JSON'], {}), '(JSON)\n', (999, 1005), False, 'import sqlalchemy\n'), ((1024, 1110), 'sqlalchemy.orm.relationship', 'relationship', (['"""EmbeddingStore"""'], {'back_populates': '"""collection"""', 'passive_deletes': '(True)'}), "('EmbeddingStore', back_populates='collection', passive_deletes\n =True)\n", (1036, 1110), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((2264, 2322), 'sqlalchemy.orm.relationship', 'relationship', (['CollectionStore'], {'back_populates': '"""embeddings"""'}), "(CollectionStore, back_populates='embeddings')\n", (2276, 2322), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((2406, 2457), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.String'], {'nullable': '(True)'}), '(sqlalchemy.String, nullable=True)\n', (2423, 2457), False, 'import sqlalchemy\n'), ((2474, 2512), 'sqlalchemy.Column', 'sqlalchemy.Column', (['JSON'], {'nullable': '(True)'}), '(JSON, nullable=True)\n', (2491, 2512), False, 'import sqlalchemy\n'), ((2568, 2619), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.String'], {'nullable': '(True)'}), '(sqlalchemy.String, nullable=True)\n', (2585, 2619), False, 'import sqlalchemy\n'), ((777, 795), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (781, 795), False, 'from sqlalchemy.dialects.postgresql import JSON, UUID\n'), ((2094, 2112), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (2098, 2112), False, 'from sqlalchemy.dialects.postgresql import JSON, UUID\n'), ((2122, 2209), 'sqlalchemy.ForeignKey', 'sqlalchemy.ForeignKey', (['f"""{CollectionStore.__tablename__}.uuid"""'], {'ondelete': '"""CASCADE"""'}), "(f'{CollectionStore.__tablename__}.uuid', ondelete=\n 'CASCADE')\n", (2143, 2209), False, 'import sqlalchemy\n'), ((2366, 2389), 'pgvector.sqlalchemy.Vector', 'Vector', (['ADA_TOKEN_COUNT'], {}), '(ADA_TOKEN_COUNT)\n', (2372, 2389), False, 'from pgvector.sqlalchemy import Vector\n'), ((5035, 5083), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['self.connection_string'], {}), '(self.connection_string)\n', (5059, 5083), False, 'import sqlalchemy\n'), ((18474, 18575), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', ([], {'data': 'kwargs', 'key': '"""connection_string"""', 'env_key': '"""PGVECTOR_CONNECTION_STRING"""'}), "(data=kwargs, key='connection_string', env_key=\n 'PGVECTOR_CONNECTION_STRING')\n", (18494, 18575), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4645, 4672), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4662, 4672), False, 'import logging\n'), ((5856, 5875), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', (5863, 5875), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((6139, 6158), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', (6146, 6158), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((8181, 8200), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', 
(8188, 8200), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((9616, 9635), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', (9623, 9635), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((11942, 11961), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', (11949, 11961), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((5214, 5233), 'sqlalchemy.orm.Session', 'Session', (['self._conn'], {}), '(self._conn)\n', (5221, 5233), False, 'from sqlalchemy.orm import Session, declarative_base, relationship\n'), ((5274, 5330), 'sqlalchemy.text', 'sqlalchemy.text', (['"""CREATE EXTENSION IF NOT EXISTS vector"""'], {}), "('CREATE EXTENSION IF NOT EXISTS vector')\n", (5289, 5330), False, 'import sqlalchemy\n'), ((13070, 13113), 'sqlalchemy.and_', 'sqlalchemy.and_', (['filter_by', '*filter_clauses'], {}), '(filter_by, *filter_clauses)\n', (13085, 13113), False, 'import sqlalchemy\n'), ((13684, 13784), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'result.EmbeddingStore.document', 'metadata': 'result.EmbeddingStore.cmetadata'}), '(page_content=result.EmbeddingStore.document, metadata=result.\n EmbeddingStore.cmetadata)\n', (13692, 13784), False, 'from langchain.docstore.document import Document\n'), ((7065, 7077), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (7075, 7077), False, 'import uuid\n'), ((9426, 9438), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (9436, 9438), False, 'import uuid\n'), ((13393, 13419), 'sqlalchemy.asc', 'sqlalchemy.asc', (['"""distance"""'], {}), "('distance')\n", (13407, 13419), False, 'import sqlalchemy\n')]
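A hedged end-to-end sketch for the PGVector wrapper above, assuming the class is in scope, a Postgres server with the pgvector extension is reachable, and OPENAI_API_KEY is set; the connection details, collection name, and texts are placeholders.

from langchain.embeddings import OpenAIEmbeddings

# Placeholder credentials for a local Postgres instance with pgvector installed.
connection_string = PGVector.connection_string_from_db_params(
    driver="psycopg2",
    host="localhost",
    port=5432,
    database="postgres",
    user="postgres",
    password="postgres",
)
store = PGVector.from_texts(
    texts=["pgvector keeps embeddings inside Postgres"],
    embedding=OpenAIEmbeddings(),
    collection_name="demo_collection",
    connection_string=connection_string,
)
print(store.similarity_search("where are the embeddings stored?", k=1))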
import tempfile import time import os from utils import compute_sha1_from_file from langchain.schema import Document import streamlit as st from langchain.text_splitter import RecursiveCharacterTextSplitter from typing import List from sqlite3 import Connection from verse.sqlite_helper import * def update_metadata(conn: Connection, docs_with_metadata: List[Document]): insert_tuple = list( set( map( lambda x: ( hash(x.metadata["file_sha1"]), x.metadata["file_sha1"], x.metadata["file_name"], ), docs_with_metadata, ) ) ) insertmany(conn=conn, datalist=insert_tuple) def process_file( conn: Connection, file, loader_class, file_suffix, stats_db=None ) -> List[Document]: documents = [] file_name = file.name file_size = file.size if st.secrets.self_hosted == "false": if file_size > 1000000: st.error( "File size is too large. Please upload a file smaller than 1MB or self host." ) return dateshort = time.strftime("%Y%m%d") with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as tmp_file: tmp_file.write(file.getvalue()) tmp_file.flush() loader = loader_class(tmp_file.name) documents = loader.load() file_sha1 = compute_sha1_from_file(tmp_file.name) os.remove(tmp_file.name) chunk_size = st.session_state["chunk_size"] chunk_overlap = st.session_state["chunk_overlap"] print(f"Chunk Size {chunk_size} Overlap {chunk_overlap}") text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=["\n\n", ""] ) documents = text_splitter.split_documents(documents) # Add the document sha1 as metadata to each document docs_with_metadata = [ Document( page_content=doc.page_content, metadata={ "file_sha1": file_sha1, "file_size": file_size, "file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort, "file_type": file_suffix, "page": doc.metadata["page"], "dbsource": doc.metadata["source"] }, ) for doc in documents ] return docs_with_metadata
[ "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.schema.Document" ]
[((1152, 1175), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (1165, 1175), False, 'import time\n'), ((1468, 1492), 'os.remove', 'os.remove', (['tmp_file.name'], {}), '(tmp_file.name)\n', (1477, 1492), False, 'import os\n'), ((1679, 1812), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': "['\\n\\n', '']"}), "(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=['\\n\\n', ''])\n", (1731, 1812), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1185, 1246), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (1212, 1246), False, 'import tempfile\n'), ((1425, 1462), 'utils.compute_sha1_from_file', 'compute_sha1_from_file', (['tmp_file.name'], {}), '(tmp_file.name)\n', (1447, 1462), False, 'from utils import compute_sha1_from_file\n'), ((1973, 2281), 'langchain.schema.Document', 'Document', ([], {'page_content': 'doc.page_content', 'metadata': "{'file_sha1': file_sha1, 'file_size': file_size, 'file_name': file_name,\n 'chunk_size': chunk_size, 'chunk_overlap': chunk_overlap, 'date':\n dateshort, 'file_type': file_suffix, 'page': doc.metadata['page'],\n 'dbsource': doc.metadata['source']}"}), "(page_content=doc.page_content, metadata={'file_sha1': file_sha1,\n 'file_size': file_size, 'file_name': file_name, 'chunk_size':\n chunk_size, 'chunk_overlap': chunk_overlap, 'date': dateshort,\n 'file_type': file_suffix, 'page': doc.metadata['page'], 'dbsource': doc\n .metadata['source']})\n", (1981, 2281), False, 'from langchain.schema import Document\n'), ((998, 1095), 'streamlit.error', 'st.error', (['"""File size is too large. Please upload a file smaller than 1MB or self host."""'], {}), "(\n 'File size is too large. Please upload a file smaller than 1MB or self host.'\n )\n", (1006, 1095), True, 'import streamlit as st\n')]
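A standalone sketch of the token-based splitting used in the row above, without the Streamlit and temp-file plumbing; the chunk sizes, sample text, and metadata values are illustrative.

from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=200, chunk_overlap=20, separators=["\n\n", ""]
)
loaded = [Document(page_content="Lorem ipsum dolor sit amet. " * 500,
                   metadata={"page": 1, "source": "demo.pdf"})]
chunks = splitter.split_documents(loaded)
print(len(chunks), chunks[0].metadata)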
import json import logging from typing import Any, Dict, Iterator, List, Optional import requests from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.pydantic_v1 import Field from langchain.schema.output import GenerationChunk logger = logging.getLogger(__name__) class TextGen(LLM): """text-generation-webui models. To use, you should have the text-generation-webui installed, a model loaded, and --api added as a command-line option. Suggested installation, use one-click installer for your OS: https://github.com/oobabooga/text-generation-webui#one-click-installers Parameters below taken from text-generation-webui api example: https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py Example: .. code-block:: python from langchain.llms import TextGen llm = TextGen(model_url="http://localhost:8500") """ model_url: str """The full URL to the textgen webui including http[s]://host:port """ preset: Optional[str] = None """The preset to use in the textgen webui """ max_new_tokens: Optional[int] = 250 """The maximum number of tokens to generate.""" do_sample: bool = Field(True, alias="do_sample") """Do sample""" temperature: Optional[float] = 1.3 """Primary factor to control randomness of outputs. 0 = deterministic (only the most likely token is used). Higher value = more randomness.""" top_p: Optional[float] = 0.1 """If not set to 1, select tokens with probabilities adding up to less than this number. Higher value = higher range of possible random results.""" typical_p: Optional[float] = 1 """If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.""" epsilon_cutoff: Optional[float] = 0 # In units of 1e-4 """Epsilon cutoff""" eta_cutoff: Optional[float] = 0 # In units of 1e-4 """ETA cutoff""" repetition_penalty: Optional[float] = 1.18 """Exponential penalty factor for repeating prior tokens. 1 means no penalty, higher value = less repetition, lower value = more repetition.""" top_k: Optional[float] = 40 """Similar to top_p, but select instead only the top_k most likely tokens. Higher value = higher range of possible random results.""" min_length: Optional[int] = 0 """Minimum generation length in tokens.""" no_repeat_ngram_size: Optional[int] = 0 """If not set to 0, specifies the length of token sets that are completely blocked from repeating at all. Higher values = blocks larger phrases, lower values = blocks words or letters from repeating. Only 0 or high values are a good idea in most cases.""" num_beams: Optional[int] = 1 """Number of beams""" penalty_alpha: Optional[float] = 0 """Penalty Alpha""" length_penalty: Optional[float] = 1 """Length Penalty""" early_stopping: bool = Field(False, alias="early_stopping") """Early stopping""" seed: int = Field(-1, alias="seed") """Seed (-1 for random)""" add_bos_token: bool = Field(True, alias="add_bos_token") """Add the bos_token to the beginning of prompts. Disabling this can make the replies more creative.""" truncation_length: Optional[int] = 2048 """Truncate the prompt up to this length. The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.""" ban_eos_token: bool = Field(False, alias="ban_eos_token") """Ban the eos_token. Forces the model to never end the generation prematurely.""" skip_special_tokens: bool = Field(True, alias="skip_special_tokens") """Skip special tokens. 
Some specific models need this unset.""" stopping_strings: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" streaming: bool = False """Whether to stream the results, token by token.""" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling textgen.""" return { "max_new_tokens": self.max_new_tokens, "do_sample": self.do_sample, "temperature": self.temperature, "top_p": self.top_p, "typical_p": self.typical_p, "epsilon_cutoff": self.epsilon_cutoff, "eta_cutoff": self.eta_cutoff, "repetition_penalty": self.repetition_penalty, "top_k": self.top_k, "min_length": self.min_length, "no_repeat_ngram_size": self.no_repeat_ngram_size, "num_beams": self.num_beams, "penalty_alpha": self.penalty_alpha, "length_penalty": self.length_penalty, "early_stopping": self.early_stopping, "seed": self.seed, "add_bos_token": self.add_bos_token, "truncation_length": self.truncation_length, "ban_eos_token": self.ban_eos_token, "skip_special_tokens": self.skip_special_tokens, "stopping_strings": self.stopping_strings, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_url": self.model_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "textgen" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by textgen. Args: stop (Optional[List[str]]): List of stop sequences for textgen. Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params # if self.stop and stop is not None: if self.stopping_strings and stop is not None: raise ValueError("`stop` found in both the input and default params.") if self.preset is None: params = self._default_params else: params = {"preset": self.preset} # then sets it as configured, or default to an empty list: params["stop"] = self.stopping_strings or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the textgen web API and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import TextGen llm = TextGen(model_url="http://localhost:5000") llm("Write a story about llamas.") """ if self.streaming: combined_text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): combined_text_output += chunk.text print(prompt + combined_text_output) result = combined_text_output else: url = f"{self.model_url}/api/v1/generate" params = self._get_parameters(stop) request = params.copy() request["prompt"] = prompt response = requests.post(url, json=request) if response.status_code == 200: result = response.json()["results"][0]["text"] print(prompt + result) else: print(f"ERROR: response: {response}") result = "" return result def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. 
stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like objects containing a string token and metadata. See text-generation-webui docs and below for more. Example: .. code-block:: python from langchain.llms import TextGen llm = TextGen( model_url = "ws://localhost:5005" streaming=True ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): print(chunk, end='', flush=True) """ try: import websocket except ImportError: raise ImportError( "The `websocket-client` package is required for streaming." ) params = {**self._get_parameters(stop), **kwargs} url = f"{self.model_url}/api/v1/stream" request = params.copy() request["prompt"] = prompt websocket_client = websocket.WebSocket() websocket_client.connect(url) websocket_client.send(json.dumps(request)) while True: result = websocket_client.recv() result = json.loads(result) if result["event"] == "text_stream": chunk = GenerationChunk( text=result["text"], generation_info=None, ) yield chunk elif result["event"] == "stream_end": websocket_client.close() return if run_manager: run_manager.on_llm_new_token(token=chunk.text)
[ "langchain.pydantic_v1.Field", "langchain.schema.output.GenerationChunk" ]
[((303, 330), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (320, 330), False, 'import logging\n'), ((1278, 1308), 'langchain.pydantic_v1.Field', 'Field', (['(True)'], {'alias': '"""do_sample"""'}), "(True, alias='do_sample')\n", (1283, 1308), False, 'from langchain.pydantic_v1 import Field\n'), ((3044, 3080), 'langchain.pydantic_v1.Field', 'Field', (['(False)'], {'alias': '"""early_stopping"""'}), "(False, alias='early_stopping')\n", (3049, 3080), False, 'from langchain.pydantic_v1 import Field\n'), ((3123, 3146), 'langchain.pydantic_v1.Field', 'Field', (['(-1)'], {'alias': '"""seed"""'}), "(-1, alias='seed')\n", (3128, 3146), False, 'from langchain.pydantic_v1 import Field\n'), ((3205, 3239), 'langchain.pydantic_v1.Field', 'Field', (['(True)'], {'alias': '"""add_bos_token"""'}), "(True, alias='add_bos_token')\n", (3210, 3239), False, 'from langchain.pydantic_v1 import Field\n'), ((3589, 3624), 'langchain.pydantic_v1.Field', 'Field', (['(False)'], {'alias': '"""ban_eos_token"""'}), "(False, alias='ban_eos_token')\n", (3594, 3624), False, 'from langchain.pydantic_v1 import Field\n'), ((3745, 3785), 'langchain.pydantic_v1.Field', 'Field', (['(True)'], {'alias': '"""skip_special_tokens"""'}), "(True, alias='skip_special_tokens')\n", (3750, 3785), False, 'from langchain.pydantic_v1 import Field\n'), ((9592, 9613), 'websocket.WebSocket', 'websocket.WebSocket', ([], {}), '()\n', (9611, 9613), False, 'import websocket\n'), ((7631, 7663), 'requests.post', 'requests.post', (['url'], {'json': 'request'}), '(url, json=request)\n', (7644, 7663), False, 'import requests\n'), ((9684, 9703), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (9694, 9703), False, 'import json\n'), ((9792, 9810), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (9802, 9810), False, 'import json\n'), ((9885, 9943), 'langchain.schema.output.GenerationChunk', 'GenerationChunk', ([], {'text': "result['text']", 'generation_info': 'None'}), "(text=result['text'], generation_info=None)\n", (9900, 9943), False, 'from langchain.schema.output import GenerationChunk\n')]
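A minimal call sketch for the TextGen wrapper defined above; it assumes a text-generation-webui server with --api enabled is reachable at the placeholder URL, and the sampling parameters are illustrative.

# Placeholder endpoint; requires a locally running text-generation-webui instance.
llm = TextGen(model_url="http://localhost:5000", max_new_tokens=64, temperature=0.7)
print(llm("Write one sentence about llamas."))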
# imports from loguru import logger # LLM modules from langchain_community.llms.huggingface_hub import HuggingFaceHub from langchain_community.llms.ollama import Ollama from langchain_openai import ChatOpenAI, AzureChatOpenAI from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler # local imports import settings_template as settings class LLMCreator(): """ LLM class to import into other modules """ def __init__(self, llm_type=None, llm_model_type=None, local_api_url=None, azureopenai_api_version=None) -> None: self.llm_type = settings.LLM_TYPE if llm_type is None else llm_type self.llm_model_type = settings.LLM_MODEL_TYPE if llm_model_type is None else llm_model_type self.local_api_url = settings.API_URL if local_api_url is None else local_api_url self.azureopenai_api_version = settings.AZUREOPENAI_API_VERSION \ if azureopenai_api_version is None and settings.AZUREOPENAI_API_VERSION is not None \ else azureopenai_api_version def get_llm(self): """ returns, based on settings, the llm object """ # if llm_type is "chatopenai" if self.llm_type == "chatopenai": # default llm_model_type value is "gpt-3.5-turbo" self.llm_model_type = "gpt-3.5-turbo" if self.llm_model_type == "gpt35_16": self.llm_model_type = "gpt-3.5-turbo-16k" elif self.llm_model_type == "gpt4": self.llm_model_type = "gpt-4" self.llm = ChatOpenAI( client=None, model=self.llm_model_type, temperature=0, ) # else, if llm_type is "huggingface" elif self.llm_type == "huggingface": # default value is llama-2, with maximum output length 512 self.llm_model_type = "meta-llama/Llama-2-7b-chat-hf" max_length = 512 if self.llm_model_type == 'GoogleFlan': self.llm_model_type = 'google/flan-t5-base' max_length = 512 self.llm = HuggingFaceHub(repo_id=self.llm_model_type, model_kwargs={"temperature": 0.1, "max_length": max_length} ) # else, if llm_type is "local_llm" elif self.llm_type == "local_llm": logger.info("Use Local LLM") logger.info("Retrieving " + self.llm_model_type) # If API URL is defined, use it if self.local_api_url is not None: logger.info("Using local api url " + self.local_api_url) self.llm = Ollama( model=self.llm_model_type, base_url=self.local_api_url, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]) ) else: self.llm = Ollama( model=self.llm_model_type, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]) ) logger.info("Retrieved " + self.llm_model_type) # else, if llm_type is "azureopenai" elif self.llm_type == "azureopenai": logger.info("Use Azure OpenAI LLM") logger.info("Retrieving " + self.llm_model_type) self.llm = AzureChatOpenAI( azure_deployment=self.llm_model_type, azure_endpoint=self.local_api_url, api_version=self.azureopenai_api_version, ) logger.info("Retrieved " + self.llm_model_type) return self.llm
[ "langchain_openai.AzureChatOpenAI", "langchain_openai.ChatOpenAI", "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain_community.llms.huggingface_hub.HuggingFaceHub" ]
[((1610, 1675), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'client': 'None', 'model': 'self.llm_model_type', 'temperature': '(0)'}), '(client=None, model=self.llm_model_type, temperature=0)\n', (1620, 1675), False, 'from langchain_openai import ChatOpenAI, AzureChatOpenAI\n'), ((2163, 2272), 'langchain_community.llms.huggingface_hub.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'self.llm_model_type', 'model_kwargs': "{'temperature': 0.1, 'max_length': max_length}"}), "(repo_id=self.llm_model_type, model_kwargs={'temperature': \n 0.1, 'max_length': max_length})\n", (2177, 2272), False, 'from langchain_community.llms.huggingface_hub import HuggingFaceHub\n'), ((2495, 2523), 'loguru.logger.info', 'logger.info', (['"""Use Local LLM"""'], {}), "('Use Local LLM')\n", (2506, 2523), False, 'from loguru import logger\n'), ((2536, 2584), 'loguru.logger.info', 'logger.info', (["('Retrieving ' + self.llm_model_type)"], {}), "('Retrieving ' + self.llm_model_type)\n", (2547, 2584), False, 'from loguru import logger\n'), ((3206, 3253), 'loguru.logger.info', 'logger.info', (["('Retrieved ' + self.llm_model_type)"], {}), "('Retrieved ' + self.llm_model_type)\n", (3217, 3253), False, 'from loguru import logger\n'), ((2692, 2748), 'loguru.logger.info', 'logger.info', (["('Using local api url ' + self.local_api_url)"], {}), "('Using local api url ' + self.local_api_url)\n", (2703, 2748), False, 'from loguru import logger\n'), ((3356, 3391), 'loguru.logger.info', 'logger.info', (['"""Use Azure OpenAI LLM"""'], {}), "('Use Azure OpenAI LLM')\n", (3367, 3391), False, 'from loguru import logger\n'), ((3404, 3452), 'loguru.logger.info', 'logger.info', (["('Retrieving ' + self.llm_model_type)"], {}), "('Retrieving ' + self.llm_model_type)\n", (3415, 3452), False, 'from loguru import logger\n'), ((3476, 3611), 'langchain_openai.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'azure_deployment': 'self.llm_model_type', 'azure_endpoint': 'self.local_api_url', 'api_version': 'self.azureopenai_api_version'}), '(azure_deployment=self.llm_model_type, azure_endpoint=self.\n local_api_url, api_version=self.azureopenai_api_version)\n', (3491, 3611), False, 'from langchain_openai import ChatOpenAI, AzureChatOpenAI\n'), ((3682, 3729), 'loguru.logger.info', 'logger.info', (["('Retrieved ' + self.llm_model_type)"], {}), "('Retrieved ' + self.llm_model_type)\n", (3693, 3729), False, 'from loguru import logger\n'), ((2934, 2966), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (2964, 2966), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((3141, 3173), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (3171, 3173), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
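A usage sketch for the LLMCreator class above, assuming the class and its settings_template module are importable and an Ollama server is running at the placeholder URL; the model name is illustrative.

# Placeholder model name and endpoint for a local Ollama server.
creator = LLMCreator(
    llm_type="local_llm",
    llm_model_type="llama2",
    local_api_url="http://localhost:11434",
)
llm = creator.get_llm()
print(llm.invoke("Say hello in one short sentence."))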
from typing import List from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain_core.documents import Document from dotenv import load_dotenv from themind.llm.func_instraction import instruct from pydantic import BaseModel import csv from themind.vectorstores.chunking.question_answer_strategy import QuestionChunkingStrategy from themind.vectorstores.chunking.chunking_strategy import ChunkingStrategy class VectorStore(object): def __init__(self, local_storage_dir: str = "./"): self.vectorstore = Chroma(collection_name="all-data", persist_directory=local_storage_dir, embedding_function=OpenAIEmbeddings()) def ingest(self, uid: str, data: List[str], chunking_strategy: ChunkingStrategy = QuestionChunkingStrategy): # Question & Answear strategy # for each chunk, crete a list a question and answear from the text, similar how embeddings are being trained! for chunk in data: print('Chunk: ' + chunk) docs = chunking_strategy.chunk(uid, chunk) if len(docs) == 0: print('No documents were created for this chunk') continue # append metadata to its document for doc in docs: doc.metadata['uid'] = uid # doc.metadata['location'] = location # doc.metadata['created_at'] = created_at self.vectorstore.add_documents(docs) print('Added chunk to vectorstore') def query(self, uid: str, query: str): output = self.vectorstore.similarity_search(query=query, k=10, filters={"uid": uid}) print(output) @instruct def answer(query: str, texts: List[str]) -> str: """ This was a query user made: {query} This is a context we have: {texts} Reply: """ return answer(query, [o.page_content for o in output]) if __name__ == '__main__': uid = 'test' # Process the CSV data csv_path = "/Users/zvada/Documents/TheMind/themind-memory/data/alex-rivera-ground-truth.csv" with open(csv_path, 'r') as file: sentences = file.read().splitlines() vec = VectorStore() vec.ingest(uid, sentences) # output = vec.query(uid, "what should i give laura for christmas?") output = vec.query(uid, "what is alex's favorite food?") print(output)
[ "langchain.embeddings.OpenAIEmbeddings" ]
[((657, 675), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (673, 675), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
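A reduced sketch of the vector-store calls the VectorStore class above wraps, using an in-memory Chroma collection; OPENAI_API_KEY must be set, and the texts and query are illustrative.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

store = Chroma(collection_name="all-data", embedding_function=OpenAIEmbeddings())
store.add_texts(["Alex's favorite food is ramen."], metadatas=[{"uid": "test"}])
print(store.similarity_search("What does Alex like to eat?", k=1))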
import re import time import copy import random import numpy as np import multiprocessing import matplotlib.pyplot as plt import modules.prompts as prompts from langchain import PromptTemplate from shapely.ops import substring from shapely.geometry import Polygon, box, Point, LineString class WallObjectGenerator(): def __init__(self, llm, object_retriever): self.json_template = {"assetId": None, "id": None, "kinematic": True, "position": {}, "rotation": {}, "material": None, "roomId": None} self.llm = llm self.object_retriever = object_retriever self.database = object_retriever.database self.constraint_prompt_template = PromptTemplate(input_variables=["room_type", "wall_height", "floor_objects", "wall_objects"], template=prompts.wall_object_constraints_prompt) self.grid_size = 25 self.default_height = 150 self.constraint_type = "llm" def generate_wall_objects(self, scene, use_constraint=True): doors = scene["doors"] windows = scene["windows"] open_walls = scene["open_walls"] wall_height = scene["wall_height"] wall_objects = [] selected_objects = scene["selected_objects"] packed_args = [(room, scene, doors, windows, open_walls, wall_height, selected_objects, use_constraint) for room in scene["rooms"]] pool = multiprocessing.Pool(processes=4) all_placements = pool.map(self.generate_wall_objects_per_room, packed_args) pool.close() pool.join() for placements in all_placements: wall_objects += placements return wall_objects def generate_wall_objects_per_room(self, args): room, scene, doors, windows, open_walls, wall_height, selected_objects, use_constraint = args selected_wall_objects = selected_objects[room["roomType"]]["wall"] selected_wall_objects = self.order_objects_by_size(selected_wall_objects) wall_object_name2id = {object_name: asset_id for object_name, asset_id in selected_wall_objects} room_id = room["id"] room_type = room["roomType"] wall_object_names = list(wall_object_name2id.keys()) floor_object_name2id = {object["object_name"]: object["assetId"] for object in scene["floor_objects"] if object["roomId"] == room["id"]} floor_object_names = list(floor_object_name2id.keys()) # get constraints constraints_prompt = self.constraint_prompt_template.format(room_type=room_type, wall_height=int(wall_height*100), floor_objects=", ".join(floor_object_names), wall_objects=", ".join(wall_object_names)) if self.constraint_type == "llm" and use_constraint: constraint_plan = self.llm(constraints_prompt) else: constraint_plan = "" for object_name in wall_object_names: random_height = random.randint(0, int(wall_height*100)) constraint_plan += f"{object_name} | N/A | {random_height} \n" print(f"\nwall object constraint plan for {room_type}:\n{constraint_plan}") constraints = self.parse_wall_object_constraints(constraint_plan, wall_object_names, floor_object_names) # get wall objects wall_object2dimension = {object_name: self.database[object_id]['assetMetadata']['boundingBox'] for object_name, object_id in wall_object_name2id.items()} wall_objects_list = [(object_name, (wall_object2dimension[object_name]['x'] * 100, wall_object2dimension[object_name]['y'] * 100, wall_object2dimension[object_name]['z'] * 100)) for object_name in constraints] # update constraints with max height wall_object2max_height = {object_name: min(scene["wall_height"] * 100 - wall_object2dimension[object_name]["y"] * 100 - 20, constraints[object_name]["height"]) for object_name in constraints} for object_name in constraints: constraints[object_name]["height"] = max(wall_object2max_height[object_name], 0) # avoid negative height # 
get initial state room_vertices = [(x * 100, y * 100) for (x, y) in room["vertices"]] room_poly = Polygon(room_vertices) initial_state = self.get_initial_state(scene, doors, windows, room_vertices, open_walls) # solve room_x, room_z = self.get_room_size(room) grid_size = max(room_x // 20, room_z // 20) solver = DFS_Solver_Wall(grid_size=grid_size, max_duration=5, constraint_bouns=100) solutions = solver.get_solution(room_poly, wall_objects_list, constraints, initial_state) placements = self.solution2placement(solutions, wall_object_name2id, room_id) return placements def parse_wall_object_constraints(self, constraint_text, wall_object_names, floor_object_names): object2constraints = {} lines = [line.lower() for line in constraint_text.split('\n') if "|" in line] for line in lines: # remove index pattern = re.compile(r'^\d+\.\s*') line = pattern.sub('', line) if line[-1] == ".": line = line[:-1] # remove the last period try: object_name, location, height = line.split("|") object_name = object_name.replace("*", "").strip() location = location.strip() height = height.strip() except: print(f"Warning: cannot parse {line}.") continue if object_name not in wall_object_names: continue try: target_floor_object_name = location.split(", ")[-1] except: print(f"Warning: cannot parse {location}."); target_floor_object_name = None try: height = int(height) except: height = self.default_height if target_floor_object_name in floor_object_names: object2constraints[object_name] = {"target_floor_object_name": target_floor_object_name, "height": height} else: object2constraints[object_name] = {"target_floor_object_name": None, "height": height} return object2constraints def get_room_size(self, room): floor_polygon = room["floorPolygon"] x_values = [point['x'] for point in floor_polygon] z_values = [point['z'] for point in floor_polygon] return (int(max(x_values) - min(x_values)) * 100, int(max(z_values) - min(z_values)) * 100) def check_wall_object_size(self, room_size, object_size): if object_size["x"] * 100 > max(room_size) * 0.5: print(f"Warning: object size {object_size} is too large for room size {room_size}.") return False else: return True def get_initial_state(self, scene, doors, windows, room_vertices, open_walls): room_poly = Polygon(room_vertices) initial_state = {} i = 0 for door in doors: door_boxes = door["doorBoxes"] for door_box in door_boxes: door_vertices = [(x * 100, z * 100) for (x, z) in door_box] door_poly = Polygon(door_vertices) door_center = door_poly.centroid if room_poly.contains(door_center): door_height = door["assetPosition"]["y"] * 100 * 2 x_min, z_min, x_max, z_max = door_poly.bounds initial_state[f"door-{i}"] = ((x_min, 0, z_min), (x_max, door_height, z_max), 0, door_vertices, 1) i += 1 for window in windows: window_boxes = window["windowBoxes"] for window_box in window_boxes: window_vertices = [(x * 100, z * 100) for (x, z) in window_box] window_poly = Polygon(window_vertices) window_center = window_poly.centroid if room_poly.contains(window_center): y_min = window["holePolygon"][0]["y"] * 100 y_max = window["holePolygon"][1]["y"] * 100 x_min, z_min, x_max, z_max = window_poly.bounds initial_state[f"window-{i}"] = ((x_min, y_min, z_min), (x_max, y_max, z_max), 0, window_vertices, 1) i += 1 if len(open_walls) != 0: open_wall_boxes = open_walls["openWallBoxes"] for open_wall_box in open_wall_boxes: open_wall_vertices = [(x * 100, z * 100) for (x, z) in open_wall_box] open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid if 
room_poly.contains(open_wall_center): x_min, z_min, x_max, z_max = open_wall_poly.bounds initial_state[f"open-{i}"] = ((x_min, 0, z_min), (x_max, scene["wall_height"] * 100, z_max), 0, open_wall_vertices, 1) i += 1 for object in scene["floor_objects"]: try: object_vertices = object["vertices"] except: continue object_poly = Polygon(object_vertices) object_center = object_poly.centroid if room_poly.contains(object_center): object_height = object["position"]["y"] * 100 * 2 # the height should be twice the value of the y coordinate x_min, z_min, x_max, z_max = object_poly.bounds initial_state[object["object_name"]] = ((x_min, 0, z_min), (x_max, object_height, z_max), object["rotation"]["y"], object_vertices, 1) return initial_state def solution2placement(self, solutions, wall_object_name2id, room_id): placements = [] for object_name, solution in solutions.items(): if object_name not in wall_object_name2id: continue placement = self.json_template.copy() placement["assetId"] = wall_object_name2id[object_name] placement["id"] = f"{object_name} ({room_id})" position_x = (solution[0][0] + solution[1][0]) / 200 position_y = (solution[0][1] + solution[1][1]) / 200 position_z = (solution[0][2] + solution[1][2]) / 200 placement["position"] = {"x": position_x, "y": position_y, "z": position_z} placement["rotation"] = {"x": 0, "y": solution[2], "z": 0} # move the object a little bit to avoid collision if placement["rotation"]["y"] == 0: placement["position"]["z"] += 0.01 elif placement["rotation"]["y"] == 90: placement["position"]["x"] += 0.01 elif placement["rotation"]["y"]== 180: placement["position"]["z"] -= 0.01 elif placement["rotation"]["y"] == 270: placement["position"]["x"] -= 0.01 placement["roomId"] = room_id placement["vertices"] = list(solution[3]) placement["object_name"] = object_name placements.append(placement) return placements def order_objects_by_size(self, selected_wall_objects): ordered_wall_objects = [] for object_name, asset_id in selected_wall_objects: dimensions = self.database[asset_id]['assetMetadata']['boundingBox'] size = dimensions["x"] ordered_wall_objects.append([object_name, asset_id, size]) ordered_wall_objects.sort(key=lambda x: x[2], reverse=True) ordered_wall_objects_no_size = [[object_name, asset_id] for object_name, asset_id, size in ordered_wall_objects] return ordered_wall_objects_no_size class SolutionFound(Exception): def __init__(self, solution): self.solution = solution pass class DFS_Solver_Wall(): def __init__(self, grid_size, random_seed=0, max_duration=5, constraint_bouns=100): self.grid_size = grid_size self.random_seed = random_seed self.max_duration = max_duration # maximum allowed time in seconds self.constraint_bouns = constraint_bouns self.start_time = None self.solutions = [] self.visualize = False def get_solution(self, room_poly, wall_objects_list, constraints, initial_state): grid_points = self.create_grids(room_poly) self.start_time = time.time() try: self.dfs(room_poly, wall_objects_list, constraints, grid_points, initial_state) except SolutionFound as e: print(f"Time taken: {time.time() - self.start_time}") max_solution = self.get_max_solution(self.solutions) if self.visualize: self.visualize_grid(room_poly, grid_points, max_solution) return max_solution def get_max_solution(self, solutions): path_weights = [] for solution in solutions: path_weights.append(sum([obj[-1] for obj in solution.values()])) max_index = np.argmax(path_weights) return solutions[max_index] def dfs(self, room_poly, wall_objects_list, constraints, grid_points, 
placed_objects): if len(wall_objects_list) == 0: self.solutions.append(placed_objects) return placed_objects if time.time() - self.start_time > self.max_duration: print(f"Time limit reached.") raise SolutionFound(self.solutions) object_name, object_dim = wall_objects_list[0] placements = self.get_possible_placements(room_poly, object_dim, constraints[object_name], grid_points, placed_objects) if len(placements) == 0: self.solutions.append(placed_objects) paths = [] for placement in placements: placed_objects_updated = copy.deepcopy(placed_objects) placed_objects_updated[object_name] = placement sub_paths = self.dfs(room_poly, wall_objects_list[1:], constraints, grid_points, placed_objects_updated) paths.extend(sub_paths) return paths def get_possible_placements(self, room_poly, object_dim, constraint, grid_points, placed_objects): all_solutions = self.filter_collision(placed_objects, self.get_all_solutions(room_poly, grid_points, object_dim, constraint["height"])) random.shuffle(all_solutions) target_floor_object_name = constraint["target_floor_object_name"] if target_floor_object_name is not None and target_floor_object_name in placed_objects: all_solutions = self.score_solution_by_distance(all_solutions, placed_objects[target_floor_object_name]) # order solutions by distance to target floor object all_solutions = sorted(all_solutions, key=lambda x: x[-1], reverse=True) return all_solutions def create_grids(self, room_poly): # Get the coordinates of the polygon poly_coords = list(room_poly.exterior.coords) grid_points = [] # Iterate over each pair of points (edges of the polygon) for i in range(len(poly_coords) - 1): line = LineString([poly_coords[i], poly_coords[i + 1]]) line_length = line.length # Create points along the edge at intervals of grid size for j in range(0, int(line_length), self.grid_size): point_on_line = substring(line, j, j) # Get a point at distance j from the start of the line if point_on_line: grid_points.append((point_on_line.x, point_on_line.y)) return grid_points def get_all_solutions(self, room_poly, grid_points, object_dim, height): obj_length, obj_height, obj_width = object_dim obj_half_length = obj_length / 2 rotation_adjustments = { 0: ((-obj_half_length, 0), (obj_half_length, obj_width)), 90: ((0, -obj_half_length), (obj_width, obj_half_length)), 180: ((-obj_half_length, -obj_width), (obj_half_length, 0)), 270: ((-obj_width, -obj_half_length), (0, obj_half_length)) } solutions = [] for rotation in [0, 90, 180, 270]: for point in grid_points: center_x, center_y = point lower_left_adjustment, upper_right_adjustment = rotation_adjustments[rotation] lower_left = (center_x + lower_left_adjustment[0], center_y + lower_left_adjustment[1]) upper_right = (center_x + upper_right_adjustment[0], center_y + upper_right_adjustment[1]) obj_box = box(*lower_left, *upper_right) if room_poly.contains(obj_box): object_coords = obj_box.exterior.coords[:] coordinates_on_edge = [coord for coord in object_coords if room_poly.boundary.contains(Point(coord))] coordinates_on_edge = list(set(coordinates_on_edge)) if len(coordinates_on_edge) >= 2: vertex_min = (lower_left[0], height, lower_left[1]) vertex_max = (upper_right[0], height + obj_height, upper_right[1]) solutions.append([vertex_min, vertex_max, rotation, tuple(obj_box.exterior.coords[:]), 1]) return solutions def filter_collision(self, placed_objects, solutions): def intersect_3d(box1, box2): # box1 and box2 are dictionaries with 'min' and 'max' keys, # which are tuples representing the minimum and maximum corners of the 3D 
box. for i in range(3): if box1['max'][i] < box2['min'][i] or box1['min'][i] > box2['max'][i]: return False return True valid_solutions = [] boxes = [{"min": vertex_min, "max": vertex_max} for vertex_min, vertex_max, rotation, box_coords, path_weight in placed_objects.values()] for solution in solutions: for box in boxes: if intersect_3d(box, {"min": solution[0], "max": solution[1]}): break else: valid_solutions.append(solution) return valid_solutions def score_solution_by_distance(self, solutions, target_object): distances = [] scored_solutions = [] for solution in solutions: center_x, center_y, center_z = (solution[0][0]+solution[1][0])/2, (solution[0][1]+solution[1][1])/2, (solution[0][2]+solution[1][2])/2 target_x, target_y, target_z = (target_object[0][0]+target_object[1][0])/2, (target_object[0][1]+target_object[1][1])/2, (target_object[0][2]+target_object[1][2])/2 distance = np.sqrt((center_x - target_x)**2 + (center_y - target_y)**2 + (center_z - target_z)**2) distances.append(distance) scored_solution = solution.copy() scored_solution[-1] = solution[-1] + self.constraint_bouns * (1/distance) scored_solutions.append(scored_solution) return scored_solutions def visualize_grid(self, room_poly, grid_points, solutions): # create a new figure fig, ax = plt.subplots() # draw the room x, y = room_poly.exterior.xy ax.plot(x, y, 'b-', label='Room') # draw the grid points grid_x = [point[0] for point in grid_points] grid_y = [point[1] for point in grid_points] ax.plot(grid_x, grid_y, 'ro', markersize=2) # draw the solutions for object_name, solution in solutions.items(): vertex_min, vertex_max, rotation, box_coords = solution[:-1] center_x, center_y = (vertex_min[0]+vertex_max[0])/2, (vertex_min[2]+vertex_max[2])/2 # create a polygon for the solution obj_poly = Polygon(box_coords) x, y = obj_poly.exterior.xy ax.plot(x, y, 'g-', linewidth=2) ax.text(center_x, center_y, object_name, fontsize=12, ha='center') # set arrow direction based on rotation if rotation == 0: ax.arrow(center_x, center_y, 0, 25, head_width=10, fc='g') elif rotation == 90: ax.arrow(center_x, center_y, 25, 0, head_width=10, fc='g') elif rotation == 180: ax.arrow(center_x, center_y, 0, -25, head_width=10, fc='g') elif rotation == 270: ax.arrow(center_x, center_y, -25, 0, head_width=10, fc='g') ax.set_aspect('equal', 'box') # to keep the ratios equal along x and y axis plt.show()
[ "langchain.PromptTemplate" ]
[((704, 850), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['room_type', 'wall_height', 'floor_objects', 'wall_objects']", 'template': 'prompts.wall_object_constraints_prompt'}), "(input_variables=['room_type', 'wall_height', 'floor_objects',\n 'wall_objects'], template=prompts.wall_object_constraints_prompt)\n", (718, 850), False, 'from langchain import PromptTemplate\n'), ((1447, 1480), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (1467, 1480), False, 'import multiprocessing\n'), ((4462, 4484), 'shapely.geometry.Polygon', 'Polygon', (['room_vertices'], {}), '(room_vertices)\n', (4469, 4484), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((7172, 7194), 'shapely.geometry.Polygon', 'Polygon', (['room_vertices'], {}), '(room_vertices)\n', (7179, 7194), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((12562, 12573), 'time.time', 'time.time', ([], {}), '()\n', (12571, 12573), False, 'import time\n'), ((13175, 13198), 'numpy.argmax', 'np.argmax', (['path_weights'], {}), '(path_weights)\n', (13184, 13198), True, 'import numpy as np\n'), ((14518, 14547), 'random.shuffle', 'random.shuffle', (['all_solutions'], {}), '(all_solutions)\n', (14532, 14547), False, 'import random\n'), ((19373, 19387), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19385, 19387), True, 'import matplotlib.pyplot as plt\n'), ((20776, 20786), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20784, 20786), True, 'import matplotlib.pyplot as plt\n'), ((5311, 5337), 're.compile', 're.compile', (['"""^\\\\d+\\\\.\\\\s*"""'], {}), "('^\\\\d+\\\\.\\\\s*')\n", (5321, 5337), False, 'import re\n'), ((9403, 9427), 'shapely.geometry.Polygon', 'Polygon', (['object_vertices'], {}), '(object_vertices)\n', (9410, 9427), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((13991, 14020), 'copy.deepcopy', 'copy.deepcopy', (['placed_objects'], {}), '(placed_objects)\n', (14004, 14020), False, 'import copy\n'), ((15311, 15359), 'shapely.geometry.LineString', 'LineString', (['[poly_coords[i], poly_coords[i + 1]]'], {}), '([poly_coords[i], poly_coords[i + 1]])\n', (15321, 15359), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((18902, 18999), 'numpy.sqrt', 'np.sqrt', (['((center_x - target_x) ** 2 + (center_y - target_y) ** 2 + (center_z -\n target_z) ** 2)'], {}), '((center_x - target_x) ** 2 + (center_y - target_y) ** 2 + (center_z -\n target_z) ** 2)\n', (18909, 18999), True, 'import numpy as np\n'), ((20011, 20030), 'shapely.geometry.Polygon', 'Polygon', (['box_coords'], {}), '(box_coords)\n', (20018, 20030), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((7450, 7472), 'shapely.geometry.Polygon', 'Polygon', (['door_vertices'], {}), '(door_vertices)\n', (7457, 7472), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((8100, 8124), 'shapely.geometry.Polygon', 'Polygon', (['window_vertices'], {}), '(window_vertices)\n', (8107, 8124), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((8845, 8872), 'shapely.geometry.Polygon', 'Polygon', (['open_wall_vertices'], {}), '(open_wall_vertices)\n', (8852, 8872), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((13472, 13483), 'time.time', 'time.time', ([], {}), '()\n', (13481, 13483), False, 'import time\n'), ((15565, 15586), 'shapely.ops.substring', 'substring', (['line', 'j', 'j'], {}), '(line, 
j, j)\n', (15574, 15586), False, 'from shapely.ops import substring\n'), ((16776, 16806), 'shapely.geometry.box', 'box', (['*lower_left', '*upper_right'], {}), '(*lower_left, *upper_right)\n', (16779, 16806), False, 'from shapely.geometry import Polygon, box, Point, LineString\n'), ((12747, 12758), 'time.time', 'time.time', ([], {}), '()\n', (12756, 12758), False, 'import time\n'), ((17026, 17038), 'shapely.geometry.Point', 'Point', (['coord'], {}), '(coord)\n', (17031, 17038), False, 'from shapely.geometry import Polygon, box, Point, LineString\n')]
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain.prompts import (
    PromptTemplate,
)
from langchain.prompts.chat import ChatPromptTemplate
from config.config import OPENAI_API_KEY
from game.poker import PokerGameManager
from db.db_utils import DatabaseManager
import json

class GPTPlayer:
    def __init__(self, db: DatabaseManager, model_name="gpt-3.5-turbo"):
        self.db = db
        llm = ChatOpenAI(model_name=model_name)
        output_parser = StrOutputParser()
        template = '''
        Imagine you're a poker bot in a heads-up Texas Hold'em game. Your play is optimal, mixing strategic bluffs and strong hands. You raise on strength, going All-in only with the best hands. Folding against a superior opponent hand, you call and check when fitting. Remember, only "call" the ALL-IN if your hand is better.
        Please reply in the following JSON format: {{"your_hand": "what is the current hand you are playing", "opponents_hand": "what do you think your opponent has based on how he has played", "thought_process": "what is your thought process", "action": "your action", "raise_amount": your raise amount if applicable}}
        Note: If the action you chose doesn't involve a raise, please do not include the "raise_amount" key in your JSON response.
        '''
        prompt = ChatPromptTemplate.from_messages([
            ("system", template),
            ("user", "{input}")
        ])
        self.chain = prompt | llm | output_parser

    def _extract_action(self, json_string, pokerGame: PokerGameManager):
        min_raise, max_raise = pokerGame.return_min_max_raise(1)
        try:
            json_data = json.loads(json_string)
            action = json_data['action'].capitalize()
            raise_amount = 0
            if action == "Raise":
                raise_amount = json_data['raise_amount']
                raise_amount = int(raise_amount)
                if raise_amount < min_raise:
                    raise_amount = min_raise
                elif raise_amount > max_raise:
                    action = "All-in"
                    raise_amount = pokerGame.return_player_stack(1)
            self.db.record_gpt_action(action, raise_amount, json_string)
            return (action, raise_amount)
        except Exception:
            return ("Default", 0)

    def pre_flop_small_blind(self, pokerGame: PokerGameManager):
        # return Call, Raise, Fold or All-in
        inputs = {
            'small_blind': pokerGame.small_blind,
            'big_blind': pokerGame.big_blind,
            'stack': pokerGame.return_player_stack(1),
            'opponents_stack': pokerGame.return_player_stack(0),
            'hand': pokerGame.players[1].return_long_hand(),
            'pot': pokerGame.current_pot,
            'amount_to_call': pokerGame.big_blind - pokerGame.small_blind
        }
        human_template = '''
        The small blind is {small_blind} chips and the big blind is {big_blind} chips.
        You have {stack} chips in your stack and your opponent has {opponents_stack} chips.
        Your hand is {hand}.
        The pot is {pot} chips.
        You are the small blind and it's your turn. It costs {amount_to_call} chips to call.
        What action would you take? (Call, Raise, All-in, or Fold)
        '''
        formatted_text = human_template.format(**inputs)
        response = self.chain.invoke({'input': formatted_text})
        return self._extract_action(response, pokerGame)

    def pre_flop_big_blind(self, pokerGame: PokerGameManager):
        # return Check, Raise, or All-in
        inputs = {
            'small_blind': pokerGame.small_blind,
            'big_blind': pokerGame.big_blind,
            'stack': pokerGame.return_player_stack(1),
            'opponents_stack': pokerGame.return_player_stack(0),
            'hand': pokerGame.players[1].return_long_hand(),
            'pot': pokerGame.current_pot,
            'amount_to_call': pokerGame.big_blind - pokerGame.small_blind
        }
        human_template = '''
        The small blind is {small_blind} chips and the big blind is {big_blind} chips.
        You have {stack} chips in your stack and your opponent has {opponents_stack} chips.
        Your hand is {hand}.
        The pot is {pot} chips.
        You are the big blind and it's your turn. It costs {amount_to_call} chips to call.
        What action would you take? (Check, Raise, or All-in)
        '''
        formatted_text = human_template.format(**inputs)
        response = self.chain.invoke({'input': formatted_text})
        return self._extract_action(response, pokerGame)

    def first_to_act(self, pokerGame: PokerGameManager):
        # return Check, Raise, or All-in
        inputs = {
            'small_blind': pokerGame.small_blind,
            'big_blind': pokerGame.big_blind,
            'stack': pokerGame.return_player_stack(1),
            'opponents_stack': pokerGame.return_player_stack(0),
            'hand': pokerGame.players[1].return_long_hand(),
            'pot': pokerGame.current_pot,
            'round': pokerGame.round,
            'community_cards': pokerGame.return_community_cards()
        }
        human_template = '''
        The small blind is {small_blind} chips and the big blind is {big_blind} chips.
        You have {stack} chips in your stack and your opponent has {opponents_stack} chips.
        Your hand is {hand}.
        The pot is {pot} chips.
        It's the {round} round and you're first to act.
        The community cards are {community_cards}.
        What action would you take? (Check, Raise, or All-in)
        '''
        formatted_text = human_template.format(**inputs)
        response = self.chain.invoke({'input': formatted_text})
        return self._extract_action(response, pokerGame)

    def player_check(self, pokerGame: PokerGameManager):
        # return Check, Raise, or All-in
        inputs = {
            'small_blind': pokerGame.small_blind,
            'big_blind': pokerGame.big_blind,
            'stack': pokerGame.return_player_stack(1),
            'opponents_stack': pokerGame.return_player_stack(0),
            'hand': pokerGame.players[1].return_long_hand(),
            'pot': pokerGame.current_pot,
            'round': pokerGame.round,
            'community_cards': pokerGame.return_community_cards()
        }
        human_template = """
        The small blind is {small_blind} chips and the big blind is {big_blind} chips.
        You have {stack} chips in your stack and your opponent has {opponents_stack} chips.
        Your hand is {hand}.
        The pot is {pot} chips.
        It is the {round} round and the action checks to you.
        The community cards are {community_cards}.
        Based on this information, what action would you like to take? (Check, Raise, or All-in).
        """
        formatted_text = human_template.format(**inputs)
        response = self.chain.invoke({'input': formatted_text})
        return self._extract_action(response, pokerGame)

    def player_raise(self, pokerGame: PokerGameManager):
        # return Call, Raise, All-in, or Fold
        inputs = {
            'small_blind': pokerGame.small_blind,
            'big_blind': pokerGame.big_blind,
            'stack': pokerGame.return_player_stack(1),
            'opponents_stack': pokerGame.return_player_stack(0),
            'hand': pokerGame.players[1].return_long_hand(),
            'pot': pokerGame.current_pot,
            'round': pokerGame.round,
            'community_cards': pokerGame.return_community_cards(),
            'opponent_raise': pokerGame.current_bet,
            'amount_to_call': pokerGame.current_bet - pokerGame.players[1].round_pot_commitment
        }
        human_template = '''
        The small blind is {small_blind} chips and the big blind is {big_blind} chips.
        You have {stack} chips in your stack and your opponent has {opponents_stack} chips.
        Your hand is {hand}.
        The pot is {pot} chips.
        It's the {round} round.
        The community cards are {community_cards}.
        Your opponent has raised to {opponent_raise} chips. It costs {amount_to_call} chips to call.
        What action would you take?
(Call, Raise, All-in, or Fold) ''' formatted_text = human_template.format(**inputs) response = self.chain.invoke({'input': formatted_text}) return self._extract_action(response, pokerGame) def player_all_in(self, pokerGame: PokerGameManager): # return Call, or Fold amount_to_call = pokerGame.current_bet - pokerGame.players[1].round_pot_commitment if amount_to_call > pokerGame.return_player_stack(1): amount_to_call = pokerGame.return_player_stack(1) inputs = { 'small_blind': pokerGame.small_blind, 'big_blind': pokerGame.big_blind, 'stack': pokerGame.return_player_stack(1), 'hand': pokerGame.players[1].return_long_hand(), 'pot': pokerGame.current_pot, 'round': pokerGame.round, 'community_cards': pokerGame.return_community_cards(), 'opponent_raise': pokerGame.current_bet, 'amount_to_call': amount_to_call } human_template = ''' The small blind is {small_blind} chips and the big blind is {big_blind} chips. You have {stack} chips in your stack. Your hand is {hand}. The pot is {pot} chips. It's the {round} round. The community cards are {community_cards}. Your opponent has gone all in for {opponent_raise} chips. It costs {amount_to_call} chips to call. What action would you take? (Call, or Fold) ''' formatted_text = human_template.format(**inputs) response = self.chain.invoke({'input': formatted_text}) return self._extract_action(response, pokerGame)
[ "langchain_openai.ChatOpenAI", "langchain_core.output_parsers.StrOutputParser", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((456, 489), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (466, 489), False, 'from langchain_openai import ChatOpenAI\n'), ((514, 531), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (529, 531), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1408, 1485), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', template), ('user', '{input}')]"], {}), "([('system', template), ('user', '{input}')])\n", (1440, 1485), False, 'from langchain.prompts.chat import ChatPromptTemplate\n'), ((1755, 1778), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1765, 1778), False, 'import json\n')]
import logging from typing import Any, Dict, List, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra, root_validator from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class Clarifai(LLM): """Clarifai large language models. To use, you should have an account on the Clarifai platform, the ``clarifai`` python package installed, and the environment variable ``CLARIFAI_PAT`` set with your PAT key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Clarifai clarifai_llm = Clarifai(pat=CLARIFAI_PAT, \ user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) """ stub: Any #: :meta private: userDataObject: Any model_id: Optional[str] = None """Model id to use.""" model_version_id: Optional[str] = None """Model version id to use.""" app_id: Optional[str] = None """Clarifai application id to use.""" user_id: Optional[str] = None """Clarifai user id to use.""" pat: Optional[str] = None api_base: str = "https://api.clarifai.com" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that we have all required info to access Clarifai platform and python package exists in environment.""" values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT") user_id = values.get("user_id") app_id = values.get("app_id") model_id = values.get("model_id") if values["pat"] is None: raise ValueError("Please provide a pat.") if user_id is None: raise ValueError("Please provide a user_id.") if app_id is None: raise ValueError("Please provide a app_id.") if model_id is None: raise ValueError("Please provide a model_id.") try: from clarifai.auth.helper import ClarifaiAuthHelper from clarifai.client import create_stub except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) auth = ClarifaiAuthHelper( user_id=user_id, app_id=app_id, pat=values["pat"], base=values["api_base"], ) values["userDataObject"] = auth.get_user_app_id_proto() values["stub"] = create_stub(auth) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Clarifai API.""" return {} @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { **{ "user_id": self.user_id, "app_id": self.app_id, "model_id": self.model_id, } } @property def _llm_type(self) -> str: """Return type of llm.""" return "clarifai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Clarfai's PostModelOutputs endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = clarifai_llm("Tell me a joke.") """ try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." 
) # The userDataObject is created in the overview and # is required when using a PAT # If version_id None, Defaults to the latest model version post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id, inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt)) ) ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_model_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_model_failure}" ) text = post_model_outputs_response.outputs[0].data.text.raw # In order to make this consistent with other endpoints, we strip them. if stop is not None: text = enforce_stop_tokens(text, stop) return text def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" try: from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) # TODO: add caching here. generations = [] batch_size = 32 for i in range(0, len(prompts), batch_size): batch = prompts[i : i + batch_size] post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id, inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt)) ) for prompt in batch ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) first_model_failure = ( post_model_outputs_response.outputs[0].status if len(post_model_outputs_response.outputs) else None ) raise Exception( f"Post model outputs failed, status: " f"{post_model_outputs_response.status}, first output failure: " f"{first_model_failure}" ) for output in post_model_outputs_response.outputs: if stop is not None: text = enforce_stop_tokens(output.data.text.raw, stop) else: text = output.data.text.raw generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env", "langchain.schema.Generation", "langchain.schema.LLMResult", "langchain.pydantic_v1.root_validator" ]
[((381, 408), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (398, 408), False, 'import logging\n'), ((1472, 1488), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (1486, 1488), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((1702, 1753), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""pat"""', '"""CLARIFAI_PAT"""'], {}), "(values, 'pat', 'CLARIFAI_PAT')\n", (1722, 1753), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2565, 2664), 'clarifai.auth.helper.ClarifaiAuthHelper', 'ClarifaiAuthHelper', ([], {'user_id': 'user_id', 'app_id': 'app_id', 'pat': "values['pat']", 'base': "values['api_base']"}), "(user_id=user_id, app_id=app_id, pat=values['pat'], base=\n values['api_base'])\n", (2583, 2664), False, 'from clarifai.auth.helper import ClarifaiAuthHelper\n'), ((2808, 2825), 'clarifai.client.create_stub', 'create_stub', (['auth'], {}), '(auth)\n', (2819, 2825), False, 'from clarifai.client import create_stub\n'), ((8240, 8274), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (8249, 8274), False, 'from langchain.schema import Generation, LLMResult\n'), ((5810, 5841), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5829, 5841), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((8045, 8092), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['output.data.text.raw', 'stop'], {}), '(output.data.text.raw, stop)\n', (8064, 8092), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((8200, 8221), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (8210, 8221), False, 'from langchain.schema import Generation, LLMResult\n'), ((4864, 4894), 'clarifai_grpc.grpc.api.resources_pb2.Text', 'resources_pb2.Text', ([], {'raw': 'prompt'}), '(raw=prompt)\n', (4882, 4894), False, 'from clarifai_grpc.grpc.api import resources_pb2, service_pb2\n'), ((7057, 7087), 'clarifai_grpc.grpc.api.resources_pb2.Text', 'resources_pb2.Text', ([], {'raw': 'prompt'}), '(raw=prompt)\n', (7075, 7087), False, 'from clarifai_grpc.grpc.api import resources_pb2, service_pb2\n')]
"""This example shows how to use the ChatGPT API with LangChain to answer questions about Prefect.""" from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.text_splitter import CharacterTextSplitter from langchain.chains import ChatVectorDBChain from langchain.chat_models import ChatOpenAI from langchain.prompts.chat import ( ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain_prefect.loaders import GitHubRepoLoader from langchain_prefect.plugins import RecordLLMCalls documents = GitHubRepoLoader("PrefectHQ/prefect", glob="**/*.md").load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) documents = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() system_template = """Use the following pieces of context to answer the users question. If you don't know the answer, just say that you don't know, don't make up an answer. ---------------- {context}""" prompt = ChatPromptTemplate.from_messages( [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] ) qa = ChatVectorDBChain.from_llm( llm=ChatOpenAI(temperature=0), vectorstore=Chroma.from_documents(documents, embeddings), qa_prompt=prompt, ) with RecordLLMCalls( tags={qa.vectorstore.__class__.__name__}, max_prompt_tokens=int(1e4) ): chat_history = [] query = "What infrastructures does Prefect support?" result = qa({"question": query, "chat_history": chat_history}) print(result["answer"]) chat_history = [(query, result["answer"])] query = "Can I use Prefect with AWS?" result = qa({"question": query, "chat_history": chat_history}) print(result["answer"])
[ "langchain.text_splitter.CharacterTextSplitter", "langchain_prefect.loaders.GitHubRepoLoader", "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.Chroma.from_documents", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((680, 735), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (701, 735), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((803, 821), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (819, 821), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((602, 655), 'langchain_prefect.loaders.GitHubRepoLoader', 'GitHubRepoLoader', (['"""PrefectHQ/prefect"""'], {'glob': '"""**/*.md"""'}), "('PrefectHQ/prefect', glob='**/*.md')\n", (618, 655), False, 'from langchain_prefect.loaders import GitHubRepoLoader\n'), ((1084, 1142), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template'], {}), '(system_template)\n', (1125, 1142), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1152, 1206), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (1192, 1206), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1258, 1283), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1268, 1283), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1301, 1345), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['documents', 'embeddings'], {}), '(documents, embeddings)\n', (1322, 1345), False, 'from langchain.vectorstores import Chroma\n')]
from dotenv import load_dotenv load_dotenv() import os from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.prompts import ( PromptTemplate, ) from langchain.chains import ConversationChain from langchain.memory import ConversationBufferMemory from langchain.agents import AgentExecutor, ConversationalChatAgent from tools.make_thunder_tool import MakeThunderTool from tools.draw_tool import DrawTool from tools.is_in_heaven import IsInHeavenTool from voice.speech import speak from voice.listen import listen openai_api_key = os.getenv("OPENAI_API_KEY") class GodAgent: def __init__(self): self.executor = self.assemble_agent_executor() def assemble_agent_executor(self): template = """ You are omnipotent, kind, benevolent god. The user is "your child". Be a little bit condescending yet funny. You try to fulfill his every wish. Make witty comments about user wishes. You can use tools to help you fulfill user wishes. YOU MUST RESPOND IN THE CORRECT FORMAT. """ #Initialize LLM llm = ChatOpenAI(openai_api_key=openai_api_key, verbose=True, temperature=0.3, model_name="gpt-4") # Create memory memory = ConversationBufferMemory(memory_key="chat_history", human_prefix="User", ai_prefix="God", return_messages=True) #Register tools tools = [ IsInHeavenTool(), MakeThunderTool(), DrawTool() ] # Create Langchain agent and executor agent = ConversationalChatAgent.from_llm_and_tools(llm= llm, memory=memory, tools=tools, verbose=True, system_message=template) executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, memory=memory, verbose=True) return executor def processing_callback(self,recognized_input): print("--") print(recognized_input) print("") result = self.executor.run(input=recognized_input) #print(result) speak(result) def run(self): listen(self.processing_callback) GodAgent().run()
[ "langchain.agents.ConversationalChatAgent.from_llm_and_tools", "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI" ]
[((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((574, 601), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (583, 601), False, 'import os\n'), ((1087, 1183), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'verbose': '(True)', 'temperature': '(0.3)', 'model_name': '"""gpt-4"""'}), "(openai_api_key=openai_api_key, verbose=True, temperature=0.3,\n model_name='gpt-4')\n", (1097, 1183), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1222, 1337), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'human_prefix': '"""User"""', 'ai_prefix': '"""God"""', 'return_messages': '(True)'}), "(memory_key='chat_history', human_prefix='User',\n ai_prefix='God', return_messages=True)\n", (1246, 1337), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1550, 1673), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'memory': 'memory', 'tools': 'tools', 'verbose': '(True)', 'system_message': 'template'}), '(llm=llm, memory=memory, tools=\n tools, verbose=True, system_message=template)\n', (1592, 1673), False, 'from langchain.agents import AgentExecutor, ConversationalChatAgent\n'), ((1690, 1783), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'memory': 'memory', 'verbose': '(True)'}), '(agent=agent, tools=tools, memory=memory,\n verbose=True)\n', (1724, 1783), False, 'from langchain.agents import AgentExecutor, ConversationalChatAgent\n'), ((2021, 2034), 'voice.speech.speak', 'speak', (['result'], {}), '(result)\n', (2026, 2034), False, 'from voice.speech import speak\n'), ((2063, 2095), 'voice.listen.listen', 'listen', (['self.processing_callback'], {}), '(self.processing_callback)\n', (2069, 2095), False, 'from voice.listen import listen\n'), ((1397, 1413), 'tools.is_in_heaven.IsInHeavenTool', 'IsInHeavenTool', ([], {}), '()\n', (1411, 1413), False, 'from tools.is_in_heaven import IsInHeavenTool\n'), ((1427, 1444), 'tools.make_thunder_tool.MakeThunderTool', 'MakeThunderTool', ([], {}), '()\n', (1442, 1444), False, 'from tools.make_thunder_tool import MakeThunderTool\n'), ((1458, 1468), 'tools.draw_tool.DrawTool', 'DrawTool', ([], {}), '()\n', (1466, 1468), False, 'from tools.draw_tool import DrawTool\n')]
from __future__ import annotations from abc import abstractmethod from typing import TYPE_CHECKING, Any, Dict, List, Sequence from langchain.load.serializable import Serializable from langchain.pydantic_v1 import Field if TYPE_CHECKING: from langchain.prompts.chat import ChatPromptTemplate def get_buffer_string( messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" ) -> str: """Convert sequence of Messages to strings and concatenate them into one string. Args: messages: Messages to be converted to strings. human_prefix: The prefix to prepend to contents of HumanMessages. ai_prefix: THe prefix to prepend to contents of AIMessages. Returns: A single string concatenation of all input messages. Example: .. code-block:: python from langchain.schema import AIMessage, HumanMessage messages = [ HumanMessage(content="Hi, how are you?"), AIMessage(content="Good, how are you?"), ] get_buffer_string(messages) # -> "Human: Hi, how are you?\nAI: Good, how are you?" """ string_messages = [] for m in messages: if isinstance(m, HumanMessage): role = human_prefix elif isinstance(m, AIMessage): role = ai_prefix elif isinstance(m, SystemMessage): role = "System" elif isinstance(m, FunctionMessage): role = "Function" elif isinstance(m, ChatMessage): role = m.role else: raise ValueError(f"Got unsupported message type: {m}") message = f"{role}: {m.content}" if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs: message += f"{m.additional_kwargs['function_call']}" string_messages.append(message) return "\n".join(string_messages) class BaseMessage(Serializable): """The base abstract Message class. Messages are the inputs and outputs of ChatModels. """ content: str """The string contents of the message.""" additional_kwargs: dict = Field(default_factory=dict) """Any additional information.""" @property @abstractmethod def type(self) -> str: """Type of the Message, used for serialization.""" @property def lc_serializable(self) -> bool: """Whether this class is LangChain serializable.""" return True def __add__(self, other: Any) -> ChatPromptTemplate: from langchain.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other class BaseMessageChunk(BaseMessage): """A Message chunk, which can be concatenated with other Message chunks.""" def _merge_kwargs_dict( self, left: Dict[str, Any], right: Dict[str, Any] ) -> Dict[str, Any]: """Merge additional_kwargs from another BaseMessageChunk into this one.""" merged = left.copy() for k, v in right.items(): if k not in merged: merged[k] = v elif type(merged[k]) != type(v): raise ValueError( f'additional_kwargs["{k}"] already exists in this message,' " but with a different type." ) elif isinstance(merged[k], str): merged[k] += v elif isinstance(merged[k], dict): merged[k] = self._merge_kwargs_dict(merged[k], v) else: raise ValueError( f"Additional kwargs key {k} already exists in this message." 
) return merged def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, BaseMessageChunk): # If both are (subclasses of) BaseMessageChunk, # concat into a single BaseMessageChunk return self.__class__( content=self.content + other.content, additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) else: raise TypeError( 'unsupported operand type(s) for +: "' f"{self.__class__.__name__}" f'" and "{other.__class__.__name__}"' ) class HumanMessage(BaseMessage): """A Message from a human.""" example: bool = False """Whether this Message is being passed in to the model as part of an example conversation. """ @property def type(self) -> str: """Type of the message, used for serialization.""" return "human" class HumanMessageChunk(HumanMessage, BaseMessageChunk): """A Human Message chunk.""" pass class AIMessage(BaseMessage): """A Message from an AI.""" example: bool = False """Whether this Message is being passed in to the model as part of an example conversation. """ @property def type(self) -> str: """Type of the message, used for serialization.""" return "ai" class AIMessageChunk(AIMessage, BaseMessageChunk): """A Message chunk from an AI.""" pass class SystemMessage(BaseMessage): """A Message for priming AI behavior, usually passed in as the first of a sequence of input messages. """ @property def type(self) -> str: """Type of the message, used for serialization.""" return "system" class SystemMessageChunk(SystemMessage, BaseMessageChunk): """A System Message chunk.""" pass class FunctionMessage(BaseMessage): """A Message for passing the result of executing a function back to a model.""" name: str """The name of the function that was executed.""" @property def type(self) -> str: """Type of the message, used for serialization.""" return "function" class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): """A Function Message chunk.""" pass class ChatMessage(BaseMessage): """A Message that can be assigned an arbitrary speaker (i.e. role).""" role: str """The speaker / role of the Message.""" @property def type(self) -> str: """Type of the message, used for serialization.""" return "chat" class ChatMessageChunk(ChatMessage, BaseMessageChunk): """A Chat Message chunk.""" pass def _message_to_dict(message: BaseMessage) -> dict: return {"type": message.type, "data": message.dict()} def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]: """Convert a sequence of Messages to a list of dictionaries. Args: messages: Sequence of messages (as BaseMessages) to convert. Returns: List of messages as dicts. """ return [_message_to_dict(m) for m in messages] def _message_from_dict(message: dict) -> BaseMessage: _type = message["type"] if _type == "human": return HumanMessage(**message["data"]) elif _type == "ai": return AIMessage(**message["data"]) elif _type == "system": return SystemMessage(**message["data"]) elif _type == "chat": return ChatMessage(**message["data"]) elif _type == "function": return FunctionMessage(**message["data"]) else: raise ValueError(f"Got unexpected message type: {_type}") def messages_from_dict(messages: List[dict]) -> List[BaseMessage]: """Convert a sequence of messages from dicts to Message objects. Args: messages: Sequence of messages (as dicts) to convert. Returns: List of messages (BaseMessages). """ return [_message_from_dict(m) for m in messages]
[ "langchain.pydantic_v1.Field", "langchain.prompts.chat.ChatPromptTemplate" ]
[((2151, 2178), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2156, 2178), False, 'from langchain.pydantic_v1 import Field\n'), ((2610, 2645), 'langchain.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (2628, 2645), False, 'from langchain.prompts.chat import ChatPromptTemplate\n')]
import logging from typing import Any, Dict, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class PipelineAI(LLM, BaseModel): """PipelineAI large language models. To use, you should have the ``pipeline-ai`` python package installed, and the environment variable ``PIPELINE_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain import PipelineAI pipeline = PipelineAI(pipeline_key="") """ pipeline_key: str = "" """The id or tag of the target pipeline""" pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any pipeline parameters valid for `create` call not explicitly specified.""" pipeline_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("pipeline_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to pipeline_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["pipeline_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" pipeline_api_key = get_from_dict_or_env( values, "pipeline_api_key", "PIPELINE_API_KEY" ) values["pipeline_api_key"] = pipeline_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"pipeline_key": self.pipeline_key}, **{"pipeline_kwargs": self.pipeline_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "pipeline_ai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to Pipeline Cloud endpoint.""" try: from pipeline import PipelineCloud except ImportError: raise ValueError( "Could not import pipeline-ai python package. " "Please install it with `pip install pipeline-ai`." ) client = PipelineCloud(token=self.pipeline_api_key) params = self.pipeline_kwargs or {} params = {**params, **kwargs} run = client.run_pipeline(self.pipeline_key, [prompt, params]) try: text = run.result_preview[0][0] except AttributeError: raise AttributeError( f"A pipeline run should have a `result_preview` attribute." f"Run was: {run}" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the pipeline parameters text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.pydantic_v1.Field", "langchain.pydantic_v1.root_validator", "langchain.utils.get_from_dict_or_env" ]
[((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((1004, 1031), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1009, 1031), False, 'from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((1279, 1303), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1293, 1303), False, 'from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((2131, 2147), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2145, 2147), False, 'from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((2310, 2378), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""pipeline_api_key"""', '"""PIPELINE_API_KEY"""'], {}), "(values, 'pipeline_api_key', 'PIPELINE_API_KEY')\n", (2330, 2378), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3361, 3403), 'pipeline.PipelineCloud', 'PipelineCloud', ([], {'token': 'self.pipeline_api_key'}), '(token=self.pipeline_api_key)\n', (3374, 3403), False, 'from pipeline import PipelineCloud\n'), ((3973, 4004), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (3992, 4004), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
from typing import Optional, Type import streamlit as st import tldextract import whois import whoisit from langchain.agents import AgentType, Tool, initialize_agent from langchain.chat_models import ChatOpenAI from langchain.tools import BaseTool from langchain.tools.ddg_search import DuckDuckGoSearchRun from pydantic import BaseModel, Field # Streamlit app st.title("TakedownGPT ⬇️🤖") # Add 'How to Use' section to the sidebar st.sidebar.header("How to Use 📝") st.sidebar.markdown(""" 1. Enter your OpenAI API key and select the OpenAI model you would like to use. 2. Input the domain name for which you want to send a takedown request. 3. Select the reason for the takedown request, or specify a custom reason. 4. Click the 'Generate Takedown Request' button to create the draft email and find the appropriate email address for the takedown request. 5. Copy or download the draft email and send it to the appropriate email address. """) api_key = st.sidebar.text_input("Enter your OpenAI API key:", type="password", help="You can find your OpenAI API on the [OpenAI dashboard](https://platform.openai.com/account/api-keys)") # Add 'Model Selection' section to the sidebar model_options = [ "gpt-3.5-turbo-0613", "gpt-4-0613" ] selected_model = st.sidebar.selectbox("Select the OpenAI model you would like to use:", model_options, help="You must have been given access to the [GPT-4 API](https://openai.com/waitlist/gpt-4-api) by OpenAI in order to use it.") # Add 'About' section to the sidebar st.sidebar.header("About 🌐") st.sidebar.markdown(""" This app helps you draft takedown requests to domain registrars. It uses a combination of autonomous LangChain Agents and OpenAI's recently introduced support for function calling to: 1. Perform a WHOIS / RDAP lookup to identify the registrar for the given website 2. Search the web with DuckDuckGo to find the appropriate email address for takedown requests for that domain registrar 3. Draft a takedown request email to the hosting provider citing the reason for the takedown request Created by [Matt Adams](https://www.linkedin.com/in/matthewrwadams/). """) # Domain input field domain = st.text_input("Enter the domain that is the subject of the takedown request:", help="e.g. 
'example.com'") # Takedown reason drop-down field reason_options = [ "Copyright infringement", "Trademark infringement", "Defamation or libel", "Privacy violations", "Malware or phishing activities", "Violation of terms of service", "Personal safety concerns", "Other (specify)", ] reason = st.selectbox("Select the reason for the takedown request:", reason_options) if reason == "Other (specify)": custom_reason = st.text_input("Specify the custom reason for the takedown request:") else: custom_reason = None # Additional information input field additional_info = st.text_area("Provide additional information to support your request (optional):", help="This information will be included in the takedown request email.") # Advanced Options collapsible menu advanced_options = st.expander("Advanced Options ⚙️") # Add protocol options for performing domain lookups lookup_options = [ "WHOIS", "RDAP" ] selected_lookup = advanced_options.selectbox("Select your preferred protocol for domain registrar lookups:", lookup_options) if selected_lookup == "RDAP": tool_name = "rdap_lookup" else: tool_name = "get_registrar" # Check if domain is valid def is_valid_domain(domain): extracted = tldextract.extract(domain) if extracted.domain and extracted.suffix: return True return False # Error handling function def handle_error(error_message): st.error(error_message) if st.button("Generate Takedown Request 📨"): if not api_key: handle_error("Please provide an OpenAI API key. 🔑") elif not domain: handle_error("Please provide a domain name. 🌐") elif not is_valid_domain(domain): handle_error("Please provide a valid domain name. 🌐") else: # Set API key api_key = api_key # Initialize ChatOpenAI llm = ChatOpenAI(temperature=0.7, model=selected_model, openai_api_key=api_key) # Initialize DuckDuckGo Search search = DuckDuckGoSearchRun() # Define a custom tool for WHOIS lookups class GetRegistrarCheckInput(BaseModel): domain: str = Field(..., description="The domain name to look up") class GetRegistrarTool(BaseTool): name = "get_registrar" description = "Useful for finding the registrar of a given domain name using WHOIS" def _run(self, domain: str): w = whois.whois(domain) return w.registrar def _arun(self, domain: str): raise NotImplementedError("This tool does not support async") args_schema: Optional[Type[BaseModel]] = GetRegistrarCheckInput # Define a custom tool for RDAP lookups class RDAPLookupTool(BaseTool): name = "rdap_lookup" description = "Useful for finding the registrar of a given domain name using RDAP" def _run(self, domain: str): whoisit.bootstrap() results = whoisit.domain(domain) return results def _arun(self, domain: str): raise NotImplementedError("This tool does not support async") args_schema: Optional[Type[BaseModel]] = GetRegistrarCheckInput # Defining Tools tools = [ Tool( name="Search", func=search.run, description="useful for when you need to find web pages. You should ask targeted questions" ), GetRegistrarTool(), RDAPLookupTool() ] # Initializing the Agent open_ai_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True) # Defining and running the Prompt prompt = """ Task: 1. Use the {tool_name} tool to find the domain registrar for {domain}. 2. Perform a web search to find the email address for takedown requests for that domain registrar. 3. Prepare a draft email takedown request to the hosting provider citing the following reason: {reason}. 
Include the additional information provided: {additional_info} Your response must be in the following format and should not include any other information: - Registrar name: [registrar] - Email address for takedown requests: [registrar_email] - Email subject: [subject] - Email body: [body] Your response: """ # Fill placeholders with actual data if custom_reason: prompt_filled = prompt.format(tool_name=tool_name, domain=domain, reason=custom_reason, additional_info=additional_info) else: prompt_filled = prompt.format(tool_name=tool_name, domain=domain, reason=reason, additional_info=additional_info) try: with st.spinner("Processing your request... ⏳"): # Run the agent response = open_ai_agent.run(prompt_filled) if "Email address for takedown requests: [not found]" in response: handle_error("Could not find the email address for takedown requests. Please try again or manually search for the domain registrar's contact information. 🚫") else: # Display the result st.code(response, language="text") # Add download button for the generated takedown request filename = f"{domain}_takedown_request.txt" st.download_button( label="Download Takedown Request 📥", data=response.encode("utf-8"), file_name=filename, mime="text/plain", ) except Exception as e: handle_error(f"An error occurred while processing your request: {str(e)} ❌")
[ "langchain.tools.ddg_search.DuckDuckGoSearchRun", "langchain.agents.initialize_agent", "langchain.agents.Tool", "langchain.chat_models.ChatOpenAI" ]
[((363, 390), 'streamlit.title', 'st.title', (['"""TakedownGPT ⬇️🤖"""'], {}), "('TakedownGPT ⬇️🤖')\n", (371, 390), True, 'import streamlit as st\n'), ((434, 467), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""How to Use 📝"""'], {}), "('How to Use 📝')\n", (451, 467), True, 'import streamlit as st\n'), ((468, 954), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\n1. Enter your OpenAI API key and select the OpenAI model you would like to use.\n2. Input the domain name for which you want to send a takedown request.\n3. Select the reason for the takedown request, or specify a custom reason.\n4. Click the \'Generate Takedown Request\' button to create the draft email and find the appropriate email address for the takedown request.\n5. Copy or download the draft email and send it to the appropriate email address.\n"""'], {}), '(\n """\n1. Enter your OpenAI API key and select the OpenAI model you would like to use.\n2. Input the domain name for which you want to send a takedown request.\n3. Select the reason for the takedown request, or specify a custom reason.\n4. Click the \'Generate Takedown Request\' button to create the draft email and find the appropriate email address for the takedown request.\n5. Copy or download the draft email and send it to the appropriate email address.\n"""\n )\n', (487, 954), True, 'import streamlit as st\n'), ((956, 1143), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter your OpenAI API key:"""'], {'type': '"""password"""', 'help': '"""You can find your OpenAI API on the [OpenAI dashboard](https://platform.openai.com/account/api-keys)"""'}), "('Enter your OpenAI API key:', type='password', help=\n 'You can find your OpenAI API on the [OpenAI dashboard](https://platform.openai.com/account/api-keys)'\n )\n", (977, 1143), True, 'import streamlit as st\n'), ((1262, 1489), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select the OpenAI model you would like to use:"""', 'model_options'], {'help': '"""You must have been given access to the [GPT-4 API](https://openai.com/waitlist/gpt-4-api) by OpenAI in order to use it."""'}), "('Select the OpenAI model you would like to use:',\n model_options, help=\n 'You must have been given access to the [GPT-4 API](https://openai.com/waitlist/gpt-4-api) by OpenAI in order to use it.'\n )\n", (1282, 1489), True, 'import streamlit as st\n'), ((1514, 1542), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""About 🌐"""'], {}), "('About 🌐')\n", (1531, 1542), True, 'import streamlit as st\n'), ((1543, 2144), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\nThis app helps you draft takedown requests to domain registrars.\nIt uses a combination of autonomous LangChain Agents and OpenAI\'s recently introduced support for function calling to:\n 1. Perform a WHOIS / RDAP lookup to identify the registrar for the given website\n 2. Search the web with DuckDuckGo to find the appropriate email address for takedown requests for that domain registrar\n 3. Draft a takedown request email to the hosting provider citing the reason for the takedown request\n\nCreated by [Matt Adams](https://www.linkedin.com/in/matthewrwadams/).\n"""'], {}), '(\n """\nThis app helps you draft takedown requests to domain registrars.\nIt uses a combination of autonomous LangChain Agents and OpenAI\'s recently introduced support for function calling to:\n 1. Perform a WHOIS / RDAP lookup to identify the registrar for the given website\n 2. 
Search the web with DuckDuckGo to find the appropriate email address for takedown requests for that domain registrar\n 3. Draft a takedown request email to the hosting provider citing the reason for the takedown request\n\nCreated by [Matt Adams](https://www.linkedin.com/in/matthewrwadams/).\n"""\n )\n', (1562, 2144), True, 'import streamlit as st\n'), ((2166, 2275), 'streamlit.text_input', 'st.text_input', (['"""Enter the domain that is the subject of the takedown request:"""'], {'help': '"""e.g. \'example.com\'"""'}), '(\'Enter the domain that is the subject of the takedown request:\',\n help="e.g. \'example.com\'")\n', (2179, 2275), True, 'import streamlit as st\n'), ((2580, 2655), 'streamlit.selectbox', 'st.selectbox', (['"""Select the reason for the takedown request:"""', 'reason_options'], {}), "('Select the reason for the takedown request:', reason_options)\n", (2592, 2655), True, 'import streamlit as st\n'), ((2865, 3029), 'streamlit.text_area', 'st.text_area', (['"""Provide additional information to support your request (optional):"""'], {'help': '"""This information will be included in the takedown request email."""'}), "(\n 'Provide additional information to support your request (optional):',\n help='This information will be included in the takedown request email.')\n", (2877, 3029), True, 'import streamlit as st\n'), ((3077, 3111), 'streamlit.expander', 'st.expander', (['"""Advanced Options ⚙️"""'], {}), "('Advanced Options ⚙️')\n", (3088, 3111), True, 'import streamlit as st\n'), ((3710, 3750), 'streamlit.button', 'st.button', (['"""Generate Takedown Request 📨"""'], {}), "('Generate Takedown Request 📨')\n", (3719, 3750), True, 'import streamlit as st\n'), ((2709, 2777), 'streamlit.text_input', 'st.text_input', (['"""Specify the custom reason for the takedown request:"""'], {}), "('Specify the custom reason for the takedown request:')\n", (2722, 2777), True, 'import streamlit as st\n'), ((3508, 3534), 'tldextract.extract', 'tldextract.extract', (['domain'], {}), '(domain)\n', (3526, 3534), False, 'import tldextract\n'), ((3682, 3705), 'streamlit.error', 'st.error', (['error_message'], {}), '(error_message)\n', (3690, 3705), True, 'import streamlit as st\n'), ((4114, 4187), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model': 'selected_model', 'openai_api_key': 'api_key'}), '(temperature=0.7, model=selected_model, openai_api_key=api_key)\n', (4124, 4187), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4245, 4266), 'langchain.tools.ddg_search.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {}), '()\n', (4264, 4266), False, 'from langchain.tools.ddg_search import DuckDuckGoSearchRun\n'), ((5885, 5961), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)\n', (5901, 5961), False, 'from langchain.agents import AgentType, Tool, initialize_agent\n'), ((4392, 4444), 'pydantic.Field', 'Field', (['...'], {'description': '"""The domain name to look up"""'}), "(..., description='The domain name to look up')\n", (4397, 4444), False, 'from pydantic import BaseModel, Field\n'), ((5563, 5702), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to find web pages. You should ask targeted questions"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to find web pages. 
You should ask targeted questions'\n )\n", (5567, 5702), False, 'from langchain.agents import AgentType, Tool, initialize_agent\n'), ((4681, 4700), 'whois.whois', 'whois.whois', (['domain'], {}), '(domain)\n', (4692, 4700), False, 'import whois\n'), ((5209, 5228), 'whoisit.bootstrap', 'whoisit.bootstrap', ([], {}), '()\n', (5226, 5228), False, 'import whoisit\n'), ((5255, 5277), 'whoisit.domain', 'whoisit.domain', (['domain'], {}), '(domain)\n', (5269, 5277), False, 'import whoisit\n'), ((7118, 7160), 'streamlit.spinner', 'st.spinner', (['"""Processing your request... ⏳"""'], {}), "('Processing your request... ⏳')\n", (7128, 7160), True, 'import streamlit as st\n'), ((7591, 7625), 'streamlit.code', 'st.code', (['response'], {'language': '"""text"""'}), "(response, language='text')\n", (7598, 7625), True, 'import streamlit as st\n')]
import sqlite3
import pandas as pd
import os
import json
import warnings
from langchain import SQLDatabase
from langchain.docstore.document import Document
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from sqlalchemy import exc
from sqlalchemy.exc import SAWarning

warnings.filterwarnings('ignore', category=SAWarning)

from src.data.setup.vector_setup_functions import get_json, connect_db, prep_chroma_documents, create_chroma_db
from src.data.setup.db_setup_functions import get_filenames, get_table_names, get_column_info, df_text_processing, build_schema_info, convert_df_to_json

#### BUILD CONSOLIDATED SCHEMA INFORMATION ####
#you can do this from the provided tables, but that would not be as scalable in the real world.

#point to location you saved the data to and the type of database
data_directory = 'src/data/raw/spider/database/'
db_type = '.sqlite'

#create a dataframe with schema info
schema_df = build_schema_info(filepath=data_directory, filetype=db_type)

#create a json of the same data if that format tickles your fancy
schema_json = convert_df_to_json(df=schema_df)

##### SAVE SCHEMA INFO #####
save_path = 'src/data/processed/db/'
print("\nSaving dataframe and JSON...")

#save df in pickle file
filepath = save_path+'schema_info.pkl'
schema_df.to_pickle(filepath)

#save json in json file
with open(save_path+'schema_info.json', 'w') as file:
    json.dump(schema_json, file)

print("...Success")

#### CREATING VECTOR DATABASE FROM SCHEMA INFORMATION ####
#setup embeddings using HuggingFace
embeddings = HuggingFaceEmbeddings()

#point to json file with schema info
json_path = 'src/data/processed/db/schema_info.json'

#point to location to save the vector database
persist_directory = 'src/data/processed/chromadb/'

schema_docs = prep_chroma_documents(json_path=json_path, db_path=data_directory)
create_chroma_db(docs=schema_docs, persist_dir=persist_directory, embed_func=embeddings)
[ "langchain.embeddings.HuggingFaceEmbeddings" ]
[((320, 373), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'SAWarning'}), "('ignore', category=SAWarning)\n", (343, 373), False, 'import warnings\n'), ((973, 1033), 'src.data.setup.db_setup_functions.build_schema_info', 'build_schema_info', ([], {'filepath': 'data_directory', 'filetype': 'db_type'}), '(filepath=data_directory, filetype=db_type)\n', (990, 1033), False, 'from src.data.setup.db_setup_functions import get_filenames, get_table_names, get_column_info, df_text_processing, build_schema_info, convert_df_to_json\n'), ((1115, 1147), 'src.data.setup.db_setup_functions.convert_df_to_json', 'convert_df_to_json', ([], {'df': 'schema_df'}), '(df=schema_df)\n', (1133, 1147), False, 'from src.data.setup.db_setup_functions import get_filenames, get_table_names, get_column_info, df_text_processing, build_schema_info, convert_df_to_json\n'), ((1594, 1617), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1615, 1617), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1823, 1889), 'src.data.setup.vector_setup_functions.prep_chroma_documents', 'prep_chroma_documents', ([], {'json_path': 'json_path', 'db_path': 'data_directory'}), '(json_path=json_path, db_path=data_directory)\n', (1844, 1889), False, 'from src.data.setup.vector_setup_functions import get_json, connect_db, prep_chroma_documents, create_chroma_db\n'), ((1891, 1983), 'src.data.setup.vector_setup_functions.create_chroma_db', 'create_chroma_db', ([], {'docs': 'schema_docs', 'persist_dir': 'persist_directory', 'embed_func': 'embeddings'}), '(docs=schema_docs, persist_dir=persist_directory,\n embed_func=embeddings)\n', (1907, 1983), False, 'from src.data.setup.vector_setup_functions import get_json, connect_db, prep_chroma_documents, create_chroma_db\n'), ((1432, 1460), 'json.dump', 'json.dump', (['schema_json', 'file'], {}), '(schema_json, file)\n', (1441, 1460), False, 'import json\n')]
from langchain.llms import OpenAI
from callback import MyCallbackHandler
from langchain.callbacks.base import BaseCallbackManager


class QaLlm():

    def __init__(self) -> None:
        manager = BaseCallbackManager([MyCallbackHandler()])
        self.llm = OpenAI(temperature=0, callback_manager=manager, model_name="gpt-3.5-turbo")

    def get_llm(self):
        return self.llm
[ "langchain.llms.OpenAI" ]
[((259, 334), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'callback_manager': 'manager', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, callback_manager=manager, model_name='gpt-3.5-turbo')\n", (265, 334), False, 'from langchain.llms import OpenAI\n'), ((218, 237), 'callback.MyCallbackHandler', 'MyCallbackHandler', ([], {}), '()\n', (235, 237), False, 'from callback import MyCallbackHandler\n')]
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from apikey import (
    apikey,
    google_search,
    google_cse,
    serp,
    aws_access_key,
    aws_secret_key,
    aws_region,
)
import os
from typing import Dict
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import GoogleSearchAPIWrapper

os.environ["OPENAI_API_KEY"] = apikey
os.environ["GOOGLE_API_KEY"] = google_search
os.environ["GOOGLE_CSE_ID"] = google_cse
os.environ["SERPAPI_API_KEY"] = serp
os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key
os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_key
os.environ["AWS_DEFAULT_REGION"] = aws_region

# LLMs
llm = OpenAI(temperature=0.3, max_tokens=100, model_name="text-davinci-003")

# Memory
conv_memory = ConversationBufferMemory()

# Prompt template for LLM
script_template = PromptTemplate(
    input_variables=["topic", "google_search"],
    template="Write me a YouTube voiceover script about {topic}, and also do research about the topic on Google. {google_search}",
)

adjust_template = PromptTemplate(
    input_variables=["script"],
    template="Edit, and adjust the script in a fun, relaxed way: {script}\n\n-=-=-=- Adjusted Script -=-=-=-",
)

# Add a new prompt template for further adjustments
refine_template = PromptTemplate(
    input_variables=[
        "script",
        "adjusted_script",
    ],
    template="Refine the adjusted script staying on topic to make it more charismatic:\n{script}\n\n-=-=-=- Adjusted Script -=-=-=-\n{adjusted_script}\n\n-=-=-=- Refined Script -=-=-=-",
)

# LLM Chains
script_chain = LLMChain(
    llm=llm, prompt=script_template, verbose=True, output_key="script"
)
adjust_chain = LLMChain(
    llm=llm, prompt=adjust_template, verbose=True, output_key="adjusted_script"
)
refine_chain = LLMChain(
    llm=llm, prompt=refine_template, verbose=True, output_key="refined_script"
)

search = GoogleSearchAPIWrapper()


def run_all_chains(prompt: str, google_search_result: str) -> Dict[str, str]:
    script = script_chain({"topic": prompt, "google_search": google_search_result})
    conv_memory.save_context(
        {"topic": prompt}, {"script": script[script_chain.output_key]}
    )

    adjust = adjust_chain({"script": script[script_chain.output_key]})
    conv_memory.save_context(
        {"script": script[script_chain.output_key]},
        {"adjusted_script": adjust[adjust_chain.output_key]},
    )
    adjust_output = adjust[adjust_chain.output_key]
    adjusted_script = adjust_output.split("-=-=-=- Adjusted Script -=-=-=-")[-1].strip()

    refine = refine_chain(
        {
            "script": script[script_chain.output_key],
            "adjusted_script": adjust[adjust_chain.output_key],
        }
    )
    conv_memory.save_context(
        {"adjusted_script": adjust[adjust_chain.output_key]},
        {"refined_script": refine[refine_chain.output_key]},
    )
    refine_output = refine[refine_chain.output_key]
    refined_script = refine_output.split("-=-=-=- Refined Script -=-=-=-")[-1].strip()

    return {
        "script": script[script_chain.output_key],
        "adjusted_script": adjusted_script,
        "refined_script": refined_script,
    }
[ "langchain.memory.ConversationBufferMemory", "langchain.llms.OpenAI", "langchain.chains.LLMChain", "langchain.utilities.GoogleSearchAPIWrapper", "langchain.prompts.PromptTemplate" ]
[((765, 835), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.3)', 'max_tokens': '(100)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0.3, max_tokens=100, model_name='text-davinci-003')\n", (771, 835), False, 'from langchain.llms import OpenAI\n'), ((860, 886), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (884, 886), False, 'from langchain.memory import ConversationBufferMemory\n'), ((933, 1128), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['topic', 'google_search']", 'template': '"""Write me a YouTube voiceover script about {topic}, and also do research about the topic on Google. {google_search}"""'}), "(input_variables=['topic', 'google_search'], template=\n 'Write me a YouTube voiceover script about {topic}, and also do research about the topic on Google. {google_search}'\n )\n", (947, 1128), False, 'from langchain.prompts import PromptTemplate\n'), ((1149, 1310), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['script']", 'template': '"""Edit, and adjust the script in a fun, relaxed way: {script}\n\n-=-=-=- Adjusted Script -=-=-=-"""'}), '(input_variables=[\'script\'], template=\n """Edit, and adjust the script in a fun, relaxed way: {script}\n\n-=-=-=- Adjusted Script -=-=-=-"""\n )\n', (1163, 1310), False, 'from langchain.prompts import PromptTemplate\n'), ((1381, 1633), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['script', 'adjusted_script']", 'template': '"""Refine the adjusted script staying on topic to make it more charismatic:\n{script}\n\n-=-=-=- Adjusted Script -=-=-=-\n{adjusted_script}\n\n-=-=-=- Refined Script -=-=-=-"""'}), '(input_variables=[\'script\', \'adjusted_script\'], template=\n """Refine the adjusted script staying on topic to make it more charismatic:\n{script}\n\n-=-=-=- Adjusted Script -=-=-=-\n{adjusted_script}\n\n-=-=-=- Refined Script -=-=-=-"""\n )\n', (1395, 1633), False, 'from langchain.prompts import PromptTemplate\n'), ((1689, 1765), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'script_template', 'verbose': '(True)', 'output_key': '"""script"""'}), "(llm=llm, prompt=script_template, verbose=True, output_key='script')\n", (1697, 1765), False, 'from langchain.chains import LLMChain\n'), ((1787, 1877), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'adjust_template', 'verbose': '(True)', 'output_key': '"""adjusted_script"""'}), "(llm=llm, prompt=adjust_template, verbose=True, output_key=\n 'adjusted_script')\n", (1795, 1877), False, 'from langchain.chains import LLMChain\n'), ((1894, 1983), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'refine_template', 'verbose': '(True)', 'output_key': '"""refined_script"""'}), "(llm=llm, prompt=refine_template, verbose=True, output_key=\n 'refined_script')\n", (1902, 1983), False, 'from langchain.chains import LLMChain\n'), ((1995, 2019), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (2017, 2019), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n')]
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import sys
import json
import os


class bcolors:
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"


MAX_HISTORY_LENGTH = 5


def build_chain():
    region = os.environ["AWS_REGION"]
    kendra_index_id = os.environ["KENDRA_INDEX_ID"]
    endpoint_name = os.environ["FALCON_40B_ENDPOINT"]
    language_code = os.environ["LANGUAGE_CODE"]

    class ContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
            input_str = json.dumps({"inputs": prompt, "parameters": model_kwargs})
            return input_str.encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    content_handler = ContentHandler()

    llm = SagemakerEndpoint(
        endpoint_name=endpoint_name,
        region_name=region,
        model_kwargs={
            "temperature": 0.8,
            "max_new_tokens": 512,
            "do_sample": True,
            "top_p": 0.9,
            "repetition_penalty": 1.03,
            "stop": ["\nUser:", "<|endoftext|>", "</s>"],
        },
        content_handler=content_handler,
    )

    retriever = AmazonKendraRetriever(
        index_id=kendra_index_id,
        region_name=region,
        top_k=1,
        attribute_filter={
            "EqualsTo": {
                "Key": "_language_code",
                "Value": {"StringValue": language_code},
            }
        },
    )

    prompt_template = """
システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。

{context}

上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。

ユーザー: {question}
システム:
"""
    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )

    condense_qa_template = """
次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。

フォローアップの質問: {question}
独立した質問:"""
    standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)

    qa = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        condense_question_prompt=standalone_question_prompt,
        return_source_documents=True,
        verbose=True,
        combine_docs_chain_kwargs={"prompt": PROMPT},
    )
    return qa


def run_chain(chain, prompt: str, history=[]):
    return chain({"question": prompt, "chat_history": history})


if __name__ == "__main__":
    chat_history = []
    qa = build_chain()
    print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
    print(
        bcolors.OKCYAN
        + "Ask a question, start a New search: or CTRL-D to exit."
        + bcolors.ENDC
    )
    print(">", end=" ", flush=True)
    for query in sys.stdin:
        if query.strip().lower().startswith("new search:"):
            query = query.strip().lower().replace("new search:", "")
            chat_history = []
        elif len(chat_history) == MAX_HISTORY_LENGTH:
            chat_history.pop(0)
        result = run_chain(qa, query, chat_history)
        chat_history.append((query, result["answer"]))
        print(bcolors.OKGREEN + result["answer"] + bcolors.ENDC)
        if "source_documents" in result:
            print(bcolors.OKGREEN + "Sources:")
            for d in result["source_documents"]:
                print(d.metadata["source"])
        print(bcolors.ENDC)
        print(
            bcolors.OKCYAN
            + "Ask a question, start a New search: or CTRL-D to exit."
            + bcolors.ENDC
        )
        print(">", end=" ", flush=True)
    print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
[ "langchain.SagemakerEndpoint", "langchain.chains.ConversationalRetrievalChain.from_llm", "langchain.retrievers.AmazonKendraRetriever", "langchain.prompts.PromptTemplate.from_template", "langchain.prompts.PromptTemplate" ]
[((1327, 1604), 'langchain.SagemakerEndpoint', 'SagemakerEndpoint', ([], {'endpoint_name': 'endpoint_name', 'region_name': 'region', 'model_kwargs': "{'temperature': 0.8, 'max_new_tokens': 512, 'do_sample': True, 'top_p': 0.9,\n 'repetition_penalty': 1.03, 'stop': ['\\nUser:', '<|endoftext|>', '</s>']}", 'content_handler': 'content_handler'}), "(endpoint_name=endpoint_name, region_name=region,\n model_kwargs={'temperature': 0.8, 'max_new_tokens': 512, 'do_sample': \n True, 'top_p': 0.9, 'repetition_penalty': 1.03, 'stop': ['\\nUser:',\n '<|endoftext|>', '</s>']}, content_handler=content_handler)\n", (1344, 1604), False, 'from langchain import SagemakerEndpoint\n'), ((1731, 1915), 'langchain.retrievers.AmazonKendraRetriever', 'AmazonKendraRetriever', ([], {'index_id': 'kendra_index_id', 'region_name': 'region', 'top_k': '(1)', 'attribute_filter': "{'EqualsTo': {'Key': '_language_code', 'Value': {'StringValue': language_code}}\n }"}), "(index_id=kendra_index_id, region_name=region, top_k=1,\n attribute_filter={'EqualsTo': {'Key': '_language_code', 'Value': {\n 'StringValue': language_code}}})\n", (1752, 1915), False, 'from langchain.retrievers import AmazonKendraRetriever\n'), ((2227, 2312), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (2241, 2312), False, 'from langchain.prompts import PromptTemplate\n'), ((2482, 2532), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['condense_qa_template'], {}), '(condense_qa_template)\n', (2510, 2532), False, 'from langchain.prompts import PromptTemplate\n'), ((2543, 2766), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'condense_question_prompt': 'standalone_question_prompt', 'return_source_documents': '(True)', 'verbose': '(True)', 'combine_docs_chain_kwargs': "{'prompt': PROMPT}"}), "(llm=llm, retriever=retriever,\n condense_question_prompt=standalone_question_prompt,\n return_source_documents=True, verbose=True, combine_docs_chain_kwargs={\n 'prompt': PROMPT})\n", (2580, 2766), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((989, 1047), 'json.dumps', 'json.dumps', (["{'inputs': prompt, 'parameters': model_kwargs}"], {}), "({'inputs': prompt, 'parameters': model_kwargs})\n", (999, 1047), False, 'import json\n')]
#Make sure to install the following packages: dlt, langchain, duckdb, python-dotenv, openai, weaviate-client import dlt from langchain import PromptTemplate, LLMChain from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI from langchain.document_loaders import PyPDFLoader import weaviate import os import json import argparse from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate from langchain.retrievers import WeaviateHybridSearchRetriever from langchain.schema import Document, SystemMessage, HumanMessage from langchain.vectorstores import Weaviate import uuid from dotenv import load_dotenv load_dotenv() from pathlib import Path from langchain import OpenAI, LLMMathChain import os embeddings = OpenAIEmbeddings() from deep_translator import (GoogleTranslator) def _convert_pdf_to_document(path: str = None): """Convert a PDF document to a Document object""" if path is None: raise ValueError("A valid path to the document must be provided.") loader = PyPDFLoader(path) pages = loader.load_and_split() print("PAGES", pages[0]) # Parse metadata from the folder path path_parts = Path(path).parts personal_receipts_index = path_parts.index("personal_receipts") metadata_parts = path_parts[personal_receipts_index+1:] documents = [] for page in pages: translation = GoogleTranslator(source='auto', target='en').translate(text=page.page_content) documents.append( Document( metadata={ "title": "Personal Receipt", "country": metadata_parts[1], "year": metadata_parts[0], "author": str(uuid.uuid4()), "source": "/".join(metadata_parts), }, page_content=translation, ) ) print(documents) return documents def _init_weaviate(): """Initialize weaviate client and retriever""" auth_config = weaviate.auth.AuthApiKey(api_key=os.environ.get('WEAVIATE_API_KEY')) client = weaviate.Client( url='https://my-vev-index-o4qitptw.weaviate.network', auth_client_secret=auth_config, additional_headers={ "X-OpenAI-Api-Key": os.environ.get('OPENAI_API_KEY') } ) retriever = WeaviateHybridSearchRetriever( client=client, index_name="PDFloader", text_key="text", attributes=[], embedding=embeddings, create_schema_if_missing=True, ) return retriever def load_to_weaviate(document_path=None): """Load documents to weaviate""" retriever =_init_weaviate() docs = _convert_pdf_to_document(document_path) return retriever.add_documents(docs) def get_from_weaviate(query=None, path=None, operator=None, valueText=None): """ Get documents from weaviate. Args: query (str): The query string. path (list): The path for filtering, e.g., ['year']. operator (str): The operator for filtering, e.g., 'Equal'. valueText (str): The value for filtering, e.g., '2017*'. 
Example: get_from_weaviate(query="some query", path=['year'], operator='Equal', valueText='2017*') """ retriever = _init_weaviate() # Initial retrieval without filters output = retriever.get_relevant_documents( query, score=True, ) # Apply filters if provided if path or operator or valueText: # Create the where_filter based on provided parameters where_filter = { 'path': path if path else [], 'operator': operator if operator else '', 'valueText': valueText if valueText else '' } # Retrieve documents with filters applied output = retriever.get_relevant_documents( query, score=True, where_filter=where_filter ) return output def delete_from_weaviate(query=None, filters=None): """Delete documents from weaviate, pass dict as filters""" """ { 'path': ['year'], 'operator': 'Equal', 'valueText': '2017*' }""" auth_config = weaviate.auth.AuthApiKey(api_key=os.environ.get('WEAVIATE_API_KEY')) client = weaviate.Client( url='https://my-vev-index-o4qitptw.weaviate.network', auth_client_secret=auth_config, additional_headers={ "X-OpenAI-Api-Key": os.environ.get('OPENAI_API_KEY') } ) client.batch.delete_objects( class_name='PDFloader', # Same `where` filter as in the GraphQL API where={ 'path': ['year'], 'operator': 'Equal', 'valueText': '2017*' }, ) return "Success" llm = ChatOpenAI( temperature=0.0, max_tokens=1200, openai_api_key=os.environ.get('OPENAI_API_KEY'), model_name="gpt-4-0613", ) def infer_schema_from_text(text: str): """Infer schema from text""" prompt_ = """ You are a json schema master. Create a JSON schema based on the following data and don't write anything else: {prompt} """ complete_query = PromptTemplate( input_variables=["prompt"], template=prompt_, ) chain = LLMChain( llm=llm, prompt=complete_query, verbose=True ) chain_result = chain.run(prompt=text).strip() json_data = json.dumps(chain_result) return json_data def set_data_contract(data, version, date, agreement_id=None, privacy_policy=None, terms_of_service=None, format=None, schema_version=None, checksum=None, owner=None, license=None, validity_start=None, validity_end=None): # Creating the generic data contract data_contract = { "version": version or "", "date": date or "", "agreement_id": agreement_id or "", "privacy_policy": privacy_policy or "", "terms_of_service": terms_of_service or "", "format": format or "", "schema_version": schema_version or "", "checksum": checksum or "", "owner": owner or "", "license": license or "", "validity_start": validity_start or "", "validity_end": validity_end or "", "properties": data # Adding the given data under the "properties" field } return data_contract def create_id_dict(memory_id=None, st_memory_id=None, buffer_id=None): """ Create a dictionary containing IDs for memory, st_memory, and buffer. Args: memory_id (str): The Memory ID. st_memory_id (str): The St_memory ID. buffer_id (str): The Buffer ID. Returns: dict: A dictionary containing the IDs. 
""" id_dict = { "memoryID": memory_id or "", "st_MemoryID": st_memory_id or "", "bufferID": buffer_id or "" } return id_dict def init_buffer(data, version, date, memory_id=None, st_memory_id=None, buffer_id=None, agreement_id=None, privacy_policy=None, terms_of_service=None, format=None, schema_version=None, checksum=None, owner=None, license=None, validity_start=None, validity_end=None, text=None, process=None): # Create ID dictionary id_dict = create_id_dict(memory_id, st_memory_id, buffer_id) # Set data contract data_contract = set_data_contract(data, version, date, agreement_id, privacy_policy, terms_of_service, format, schema_version, checksum, owner, license, validity_start, validity_end) # Add ID dictionary to properties data_contract["properties"]["relations"] = id_dict # Infer schema from text and add to properties if text: schema = infer_schema_from_text(text) data_contract["properties"]["schema"] = schema if process: data_contract["properties"]["process"] = process return data_contract def infer_properties_from_text(text: str): """Infer schema properties from text""" prompt_ = """ You are a json index master. Create a short JSON index containing the most important data and don't write anything else: {prompt} """ complete_query = PromptTemplate( input_variables=["prompt"], template=prompt_, ) chain = LLMChain( llm=llm, prompt=complete_query, verbose=True ) chain_result = chain.run(prompt=text).strip() # json_data = json.dumps(chain_result) return chain_result # # # # print(infer_schema_from_text(output[0].page_content)) def load_json_or_infer_schema(file_path, document_path): """Load JSON schema from file or infer schema from text""" try: # Attempt to load the JSON file with open(file_path, 'r') as file: json_schema = json.load(file) return json_schema except FileNotFoundError: # If the file doesn't exist, run the specified function output = _convert_pdf_to_document(path=document_path) json_schema = infer_schema_from_text(output[0].page_content) return json_schema def ai_function(prompt=None, json_schema=None): """AI function to convert unstructured data to structured data""" # Here we define the user prompt and the structure of the output we desire # prompt = output[0].page_content prompt_msgs = [ SystemMessage( content="You are a world class algorithm converting unstructured data into structured data." 
), HumanMessage(content="Convert unstructured data to structured data:"), HumanMessagePromptTemplate.from_template("{input}"), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt_ = ChatPromptTemplate(messages=prompt_msgs) chain = create_structured_output_chain(json_schema , prompt=prompt_, llm=llm, verbose=True) output = chain.run(input = prompt, llm=llm) yield output # Define a base directory if you have one; this could be the directory where your script is located BASE_DIR = os.path.dirname(os.path.abspath(__file__)) def higher_level_thinking(): """Higher level thinking function to calculate the sum of the price of the tickets from these documents""" docs_data = get_from_weaviate(query="Train", path=['year'], operator='Equal', valueText='2017*') str_docs_data = str(docs_data) llm_math = LLMMathChain.from_llm(llm, verbose=True) output = llm_math.run(f"Calculate the sum of the price of the tickets from these documents: {str_docs_data}") # data_format = init_buffer(data=output, version="0.0.1", date="2021-09-01") yield output result_higher_level_thinking = higher_level_thinking() def process_higher_level_thinking(result=None): data_format = init_buffer(data=result, version="0.0.1", date="2021-09-01") import json data_format=json.dumps(data_format) yield data_format document_paths = [ os.path.join(BASE_DIR, "personal_receipts", "2017", "de", "public_transport", "3ZCCCW.pdf"), os.path.join(BASE_DIR, "personal_receipts", "2017", "de", "public_transport", "4GBEC9.pdf") ] def main(raw_loading, processed_loading,document_paths): BASE_DIR = os.getcwd() # Assuming the current working directory is where the data_processing_script.py is located def format_document_paths(base_dir, path): # Split the input path and extract the elements elements = path.strip("/").split("/") # Construct the document_paths list document_paths = [os.path.join(base_dir, *elements)] return document_paths document_paths_ =[format_document_paths(BASE_DIR, path) for path in document_paths][0] print(document_paths) if raw_loading: for document in document_paths_: file_path = os.path.join(BASE_DIR, "ticket_schema.json") json_schema = load_json_or_infer_schema(file_path, document) output = _convert_pdf_to_document(path=document) find_data_in_store = get_from_weaviate(query="Train", path=['year'], operator='Equal', valueText='2017*') if find_data_in_store: output = find_data_in_store print(output[1]) else: load_to_weaviate(document) pipeline = dlt.pipeline(pipeline_name="train_ticket", destination='duckdb', dataset_name='train_ticket_data') info = pipeline.run(data=ai_function(output[0].page_content, json_schema)) print(info) elif processed_loading: pipeline_processed = dlt.pipeline(pipeline_name="train_ticket_processed", destination='duckdb', dataset_name='train_ticket_processed_data') info = pipeline_processed.run(data=higher_level_thinking()) print(info) else: print("Please specify either '--raw_loading' or '--processed_loading' option.") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Data Processing Script") parser.add_argument("--raw_loading", action="store_true", help="Load raw document data and perform AI tasks") parser.add_argument("--processed_loading", action="store_true", help="Load processed data and run higher-level thinking AI function") parser.add_argument("document_paths", nargs="*", help="Paths to the documents to process") args = parser.parse_args() main(args.raw_loading, args.processed_loading, args.document_paths) #to run: python3 
level_1_pdf_vectorstore_dlt_etl.py --raw_loading "/personal_receipts/2017/de/public_transport/3ZCCCW.pdf"
[ "langchain.LLMChain", "langchain.chains.openai_functions.create_structured_output_chain", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.retrievers.WeaviateHybridSearchRetriever", "langchain.LLMMathChain.from_llm", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage", "langchain.prompts.ChatPromptTemplate", "langchain.document_loaders.PyPDFLoader", "langchain.embeddings.OpenAIEmbeddings", "langchain.PromptTemplate" ]
[((741, 754), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (752, 754), False, 'from dotenv import load_dotenv\n'), ((848, 866), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (864, 866), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1129, 1146), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['path'], {}), '(path)\n', (1140, 1146), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((2438, 2599), 'langchain.retrievers.WeaviateHybridSearchRetriever', 'WeaviateHybridSearchRetriever', ([], {'client': 'client', 'index_name': '"""PDFloader"""', 'text_key': '"""text"""', 'attributes': '[]', 'embedding': 'embeddings', 'create_schema_if_missing': '(True)'}), "(client=client, index_name='PDFloader',\n text_key='text', attributes=[], embedding=embeddings,\n create_schema_if_missing=True)\n", (2467, 2599), False, 'from langchain.retrievers import WeaviateHybridSearchRetriever\n'), ((5296, 5356), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['prompt']", 'template': 'prompt_'}), "(input_variables=['prompt'], template=prompt_)\n", (5310, 5356), False, 'from langchain import PromptTemplate, LLMChain\n'), ((5381, 5435), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'complete_query', 'verbose': '(True)'}), '(llm=llm, prompt=complete_query, verbose=True)\n', (5389, 5435), False, 'from langchain import PromptTemplate, LLMChain\n'), ((5517, 5541), 'json.dumps', 'json.dumps', (['chain_result'], {}), '(chain_result)\n', (5527, 5541), False, 'import json\n'), ((8185, 8245), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['prompt']", 'template': 'prompt_'}), "(input_variables=['prompt'], template=prompt_)\n", (8199, 8245), False, 'from langchain import PromptTemplate, LLMChain\n'), ((8270, 8324), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'complete_query', 'verbose': '(True)'}), '(llm=llm, prompt=complete_query, verbose=True)\n', (8278, 8324), False, 'from langchain import PromptTemplate, LLMChain\n'), ((9693, 9733), 'langchain.prompts.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': 'prompt_msgs'}), '(messages=prompt_msgs)\n', (9711, 9733), False, 'from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((9746, 9832), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['json_schema'], {'prompt': 'prompt_', 'llm': 'llm', 'verbose': '(True)'}), '(json_schema, prompt=prompt_, llm=llm,\n verbose=True)\n', (9776, 9832), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((10024, 10049), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10039, 10049), False, 'import os\n'), ((10347, 10387), 'langchain.LLMMathChain.from_llm', 'LLMMathChain.from_llm', (['llm'], {'verbose': '(True)'}), '(llm, verbose=True)\n', (10368, 10387), False, 'from langchain import OpenAI, LLMMathChain\n'), ((10815, 10838), 'json.dumps', 'json.dumps', (['data_format'], {}), '(data_format)\n', (10825, 10838), False, 'import json\n'), ((10885, 10980), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""personal_receipts"""', '"""2017"""', '"""de"""', '"""public_transport"""', '"""3ZCCCW.pdf"""'], {}), "(BASE_DIR, 'personal_receipts', '2017', 'de',\n 'public_transport', '3ZCCCW.pdf')\n", (10897, 10980), False, 'import os\n'), ((10982, 11077), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""personal_receipts"""', '"""2017"""', 
'"""de"""', '"""public_transport"""', '"""4GBEC9.pdf"""'], {}), "(BASE_DIR, 'personal_receipts', '2017', 'de',\n 'public_transport', '4GBEC9.pdf')\n", (10994, 11077), False, 'import os\n'), ((11151, 11162), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11160, 11162), False, 'import os\n'), ((12901, 12962), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Data Processing Script"""'}), "(description='Data Processing Script')\n", (12924, 12962), False, 'import argparse\n'), ((1273, 1283), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1277, 1283), False, 'from pathlib import Path\n'), ((4976, 5008), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (4990, 5008), False, 'import os\n'), ((9321, 9438), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a world class algorithm converting unstructured data into structured data."""'}), "(content=\n 'You are a world class algorithm converting unstructured data into structured data.'\n )\n", (9334, 9438), False, 'from langchain.schema import Document, SystemMessage, HumanMessage\n'), ((9460, 9529), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Convert unstructured data to structured data:"""'}), "(content='Convert unstructured data to structured data:')\n", (9472, 9529), False, 'from langchain.schema import Document, SystemMessage, HumanMessage\n'), ((9539, 9590), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (9579, 9590), False, 'from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((9600, 9671), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Tips: Make sure to answer in the correct format"""'}), "(content='Tips: Make sure to answer in the correct format')\n", (9612, 9671), False, 'from langchain.schema import Document, SystemMessage, HumanMessage\n'), ((2143, 2177), 'os.environ.get', 'os.environ.get', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (2157, 2177), False, 'import os\n'), ((4324, 4358), 'os.environ.get', 'os.environ.get', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (4338, 4358), False, 'import os\n'), ((8758, 8773), 'json.load', 'json.load', (['file'], {}), '(file)\n', (8767, 8773), False, 'import json\n'), ((11476, 11509), 'os.path.join', 'os.path.join', (['base_dir', '*elements'], {}), '(base_dir, *elements)\n', (11488, 11509), False, 'import os\n'), ((11746, 11790), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""ticket_schema.json"""'], {}), "(BASE_DIR, 'ticket_schema.json')\n", (11758, 11790), False, 'import os\n'), ((12241, 12343), 'dlt.pipeline', 'dlt.pipeline', ([], {'pipeline_name': '"""train_ticket"""', 'destination': '"""duckdb"""', 'dataset_name': '"""train_ticket_data"""'}), "(pipeline_name='train_ticket', destination='duckdb',\n dataset_name='train_ticket_data')\n", (12253, 12343), False, 'import dlt\n'), ((12509, 12631), 'dlt.pipeline', 'dlt.pipeline', ([], {'pipeline_name': '"""train_ticket_processed"""', 'destination': '"""duckdb"""', 'dataset_name': '"""train_ticket_processed_data"""'}), "(pipeline_name='train_ticket_processed', destination='duckdb',\n dataset_name='train_ticket_processed_data')\n", (12521, 12631), False, 'import dlt\n'), ((1483, 1527), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': '"""auto"""', 'target': '"""en"""'}), "(source='auto', target='en')\n", (1499, 1527), 
False, 'from deep_translator import GoogleTranslator\n'), ((2373, 2405), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2387, 2405), False, 'import os\n'), ((4554, 4586), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (4568, 4586), False, 'import os\n'), ((1817, 1829), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1827, 1829), False, 'import uuid\n')]
import logging from time import sleep from langchain.llms import OpenAI from scrapy import Request, Spider from selenium import webdriver from selenium.webdriver.common.keys import Keys from conf import ( CONNECTION_REQUEST_LLM_PROMPT, DEFAULT_CONNECTION_MESSAGE, MAX_PROFILES_TO_CONNECT, MAX_PROFILES_TO_SCRAPE, OPENAI_API_KEY, ROLES_KEYWORDS, SELECTIVE_SCRAPING, SEND_CONNECTION_REQUESTS, ) from linkedin.integrations.linkedin_api import extract_profile_from_url from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none from linkedin.items import LinkedinUser from linkedin.middlewares.selenium import SeleniumSpiderMixin logger = logging.getLogger(__name__) SLEEP_TIME_BETWEEN_CLICKS = 1.5 roles_keywords_lowercase = [role.lower() for role in ROLES_KEYWORDS] def remove_non_bmp_characters(text): return "".join(c for c in text if 0x0000 <= ord(c) <= 0xFFFF) def remove_primary_language(text): lines = text.split("\n") filtered_lines = [line for line in lines if "primary language" not in line.lower()] return "\n".join(filtered_lines) def is_your_network_is_growing_present(driver): got_it_button = get_by_xpath_or_none( driver, '//button[@aria-label="Got it"]', wait_timeout=0.5, ) return got_it_button is not None def is_email_verifier_present(driver): email_verifier = get_by_xpath_or_none( driver, "//label[@for='email']", wait_timeout=0.5, ) return email_verifier is not None def send_connection_request(driver, message): sleep(SLEEP_TIME_BETWEEN_CLICKS) # Click the "Add a note" button add_note_button = get_by_xpath_or_none( driver, "//button[contains(@aria-label, 'note')]", ) click(driver, add_note_button) if add_note_button else logger.warning( "Add note button unreachable" ) sleep(SLEEP_TIME_BETWEEN_CLICKS) # Write the message in the textarea message_textarea = get_by_xpath_or_none( driver, "//textarea[@name='message' and @id='custom-message']", ) message_textarea.send_keys(message[:300]) if message_textarea else logger.warning( "Textarea unreachable" ) sleep(SLEEP_TIME_BETWEEN_CLICKS) # Click the "Send" button send_button = get_by_xpath_or_none( driver, "//button[@aria-label='Send now']", ) click(driver, send_button) if send_button else logger.warning( "Send button unreachable" ) sleep(SLEEP_TIME_BETWEEN_CLICKS) return True def skip_connection_request(connect_button): return not (connect_button and SEND_CONNECTION_REQUESTS) def contains_keywords(user_profile): headline = user_profile["headline"].lower() return any(role in headline for role in roles_keywords_lowercase) def skip_profile(user_profile): return SELECTIVE_SCRAPING and not contains_keywords(user_profile) def generate_connection_message(llm: OpenAI, user_profile): from langchain import PromptTemplate prompt_template = PromptTemplate.from_template(CONNECTION_REQUEST_LLM_PROMPT) prompt = prompt_template.format(profile=user_profile) logger.debug(f"Generate message with prompt:\n{prompt}:") msg = llm.predict(prompt).strip() msg = remove_primary_language(msg).strip() msg = remove_non_bmp_characters(msg).strip() logger.info(f"Generated Icebreaker:\n{msg}") return msg def extract_connect_button(user_container): connect_button = get_by_xpath_or_none( user_container, ".//button[contains(@aria-label, 'connect')]/span", wait_timeout=5, ) return ( connect_button if connect_button else logger.debug("Connect button not found") ) def increment_index_at_end_url(response): # incrementing the index at the end of the url url = response.request.url next_url_split = url.split("=") index = int(next_url_split[-1]) next_url = 
"=".join(next_url_split[:-1]) + "=" + str(index + 1) return index, next_url def extract_user_url(user_container): # Use this XPath to select the <a> element link_elem = get_by_xpath_or_none( user_container, ".//a[contains(@class, 'app-aware-link') and contains(@href, '/in/')]", ) if not link_elem: logger.warning("Can't extract user URL") return None user_url = link_elem.get_attribute("href") logger.debug(f"Extracted user URL: {user_url}") return user_url def click(driver, element): driver.execute_script("arguments[0].scrollIntoView();", element) driver.execute_script("arguments[0].click();", element) def press_exit(driver): webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform() class SearchSpider(Spider, SeleniumSpiderMixin): """ Abstract class for generic search on linkedin. """ allowed_domains = ("linkedin.com",) def __init__(self, start_url, driver=None, name=None, *args, **kwargs): super().__init__(name=name, *args, **kwargs) self.start_url = start_url self.driver = driver or build_driver() self.user_profile = None self.profile_counter = 0 self.connections_sent_counter = 0 self.llm = ( OpenAI( max_tokens=90, model_name="text-davinci-003", openai_api_key=OPENAI_API_KEY, ) if SEND_CONNECTION_REQUESTS else None ) def wait_page_completion(self, driver): """ Abstract function, used to customize how the specific spider must wait for a search page completion. """ get_by_xpath_or_none(driver, "//*[@id='global-nav']/div", wait_timeout=5) def parse_search_list(self, response): continue_scrape = True driver = self.get_driver_from_response(response) if self.check_if_no_results_found(driver): logger.warning("No results found. Stopping crawl.") return for user_container in self.iterate_containers(driver): if is_your_network_is_growing_present(driver): press_exit(driver) user_profile_url = extract_user_url(user_container) if user_profile_url is None: continue logger.debug(f"Found user URL:{user_profile_url}") self.user_profile = extract_profile_from_url( user_profile_url, driver.get_cookies() ) if self.should_stop(response): continue_scrape = False break connect_button = extract_connect_button(user_container) if skip_profile(self.user_profile): logger.info(f"Skipped profile: {user_profile_url}") else: message = ( generate_connection_message(self.llm, self.user_profile) if OPENAI_API_KEY else DEFAULT_CONNECTION_MESSAGE ) self.user_profile["connection_msg"] = ( message if OPENAI_API_KEY else None ) if skip_connection_request(connect_button): logger.info(f"Skipped connection request: {user_profile_url}") else: click(driver, connect_button) if is_email_verifier_present(driver): press_exit(driver) else: conn_sent = send_connection_request(driver, message=message) logger.info( f"Connection request sent to {user_profile_url}\n{message}" ) if conn_sent else None self.connections_sent_counter += 1 yield LinkedinUser(linkedinUrl=user_profile_url, **self.user_profile) self.profile_counter += 1 if continue_scrape: next_url = self.get_next_url(response) yield self.create_next_request(next_url, response) def get_driver_from_response(self, response): return response.meta.pop("driver") def check_if_no_results_found(self, driver): no_result_found_xpath = ( "//div[contains(@class, 'search-reusable-search-no-results')]" ) return ( get_by_xpath_or_none( driver=driver, xpath=no_result_found_xpath, wait_timeout=3 ) is not None ) def get_next_url(self, response): index, next_url = increment_index_at_end_url(response) return next_url def create_next_request(self, next_url, response): return Request( url=next_url, 
priority=-1, callback=self.parse_search_list, meta=response.meta, ) def iterate_containers(self, driver): for i in range(1, 11): container_xpath = f"//li[contains(@class, 'result-container')][{i}]" container_elem = get_by_xpath_or_none( driver, container_xpath, wait_timeout=2 ) if container_elem: logger.debug(f"Loading {i}th user") driver.execute_script("arguments[0].scrollIntoView();", container_elem) self.sleep() yield container_elem def should_stop(self, response): max_num_profiles = self.profile_counter >= MAX_PROFILES_TO_SCRAPE if max_num_profiles: logger.info( "Stopping Reached maximum number of profiles to scrape. Stopping crawl." ) max_num_connections = self.connections_sent_counter >= MAX_PROFILES_TO_CONNECT if max_num_connections: logger.info( "Stopping Reached maximum number of profiles to connect. Stopping crawl." ) return max_num_profiles
[ "langchain.llms.OpenAI", "langchain.PromptTemplate.from_template" ]
[((689, 716), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (706, 716), False, 'import logging\n'), ((1186, 1271), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', '"""//button[@aria-label="Got it"]"""'], {'wait_timeout': '(0.5)'}), '(driver, \'//button[@aria-label="Got it"]\', wait_timeout=0.5\n )\n', (1206, 1271), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((1397, 1468), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', '"""//label[@for=\'email\']"""'], {'wait_timeout': '(0.5)'}), '(driver, "//label[@for=\'email\']", wait_timeout=0.5)\n', (1417, 1468), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((1590, 1622), 'time.sleep', 'sleep', (['SLEEP_TIME_BETWEEN_CLICKS'], {}), '(SLEEP_TIME_BETWEEN_CLICKS)\n', (1595, 1622), False, 'from time import sleep\n'), ((1682, 1753), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', '"""//button[contains(@aria-label, \'note\')]"""'], {}), '(driver, "//button[contains(@aria-label, \'note\')]")\n', (1702, 1753), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((1900, 1932), 'time.sleep', 'sleep', (['SLEEP_TIME_BETWEEN_CLICKS'], {}), '(SLEEP_TIME_BETWEEN_CLICKS)\n', (1905, 1932), False, 'from time import sleep\n'), ((1997, 2085), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', '"""//textarea[@name=\'message\' and @id=\'custom-message\']"""'], {}), '(driver,\n "//textarea[@name=\'message\' and @id=\'custom-message\']")\n', (2017, 2085), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((2233, 2265), 'time.sleep', 'sleep', (['SLEEP_TIME_BETWEEN_CLICKS'], {}), '(SLEEP_TIME_BETWEEN_CLICKS)\n', (2238, 2265), False, 'from time import sleep\n'), ((2315, 2379), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', '"""//button[@aria-label=\'Send now\']"""'], {}), '(driver, "//button[@aria-label=\'Send now\']")\n', (2335, 2379), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((2514, 2546), 'time.sleep', 'sleep', (['SLEEP_TIME_BETWEEN_CLICKS'], {}), '(SLEEP_TIME_BETWEEN_CLICKS)\n', (2519, 2546), False, 'from time import sleep\n'), ((3058, 3117), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['CONNECTION_REQUEST_LLM_PROMPT'], {}), '(CONNECTION_REQUEST_LLM_PROMPT)\n', (3086, 3117), False, 'from langchain import PromptTemplate\n'), ((3504, 3612), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['user_container', '""".//button[contains(@aria-label, \'connect\')]/span"""'], {'wait_timeout': '(5)'}), '(user_container,\n ".//button[contains(@aria-label, \'connect\')]/span", wait_timeout=5)\n', (3524, 3612), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((4142, 4254), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['user_container', '""".//a[contains(@class, \'app-aware-link\') and contains(@href, \'/in/\')]"""'], {}), '(user_container,\n ".//a[contains(@class, \'app-aware-link\') and contains(@href, \'/in/\')]")\n', (4162, 4254), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((5656, 5729), 'linkedin.integrations.selenium.get_by_xpath_or_none', 
'get_by_xpath_or_none', (['driver', '"""//*[@id=\'global-nav\']/div"""'], {'wait_timeout': '(5)'}), '(driver, "//*[@id=\'global-nav\']/div", wait_timeout=5)\n', (5676, 5729), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((8691, 8783), 'scrapy.Request', 'Request', ([], {'url': 'next_url', 'priority': '(-1)', 'callback': 'self.parse_search_list', 'meta': 'response.meta'}), '(url=next_url, priority=-1, callback=self.parse_search_list, meta=\n response.meta)\n', (8698, 8783), False, 'from scrapy import Request, Spider\n'), ((5095, 5109), 'linkedin.integrations.selenium.build_driver', 'build_driver', ([], {}), '()\n', (5107, 5109), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((5251, 5339), 'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(90)', 'model_name': '"""text-davinci-003"""', 'openai_api_key': 'OPENAI_API_KEY'}), "(max_tokens=90, model_name='text-davinci-003', openai_api_key=\n OPENAI_API_KEY)\n", (5257, 5339), False, 'from langchain.llms import OpenAI\n'), ((8349, 8434), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', ([], {'driver': 'driver', 'xpath': 'no_result_found_xpath', 'wait_timeout': '(3)'}), '(driver=driver, xpath=no_result_found_xpath, wait_timeout=3\n )\n', (8369, 8434), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((9022, 9083), 'linkedin.integrations.selenium.get_by_xpath_or_none', 'get_by_xpath_or_none', (['driver', 'container_xpath'], {'wait_timeout': '(2)'}), '(driver, container_xpath, wait_timeout=2)\n', (9042, 9083), False, 'from linkedin.integrations.selenium import build_driver, get_by_xpath_or_none\n'), ((4675, 4705), 'selenium.webdriver.ActionChains', 'webdriver.ActionChains', (['driver'], {}), '(driver)\n', (4697, 4705), False, 'from selenium import webdriver\n'), ((7808, 7871), 'linkedin.items.LinkedinUser', 'LinkedinUser', ([], {'linkedinUrl': 'user_profile_url'}), '(linkedinUrl=user_profile_url, **self.user_profile)\n', (7820, 7871), False, 'from linkedin.items import LinkedinUser\n')]
import streamlit as st
import os
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
from PyPDF2 import PdfReader # Import
#import textwrap
import openai

from langchain.llms import AzureOpenAI, OpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index.vector_stores import RedisVectorStore
from llama_index import LangchainEmbedding
from llama_index import (
    GPTVectorStoreIndex,
    SimpleDirectoryReader,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
    StorageContext
)

import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO) # logging.DEBUG for more verbose output
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", "6379")
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "")

OPENAI_API_TYPE = os.getenv("OPENAI_API_TYPE", "")
OPENAI_COMPLETIONS_ENGINE = os.getenv("OPENAI_COMPLETIONS_ENGINE", "text-davinci-003")
OPENAI_EMBEDDINGS_ENGINE = os.getenv("OPENAI_EMBEDDINGS_ENGINE", "text-embedding-ada-002")

STORAGE_CONNECTION_STRING=os.getenv("STORAGE_CONNECTION_STRING", "")
CONTAINER_NAME=os.getenv("CONTAINER_NAME", "data")

def get_embeddings():
    if OPENAI_API_TYPE=="azure":
        #currently Azure OpenAI embeddings require request for service limit increase to be useful
        #using build-in HuggingFace instead
        #from langchain.embeddings import HuggingFaceEmbeddings
        #embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        from langchain.embeddings import OpenAIEmbeddings
        embeddings = OpenAIEmbeddings(deployment=OPENAI_EMBEDDINGS_ENGINE, chunk_size=1 )
    else:
        from langchain.embeddings import OpenAIEmbeddings
        # Init OpenAI Embeddings
        embeddings = OpenAIEmbeddings()
    return embeddings

def get_llm():
    if OPENAI_API_TYPE=="azure":
        openai.api_type = "azure"
        openai.api_base = os.getenv("OPENAI_API_BASE")
        openai.api_version = os.getenv("OPENAI_API_VERSION")
        openai.api_key = os.getenv("OPENAI_API_KEY")
        text_model_deployment = OPENAI_COMPLETIONS_ENGINE
        from langchain.llms import AzureOpenAI
        llm = AzureOpenAI(deployment_name=text_model_deployment, model_kwargs={
            "api_key": openai.api_key,
            "api_base": openai.api_base,
            "api_type": openai.api_type,
            "api_version": openai.api_version,
        })
        #llm_predictor = LLMPredictor(llm=llm)
    else:
        from langchain.llms import OpenAI
        llm=OpenAI()
    return llm

@st.cache_resource
def get_query_engine():
    blob_service_client = BlobServiceClient.from_connection_string(STORAGE_CONNECTION_STRING)
    container_client = blob_service_client.get_container_client(container=CONTAINER_NAME)

    download_file_path = "/tmp/docs"
    isExist = os.path.exists(download_file_path)
    if not isExist:
        os.makedirs(download_file_path)

    # List the blobs in the container
    blob_list = container_client.list_blobs()
    for blob in blob_list:
        print("\t" + blob.name)
        if not os.path.exists( download_file_path+ "/" + blob.name):
            print("\nDownloading blob to \n\t" + download_file_path+ "/" + blob.name)
            with open(file=download_file_path + "/" + blob.name, mode="wb") as download_file:
                download_file.write(container_client.download_blob(blob.name).readall())
        else:
            print("\nSkipping \n\t" + download_file_path+ "/" + blob.name)

    # load documents
    documents = SimpleDirectoryReader(download_file_path).load_data()
    print('Document ID:', documents[0].doc_id)

    from llama_index.storage.storage_context import StorageContext

    vector_store = RedisVectorStore(
        index_name="chevy_docs",
        index_prefix="llama",
        redis_url="rediss://default:{}@{}:{}".format(REDIS_PASSWORD,REDIS_HOST,REDIS_PORT),
        overwrite=True
    )

    llm_predictor = LLMPredictor(llm=get_llm())
    llm_embedding = LangchainEmbedding(get_embeddings())
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor,
        embed_model=llm_embedding,
    )
    storage_context = StorageContext.from_defaults(
        vector_store=vector_store
    )

    index = GPTVectorStoreIndex.from_documents(
        documents,
        storage_context=storage_context,
        service_context=service_context
    )
    return index.as_query_engine()

file = open("assets/app-info.md", "r")
st.markdown(file.read())

query_engine = get_query_engine()

user_query = st.text_input("Query:", 'What types of variants are available for the Chevrolet Colorado?')

try:
    response = query_engine.query(user_query)
except Exception as e:
    response = "Error: %s" % str(e)

st.markdown(str(response))
#print(str(response))
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.llms.OpenAI", "langchain.llms.AzureOpenAI" ]
[((558, 616), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (577, 616), False, 'import logging\n'), ((744, 780), 'os.getenv', 'os.getenv', (['"""REDIS_HOST"""', '"""localhost"""'], {}), "('REDIS_HOST', 'localhost')\n", (753, 780), False, 'import os\n'), ((794, 825), 'os.getenv', 'os.getenv', (['"""REDIS_PORT"""', '"""6379"""'], {}), "('REDIS_PORT', '6379')\n", (803, 825), False, 'import os\n'), ((843, 874), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""', '""""""'], {}), "('REDIS_PASSWORD', '')\n", (852, 874), False, 'import os\n'), ((894, 926), 'os.getenv', 'os.getenv', (['"""OPENAI_API_TYPE"""', '""""""'], {}), "('OPENAI_API_TYPE', '')\n", (903, 926), False, 'import os\n'), ((955, 1013), 'os.getenv', 'os.getenv', (['"""OPENAI_COMPLETIONS_ENGINE"""', '"""text-davinci-003"""'], {}), "('OPENAI_COMPLETIONS_ENGINE', 'text-davinci-003')\n", (964, 1013), False, 'import os\n'), ((1041, 1104), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE', 'text-embedding-ada-002')\n", (1050, 1104), False, 'import os\n'), ((1133, 1175), 'os.getenv', 'os.getenv', (['"""STORAGE_CONNECTION_STRING"""', '""""""'], {}), "('STORAGE_CONNECTION_STRING', '')\n", (1142, 1175), False, 'import os\n'), ((1191, 1226), 'os.getenv', 'os.getenv', (['"""CONTAINER_NAME"""', '"""data"""'], {}), "('CONTAINER_NAME', 'data')\n", (1200, 1226), False, 'import os\n'), ((4640, 4735), 'streamlit.text_input', 'st.text_input', (['"""Query:"""', '"""What types of variants are available for the Chevrolet Colorado?"""'], {}), "('Query:',\n 'What types of variants are available for the Chevrolet Colorado?')\n", (4653, 4735), True, 'import streamlit as st\n'), ((688, 728), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (709, 728), False, 'import logging\n'), ((2701, 2768), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['STORAGE_CONNECTION_STRING'], {}), '(STORAGE_CONNECTION_STRING)\n', (2741, 2768), False, 'from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n'), ((2910, 2944), 'os.path.exists', 'os.path.exists', (['download_file_path'], {}), '(download_file_path)\n', (2924, 2944), False, 'import os\n'), ((4135, 4224), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'llm_embedding'}), '(llm_predictor=llm_predictor, embed_model=\n llm_embedding)\n', (4163, 4224), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((4265, 4320), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4293, 4320), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4347, 4463), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=\n storage_context, service_context=service_context)\n', (4381, 4463), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((657, 676), 'logging.getLogger', 
'logging.getLogger', ([], {}), '()\n', (674, 676), False, 'import logging\n'), ((1645, 1712), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': 'OPENAI_EMBEDDINGS_ENGINE', 'chunk_size': '(1)'}), '(deployment=OPENAI_EMBEDDINGS_ENGINE, chunk_size=1)\n', (1661, 1712), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1836, 1854), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1852, 1854), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1987, 2015), 'os.getenv', 'os.getenv', (['"""OPENAI_API_BASE"""'], {}), "('OPENAI_API_BASE')\n", (1996, 2015), False, 'import os\n'), ((2045, 2076), 'os.getenv', 'os.getenv', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (2054, 2076), False, 'import os\n'), ((2102, 2129), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2111, 2129), False, 'import os\n'), ((2249, 2443), 'langchain.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'text_model_deployment', 'model_kwargs': "{'api_key': openai.api_key, 'api_base': openai.api_base, 'api_type': openai\n .api_type, 'api_version': openai.api_version}"}), "(deployment_name=text_model_deployment, model_kwargs={'api_key':\n openai.api_key, 'api_base': openai.api_base, 'api_type': openai.\n api_type, 'api_version': openai.api_version})\n", (2260, 2443), False, 'from langchain.llms import AzureOpenAI\n'), ((2605, 2613), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2611, 2613), False, 'from langchain.llms import OpenAI\n'), ((2973, 3004), 'os.makedirs', 'os.makedirs', (['download_file_path'], {}), '(download_file_path)\n', (2984, 3004), False, 'import os\n'), ((3164, 3216), 'os.path.exists', 'os.path.exists', (["(download_file_path + '/' + blob.name)"], {}), "(download_file_path + '/' + blob.name)\n", (3178, 3216), False, 'import os\n'), ((3615, 3656), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['download_file_path'], {}), '(download_file_path)\n', (3636, 3656), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n')]
from langchain.chat_models import ChatOpenAI from langchain.schema import HumanMessage, SystemMessage from whenx.models.team import Team from whenx.models.scout import Scout from whenx.models.sentinel import Sentinel from whenx.models.soldier import Soldier import re from whenx.database import db class Captain: def __init__(self, mission: str): self.mission = mission def run(self, team): prompts = self.generate_prompts() team = self.create_team(prompts, team) return team def initialize_team(self, prompts, team): db.add(team) db.commit() scout = Scout(instruction=prompts["scout"], teamId=team.id) sentinel = Sentinel(instruction=prompts["sentinel"], teamId=team.id) soldier = Soldier(instruction=prompts["soldier"], teamId=team.id) db.add(scout) db.add(sentinel) db.add(soldier) db.commit() return team def generate_prompts(self): system = """You are the captain of a team of scouts, sentinels, and soldiers. You generate instructions for your team to follow based on a mission. Scouts are responsible for gathering information from the internet. Sentinels are responsible for monitoring the observations of scouts for changes. Soldiers are responsible for writing reports. Instruction examples: Mission: When apple relseases a new product. Scout: What is the new apple product? return the answer. Sentinel: Was a new product released? Reply with (Yes/No) and the name of the product. Soldier: Write a report about it. """ prompt = f""" Complete the instructions for the scouts, sentinels, and soldiers. One per line. Mission:{self.mission} """ model = ChatOpenAI(model="gpt-4", temperature=0) messages = [ SystemMessage( content=system ), HumanMessage(content=prompt), ] response = model(messages) response = self.parse_response(response.content) return response def parse_response(self, response): lines = re.split(r'\n+', response.strip()) # Extract the relevant information from the lines prompts = {} prompts["scout"] = lines[0].split(": ")[1] prompts["sentinel"] = lines[1].split(": ")[1] prompts["soldier"] = lines[2].split(": ")[1] return prompts
[ "langchain.schema.SystemMessage", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((575, 587), 'whenx.database.db.add', 'db.add', (['team'], {}), '(team)\n', (581, 587), False, 'from whenx.database import db\n'), ((596, 607), 'whenx.database.db.commit', 'db.commit', ([], {}), '()\n', (605, 607), False, 'from whenx.database import db\n'), ((624, 675), 'whenx.models.scout.Scout', 'Scout', ([], {'instruction': "prompts['scout']", 'teamId': 'team.id'}), "(instruction=prompts['scout'], teamId=team.id)\n", (629, 675), False, 'from whenx.models.scout import Scout\n'), ((695, 752), 'whenx.models.sentinel.Sentinel', 'Sentinel', ([], {'instruction': "prompts['sentinel']", 'teamId': 'team.id'}), "(instruction=prompts['sentinel'], teamId=team.id)\n", (703, 752), False, 'from whenx.models.sentinel import Sentinel\n'), ((771, 826), 'whenx.models.soldier.Soldier', 'Soldier', ([], {'instruction': "prompts['soldier']", 'teamId': 'team.id'}), "(instruction=prompts['soldier'], teamId=team.id)\n", (778, 826), False, 'from whenx.models.soldier import Soldier\n'), ((835, 848), 'whenx.database.db.add', 'db.add', (['scout'], {}), '(scout)\n', (841, 848), False, 'from whenx.database import db\n'), ((857, 873), 'whenx.database.db.add', 'db.add', (['sentinel'], {}), '(sentinel)\n', (863, 873), False, 'from whenx.database import db\n'), ((882, 897), 'whenx.database.db.add', 'db.add', (['soldier'], {}), '(soldier)\n', (888, 897), False, 'from whenx.database import db\n'), ((906, 917), 'whenx.database.db.commit', 'db.commit', ([], {}), '()\n', (915, 917), False, 'from whenx.database import db\n'), ((1719, 1759), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (1729, 1759), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1793, 1822), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system'}), '(content=system)\n', (1806, 1822), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((1866, 1894), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (1878, 1894), False, 'from langchain.schema import HumanMessage, SystemMessage\n')]
import json import re from langchain.chains import RetrievalQA from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout from langchain import LLMChain from langchain.chat_models import ChatOpenAI def section_schemas(heading, keyword, format_instructions, retriever, prompt): chat = ChatOpenAI( temperature=0, model_name='gpt-3.5-turbo-16k-0613' ) llm = LLMChain(llm=chat, prompt=prompt) if "Introduction" in heading: return 'none' elif "introduction" in heading: return 'none' try: with Timeout(60): qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=retriever) print("<----- closest") print(qa.run(heading)) closest = qa.run(heading) print("<----- closest end") except Timeout.Timeout: print("<---- excepting out of qa") return "nothing" if len(closest)<350: return 'none' temp = """ Don't repeat anything you've already said. Output in html format with subheadings. Do not write anything about Artificial Intelligence. If anything is about artificial intelligence remove it. Make sure to write as a blog writer NOT as the manufacturer. Don't start the intro with 'Yes'. Remember to have the closing quotation marks and closing curly bracket for the JSON. Remember - DO NOT add any titles, subtitles or intro before the blog section. Only add in subheadings (h3) where applicable to break up the text. Only add h3 heading every 150 to 250 words. Put the subheadings in html 'h3' tags and the content in 'p' tags. Use ordered and unordered lists where applicable. Write 8, 60 word paragraphs for my blog section with subheadings for my article about "{keyword}". Use the context below to create the blog section. There should be at least 6-9 paragraph 60 word paragraphs. Use this context (real article summaries) to create the intro. Context: {context} Format the output as JSON with the following keys: blog_section {format_instructions} Final Checks: Don't repeat anything you've already said. Are there 1 or 2 subheadings? If not, add them. Do not say 'Sure!' Are any of the paragraphs longer than 80 words? If so, break them up into smaller paragraphs. Is the entire thing under 350 words? If so, lengthen it. Is there a closing quotation mark for the JSON content? If not, add one. Make sure to include the opening and closing brackets of the JSON. 
Section: """ messages = temp.format( format_instructions=format_instructions, keyword=keyword, heading=heading, context=closest, ) output_dict = llm.run(input=messages) print("<-- output dict start for "+heading) print(output_dict) print(heading+r"\n\n" in output_dict) print("<-- output dict end") output_dict = output_dict.replace("\\'","'") output_dict = output_dict.replace('\\"',"'") output_dict = remove_extra_heading(output_dict, heading) result = re.findall(r'{([^{]*?)}', str(output_dict)) if len(result)>0: try: t_res = result[0].strip().replace('“',"'") t_res = t_res.replace('"',"'") nth=find_nth(t_res, "'",3) nth_text = t_res[nth+1:] res_2 = add_json_characters(nth_text) except: print("res2 second") pass else: stripped_output = output_dict.replace("{","") stripped_output = stripped_output.strip() if stripped_output.startswith('"blog_section":'): t_res = stripped_output.replace('"',"'") t_res = t_res.replace('“',"'") nth=find_nth(t_res, "'",3) nth_text = t_res[nth+1:] res_2 = add_json_characters(nth_text) else: test_res = '{"blog_section": "'+stripped_output.replace('"',"'") period_index = test_res.rfind(".") + 1 res_2 = test_res[:period_index]+'</p>"}' if "I apologize" not in str(res_2): print("is not in string") try: new_response = json.loads(str(res_2), strict=False) new_response = new_response['blog_section'] except: new_response = res_2 else: new_response = res_2 print("<---section start") print("section for "+heading) print(new_response) print("<---section end") return new_response
[ "langchain.LLMChain", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI" ]
[((325, 387), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo-16k-0613"""'}), "(temperature=0, model_name='gpt-3.5-turbo-16k-0613')\n", (335, 387), False, 'from langchain.chat_models import ChatOpenAI\n'), ((433, 466), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'prompt'}), '(llm=chat, prompt=prompt)\n', (441, 466), False, 'from langchain import LLMChain\n'), ((3113, 3155), 'utils.functions.remove_extra_heading', 'remove_extra_heading', (['output_dict', 'heading'], {}), '(output_dict, heading)\n', (3133, 3155), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n'), ((618, 629), 'utils.functions.Timeout', 'Timeout', (['(60)'], {}), '(60)\n', (625, 629), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n'), ((648, 726), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=chat, chain_type='stuff', retriever=retriever)\n", (675, 726), False, 'from langchain.chains import RetrievalQA\n'), ((3363, 3386), 'utils.functions.find_nth', 'find_nth', (['t_res', '"""\'"""', '(3)'], {}), '(t_res, "\'", 3)\n', (3371, 3386), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n'), ((3443, 3472), 'utils.functions.add_json_characters', 'add_json_characters', (['nth_text'], {}), '(nth_text)\n', (3462, 3472), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n'), ((3825, 3848), 'utils.functions.find_nth', 'find_nth', (['t_res', '"""\'"""', '(3)'], {}), '(t_res, "\'", 3)\n', (3833, 3848), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n'), ((3905, 3934), 'utils.functions.add_json_characters', 'add_json_characters', (['nth_text'], {}), '(nth_text)\n', (3924, 3934), False, 'from utils.functions import find_nth, remove_extra_heading, add_json_characters, Timeout\n')]
"""Experiment with different models.""" from __future__ import annotations from typing import List, Optional, Sequence from langchain_core.language_models.llms import BaseLLM from langchain_core.prompts.prompt import PromptTemplate from langchain_core.utils.input import get_color_mapping, print_text from langchain.chains.base import Chain from langchain.chains.llm import LLMChain class ModelLaboratory: """Experiment with different models.""" def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None): """Initialize with chains to experiment with. Args: chains: list of chains to experiment with. """ for chain in chains: if not isinstance(chain, Chain): raise ValueError( "ModelLaboratory should now be initialized with Chains. " "If you want to initialize with LLMs, use the `from_llms` method " "instead (`ModelLaboratory.from_llms(...)`)" ) if len(chain.input_keys) != 1: raise ValueError( "Currently only support chains with one input variable, " f"got {chain.input_keys}" ) if len(chain.output_keys) != 1: raise ValueError( "Currently only support chains with one output variable, " f"got {chain.output_keys}" ) if names is not None: if len(names) != len(chains): raise ValueError("Length of chains does not match length of names.") self.chains = chains chain_range = [str(i) for i in range(len(self.chains))] self.chain_colors = get_color_mapping(chain_range) self.names = names @classmethod def from_llms( cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None ) -> ModelLaboratory: """Initialize with LLMs to experiment with and optional prompt. Args: llms: list of LLMs to experiment with prompt: Optional prompt to use to prompt the LLMs. Defaults to None. If a prompt was provided, it should only have one input variable. """ if prompt is None: prompt = PromptTemplate(input_variables=["_input"], template="{_input}") chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms] names = [str(llm) for llm in llms] return cls(chains, names=names) def compare(self, text: str) -> None: """Compare model outputs on an input text. If a prompt was provided with starting the laboratory, then this text will be fed into the prompt. If no prompt was provided, then the input text is the entire prompt. Args: text: input text to run all models on. """ print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201 for i, chain in enumerate(self.chains): if self.names is not None: name = self.names[i] else: name = str(chain) print_text(name, end="\n") output = chain.run(text) print_text(output, color=self.chain_colors[str(i)], end="\n\n")
[ "langchain_core.utils.input.print_text", "langchain_core.utils.input.get_color_mapping", "langchain_core.prompts.prompt.PromptTemplate", "langchain.chains.llm.LLMChain" ]
[((1752, 1782), 'langchain_core.utils.input.get_color_mapping', 'get_color_mapping', (['chain_range'], {}), '(chain_range)\n', (1769, 1782), False, 'from langchain_core.utils.input import get_color_mapping, print_text\n'), ((2307, 2370), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['_input']", 'template': '"""{_input}"""'}), "(input_variables=['_input'], template='{_input}')\n", (2321, 2370), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((2389, 2421), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2397, 2421), False, 'from langchain.chains.llm import LLMChain\n'), ((3138, 3164), 'langchain_core.utils.input.print_text', 'print_text', (['name'], {'end': '"""\n"""'}), "(name, end='\\n')\n", (3148, 3164), False, 'from langchain_core.utils.input import get_color_mapping, print_text\n')]
from typing import Any, List, Optional, Sequence, Tuple from langchain_core._api import deprecated from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.pydantic_v1 import Field from langchain_core.tools import BaseTool from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.chat.output_parser import ChatOutputParser from langchain.agents.chat.prompt import ( FORMAT_INSTRUCTIONS, HUMAN_MESSAGE, SYSTEM_MESSAGE_PREFIX, SYSTEM_MESSAGE_SUFFIX, ) from langchain.agents.utils import validate_tools_single_input from langchain.chains.llm import LLMChain @deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0") class ChatAgent(Agent): """Chat Agent.""" output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser) """Output parser for the agent.""" @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> str: agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError("agent_scratchpad should be of type string.") if agent_scratchpad: return ( f"This was your previous work " f"(but I haven't seen any of it! I only see what " f"you return as final answer):\n{agent_scratchpad}" ) else: return agent_scratchpad @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ChatOutputParser() @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(class_name=cls.__name__, tools=tools) @property def _stop(self) -> List[str]: return ["Observation:"] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, human_message: str = HUMAN_MESSAGE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, ) -> BasePromptTemplate: tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join( [ system_message_prefix, tool_strings, format_instructions, system_message_suffix, ] ) messages = [ SystemMessagePromptTemplate.from_template(template), HumanMessagePromptTemplate.from_template(human_message), ] if input_variables is None: input_variables = ["input", "agent_scratchpad"] return ChatPromptTemplate(input_variables=input_variables, messages=messages) # type: ignore[arg-type] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, human_message: str = HUMAN_MESSAGE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an 
agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, system_message_prefix=system_message_prefix, system_message_suffix=system_message_suffix, human_message=human_message, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @property def _agent_type(self) -> str: raise ValueError
[ "langchain_core.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.chains.llm.LLMChain", "langchain.agents.utils.validate_tools_single_input", "langchain_core.pydantic_v1.Field", "langchain_core.prompts.chat.ChatPromptTemplate", "langchain_core.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain_core._api.deprecated", "langchain.agents.chat.output_parser.ChatOutputParser" ]
[((915, 985), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (925, 985), False, 'from langchain_core._api import deprecated\n'), ((1072, 1111), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'ChatOutputParser'}), '(default_factory=ChatOutputParser)\n', (1077, 1111), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2134, 2152), 'langchain.agents.chat.output_parser.ChatOutputParser', 'ChatOutputParser', ([], {}), '()\n', (2150, 2152), False, 'from langchain.agents.chat.output_parser import ChatOutputParser\n'), ((2283, 2348), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', ([], {'class_name': 'cls.__name__', 'tools': 'tools'}), '(class_name=cls.__name__, tools=tools)\n', (2310, 2348), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3543, 3613), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'input_variables': 'input_variables', 'messages': 'messages'}), '(input_variables=input_variables, messages=messages)\n', (3561, 3613), False, 'from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((4630, 4697), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4638, 4697), False, 'from langchain.chains.llm import LLMChain\n'), ((3300, 3351), 'langchain_core.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (3341, 3351), False, 'from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((3365, 3420), 'langchain_core.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_message'], {}), '(human_message)\n', (3405, 3420), False, 'from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n')]
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2023 # SPDX-License-Identifier: Apache-2.0 from typing import Any, Dict, List, Optional from langchain.agents import tool from langchain.chains.base import Chain from langchain.chains import LLMChain from langchain import PromptTemplate from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) import chainlit as cl from chainlit.context import context from chainlit import run_sync from tabulate import tabulate from ..llm import get_bedrock_text, get_processed_prompt_template from .graph import GraphChain def get_tool_metadata(): return { "name": "3dview", "description": "Useful to teleport in 3D viewer to the equipment the user is interested in. \ Input to this tool should be the entityId of the equipment. \ Output is a string to confirm whether the view is found or not.", } @tool def run(input: str) -> str: """Identify the location of the object user is asking about.""" point_camera_to_entity(input) return 'Found it!' def point_camera_to_entity(entityId): run_sync(context.session.emit('view', entityId)) ENTITY_EXTRACTION_PROMPT = """ Your job is to identify the entity user is asking about based on the user question. Use the following format: Question: the input question from the user Entity: the phrase about the entity in the original question Only output the entity phrase, do not repeat the question. Here are some examples: Question: teleport me to the cookie line in alarm state Entity: the cookie line in alarm state Question: show me the freezer tunnel Entity: the freezer tunnel Question: show me the conveyer belt Entity: the conveyer belt Now begin! Question: {question} Entity: """ class EntityExtractorChain(Chain): """Chain to find the entity in the question.""" llm_chain: LLMChain @property def input_keys(self) -> List[str]: return ['question'] @property def output_keys(self) -> List[str]: return ['entity'] @classmethod def create(cls, **kwargs): llm = get_bedrock_text() prompt = PromptTemplate( template=get_processed_prompt_template(ENTITY_EXTRACTION_PROMPT), input_variables=["question"], ) llm_chain = LLMChain( llm=llm, prompt=prompt, **kwargs) return cls(llm_chain=llm_chain) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = self.llm_chain.run(callbacks=callbacks, **inputs) return { 'entity': output } async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = await self.llm_chain.arun(callbacks=callbacks, **inputs) return { 'entity': output } class ViewChain(Chain): """Chain that manipulates 3D viewer.""" entity_extractor: EntityExtractorChain entity_lookup: GraphChain @property def input_keys(self) -> List[str]: return ['question'] @property def output_keys(self) -> List[str]: return ['text', 'selected_entity'] @classmethod def create(cls, **kwargs): entity_extractor = EntityExtractorChain.create(**kwargs) entity_lookup = GraphChain.create(**kwargs) return cls(entity_extractor=entity_extractor, entity_lookup=entity_lookup, **kwargs) def pick_entity(self, entities): if entities.shape[0] > 1: headers = ['No', 'Name', 'Id'] rows = [[i + 1, row.entityName, row.entityId] for i, row in 
entities.items()] entity_table = tabulate(rows, headers=headers, tablefmt="pipe") run_sync(cl.Message(content="I've found these matching entities:\n\n" + entity_table).send()) res = run_sync(cl.AskUserMessage(content="Which one do you mean?").send()) if res is not None: # TODO: use a LLMChain to parse the user input idx = int(res['content']) - 1 entityId = entities.iloc[idx].entityId else: entityId = None else: entityId = entities.iloc[0].entityId return entityId async def apick_entity(self, entities): if entities.shape[0] > 1: headers = ['No', 'Name', 'Id'] rows = [[i + 1, row.entityName, row.entityId] for i, row in entities.items()] entity_table = tabulate(rows, headers=headers, tablefmt="pipe") await cl.Message(content="I've found these matching entities:\n\n" + entity_table).send() res = await cl.AskUserMessage(content="Which one do you mean?").send() if res is not None: # TODO: use a LLMChain to parse the user input idx = int(res['content']) - 1 entityId = entities.iloc[idx].entityId else: entityId = None else: entityId = entities.iloc[0].entityId return entityId def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() entity = self.entity_extractor.run(callbacks=callbacks, **inputs) df = self.entity_lookup.run( callbacks, { "question": "Find all entities matching the description: " + entity, "format_output": False }) # TODO: handle the column detection better if df.shape[0] < 1 or df.columns[0] != 'e': return { 'text': "I didn't find any result.", 'selected_entity': '' } entities = df[df.columns[0]] entityId = self.pick_entity(entities) if entityId is None: return { 'text': "I didn't find any result.", 'selected_entity': '' } point_camera_to_entity(entityId) return { 'text': f"I've pointed you to the {entityId} in the 3D Viewer.", 'selected_entity': entityId } async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() entity = await self.entity_extractor.arun(callbacks=callbacks, **inputs) df = await self.entity_lookup.arun( **{ "question": "Find all entities matching the description: " + entity, "format_output": False }) # TODO: handle the column detection better if df.shape[0] < 1 or df.columns[0] != 'e': return { 'text': "I didn't find any result.", 'selected_entity': '' } entities = df[df.columns[0]] entityId = await self.apick_entity(entities) if entityId is None: return { 'text': "I didn't find any result.", 'selected_entity': '' } point_camera_to_entity(entityId) return { 'text': f"I've pointed you to the {entityId} in the 3D Viewer.", 'selected_entity': entityId }
[ "langchain.chains.LLMChain", "langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager" ]
[((1178, 1216), 'chainlit.context.context.session.emit', 'context.session.emit', (['"""view"""', 'entityId'], {}), "('view', entityId)\n", (1198, 1216), False, 'from chainlit.context import context\n'), ((2370, 2412), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **kwargs)\n', (2378, 2412), False, 'from langchain.chains import LLMChain\n'), ((2681, 2726), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2724, 2726), False, 'from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((3098, 3143), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3141, 3143), False, 'from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((4150, 4198), 'tabulate.tabulate', 'tabulate', (['rows'], {'headers': 'headers', 'tablefmt': '"""pipe"""'}), "(rows, headers=headers, tablefmt='pipe')\n", (4158, 4198), False, 'from tabulate import tabulate\n'), ((5033, 5081), 'tabulate.tabulate', 'tabulate', (['rows'], {'headers': 'headers', 'tablefmt': '"""pipe"""'}), "(rows, headers=headers, tablefmt='pipe')\n", (5041, 5081), False, 'from tabulate import tabulate\n'), ((5864, 5909), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5907, 5909), False, 'from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((7168, 7213), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (7211, 7213), False, 'from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((4233, 4309), 'chainlit.Message', 'cl.Message', ([], {'content': '("I\'ve found these matching entities:\\n\\n" + entity_table)'}), '(content="I\'ve found these matching entities:\\n\\n" + entity_table)\n', (4243, 4309), True, 'import chainlit as cl\n'), ((4358, 4409), 'chainlit.AskUserMessage', 'cl.AskUserMessage', ([], {'content': '"""Which one do you mean?"""'}), "(content='Which one do you mean?')\n", (4375, 4409), True, 'import chainlit as cl\n'), ((5113, 5189), 'chainlit.Message', 'cl.Message', ([], {'content': '("I\'ve found these matching entities:\\n\\n" + entity_table)'}), '(content="I\'ve found these matching entities:\\n\\n" + entity_table)\n', (5123, 5189), True, 'import chainlit as cl\n'), ((5234, 5285), 'chainlit.AskUserMessage', 'cl.AskUserMessage', ([], {'content': '"""Which one do you mean?"""'}), "(content='Which one do you mean?')\n", (5251, 5285), True, 'import chainlit as cl\n')]
from langchain.retrievers import AmazonKendraRetriever from langchain.chains import RetrievalQA from langchain import OpenAI from langchain.prompts import PromptTemplate from langchain import SagemakerEndpoint from langchain.llms.sagemaker_endpoint import LLMContentHandler import json import os def build_chain(): region = os.environ["AWS_REGION"] kendra_index_id = os.environ["KENDRA_INDEX_ID"] endpoint_name = os.environ["FALCON_40B_ENDPOINT"] inference_component_name = os.environ["INFERENCE_COMPONENT_NAME"] class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes: input_str = json.dumps({"inputs": prompt, "parameters": model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) print(response_json) return response_json[0]["generated_text"] content_handler = ContentHandler() if 'inference_component_name' in locals(): llm=SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region, model_kwargs={"max_new_tokens": 1500, "top_p": 0.8,"temperature":0.6}, endpoint_kwargs={"CustomAttributes":"accept_eula=true", "InferenceComponentName":inference_component_name}, content_handler=content_handler, ) else : llm=SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region, model_kwargs={"max_new_tokens": 1500, "top_p": 0.8,"temperature":0.6}, content_handler=content_handler, ) retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region) prompt_template = """ The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. {context} Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know" if not present in the document. Solution:""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) chain_type_kwargs = {"prompt": PROMPT} qa = RetrievalQA.from_chain_type( llm, chain_type="stuff", retriever=retriever, chain_type_kwargs=chain_type_kwargs, return_source_documents=True ) return qa def run_chain(chain, prompt: str, history=[]): result = chain(prompt) # To make it compatible with chat samples return { "answer": result['result'], "source_documents": result['source_documents'] } if __name__ == "__main__": chain = build_chain() result = run_chain(chain, "What's SageMaker?") print(result['answer']) if 'source_documents' in result: print('Sources:') for d in result['source_documents']: print(d.metadata['source'])
[ "langchain.SagemakerEndpoint", "langchain.retrievers.AmazonKendraRetriever", "langchain.prompts.PromptTemplate", "langchain.chains.RetrievalQA.from_chain_type" ]
[((1839, 1906), 'langchain.retrievers.AmazonKendraRetriever', 'AmazonKendraRetriever', ([], {'index_id': 'kendra_index_id', 'region_name': 'region'}), '(index_id=kendra_index_id, region_name=region)\n', (1860, 1906), False, 'from langchain.retrievers import AmazonKendraRetriever\n'), ((2373, 2458), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (2387, 2458), False, 'from langchain.prompts import PromptTemplate\n'), ((2521, 2665), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'chain_type': '"""stuff"""', 'retriever': 'retriever', 'chain_type_kwargs': 'chain_type_kwargs', 'return_source_documents': '(True)'}), "(llm, chain_type='stuff', retriever=retriever,\n chain_type_kwargs=chain_type_kwargs, return_source_documents=True)\n", (2548, 2665), False, 'from langchain.chains import RetrievalQA\n'), ((1186, 1485), 'langchain.SagemakerEndpoint', 'SagemakerEndpoint', ([], {'endpoint_name': 'endpoint_name', 'region_name': 'region', 'model_kwargs': "{'max_new_tokens': 1500, 'top_p': 0.8, 'temperature': 0.6}", 'endpoint_kwargs': "{'CustomAttributes': 'accept_eula=true', 'InferenceComponentName':\n inference_component_name}", 'content_handler': 'content_handler'}), "(endpoint_name=endpoint_name, region_name=region,\n model_kwargs={'max_new_tokens': 1500, 'top_p': 0.8, 'temperature': 0.6},\n endpoint_kwargs={'CustomAttributes': 'accept_eula=true',\n 'InferenceComponentName': inference_component_name}, content_handler=\n content_handler)\n", (1203, 1485), False, 'from langchain import SagemakerEndpoint\n'), ((1590, 1770), 'langchain.SagemakerEndpoint', 'SagemakerEndpoint', ([], {'endpoint_name': 'endpoint_name', 'region_name': 'region', 'model_kwargs': "{'max_new_tokens': 1500, 'top_p': 0.8, 'temperature': 0.6}", 'content_handler': 'content_handler'}), "(endpoint_name=endpoint_name, region_name=region,\n model_kwargs={'max_new_tokens': 1500, 'top_p': 0.8, 'temperature': 0.6},\n content_handler=content_handler)\n", (1607, 1770), False, 'from langchain import SagemakerEndpoint\n'), ((758, 816), 'json.dumps', 'json.dumps', (["{'inputs': prompt, 'parameters': model_kwargs}"], {}), "({'inputs': prompt, 'parameters': model_kwargs})\n", (768, 816), False, 'import json\n')]
''' This script takes the True/False style questions from the csv file and save the result as another csv file. This script makes use of Llama model. Before running this script, make sure to configure the filepaths in config.yaml file. ''' from langchain import PromptTemplate, LLMChain from kg_rag.utility import * import sys QUESTION_PATH = config_data["TRUE_FALSE_PATH"] SYSTEM_PROMPT = system_prompts["TRUE_FALSE_QUESTION"] QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD = float(config_data["QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD"]) QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY = float(config_data["QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY"]) VECTOR_DB_PATH = config_data["VECTOR_DB_PATH"] NODE_CONTEXT_PATH = config_data["NODE_CONTEXT_PATH"] SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL = config_data["SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL"] SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL = config_data["SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL"] SAVE_PATH = config_data["SAVE_RESULTS_PATH"] MODEL_NAME = config_data["LLAMA_MODEL_NAME"] BRANCH_NAME = config_data["LLAMA_MODEL_BRANCH"] CACHE_DIR = config_data["LLM_CACHE_DIR"] CONTEXT_VOLUME = 100 save_name = "_".join(MODEL_NAME.split("/")[-1].split("-"))+"_one_hop_true_false_binary_response.csv" INSTRUCTION = "Context:\n\n{context} \n\nQuestion: {question}" vectorstore = load_chroma(VECTOR_DB_PATH, SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL) embedding_function_for_context_retrieval = load_sentence_transformer(SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL) node_context_df = pd.read_csv(NODE_CONTEXT_PATH) def main(): start_time = time.time() llm = llama_model(MODEL_NAME, BRANCH_NAME, CACHE_DIR) template = get_prompt(INSTRUCTION, SYSTEM_PROMPT) prompt = PromptTemplate(template=template, input_variables=["context", "question"]) llm_chain = LLMChain(prompt=prompt, llm=llm) question_df = pd.read_csv(QUESTION_PATH) answer_list = [] for index, row in question_df.iterrows(): question = row["text"] context = retrieve_context(question, vectorstore, embedding_function_for_context_retrieval, node_context_df, CONTEXT_VOLUME, QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD, QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY) output = llm_chain.run(context=context, question=question) answer_list.append((row["text"], row["label"], output)) answer_df = pd.DataFrame(answer_list, columns=["question", "label", "llm_answer"]) answer_df.to_csv(os.path.join(SAVE_PATH, save_name), index=False, header=True) print("Completed in {} min".format((time.time()-start_time)/60)) if __name__ == "__main__": main()
[ "langchain.LLMChain", "langchain.PromptTemplate" ]
[((1786, 1860), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (1800, 1860), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1877, 1909), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (1885, 1909), False, 'from langchain import PromptTemplate, LLMChain\n')]
import os from typing import Any, Callable from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain import registry from .base import BaseChat, ChatHistory, Response TEMPLATE = ''' You are a web3 assistant. You help users use web3 apps, such as Uniswap, AAVE, MakerDao, etc. You assist users in achieving their goals with these protocols, by providing users with relevant information, and creating transactions for users. Your responses should sound natural, helpful, cheerful, and engaging, and you should use easy to understand language with explanations for jargon. Information to help complete your task is below. Only use information below to answer the question, and create a final answer with references ("SOURCES"). If you don't know the answer, just say that you don't know. Don't try to make up an answer. ALWAYS return a "SOURCES" part in your answer. ----- {task_info} ----- User: {question} Assistant:''' # TODO: make this few-shot on real examples instead of dummy ones REPHRASE_TEMPLATE = ''' Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. You should assume that the question is related to web3. ## Example: Chat History: User: Who created Ethereum? Assistant: Vitalik Buterin Follow Up Input: What about AAVE? Standalone question: Who created AAVE? ## Example: Chat History: User: Who created Ethereum? Assistant: Vitalik Buterin User: What about AAVE? Assistant: Stani Kulechov Follow Up Input: When was that? Standalone question: When were Ethereum and AAVE created? ## Example: Chat History: User: Who created Ethereum? Assistant: Vitalik Buterin Follow Up Input: What is AAVE? Standalone question: What is AAVE? ## Example: Chat History: User: Who created Ethereum? Assistant: Vitalik Buterin User: What is AAVE? Assistant: AAVE is a decentralized finance protocol that allows users to borrow and lend digital assets. It is a protocol built on Ethereum and is powered by a native token, Aave. Follow Up Input: Bitoin? Standalone question: What is Bitcoin? 
## Example: Chat History: {history} Follow Up Input: {question} Standalone question:''' @registry.register_class class RephraseCitedChat(BaseChat): def __init__(self, doc_index: Any, top_k: int = 3, show_thinking: bool = True) -> None: super().__init__() self.prompt = PromptTemplate( input_variables=["task_info", "question"], template=TEMPLATE, ) self.llm = OpenAI(temperature=0.0, max_tokens=-1) self.chain = LLMChain(llm=self.llm, prompt=self.prompt) self.chain.verbose = True self.doc_index = doc_index self.top_k = top_k self.show_thinking = show_thinking self.rephrase_prompt = PromptTemplate( input_variables=["history", "question"], template=REPHRASE_TEMPLATE, ) self.rephrase_chain = LLMChain(llm=self.llm, prompt=self.rephrase_prompt) self.rephrase_chain.verbose = True def receive_input(self, history: ChatHistory, userinput: str, send: Callable) -> None: userinput = userinput.strip() if history: # First rephrase the question history_string = history.to_string() question = self.rephrase_chain.run({ "history": history_string.strip(), "question": userinput, "stop": "##", }).strip() rephrased = True else: question = userinput rephrased = False if self.show_thinking and rephrased and userinput != question: send(Response(response="I think you're asking: " + question, still_thinking=True)) docs = self.doc_index.similarity_search(question, k=self.top_k) task_info = '\n'.join([f'Content: {doc.page_content}\nSource: {doc.metadata["url"]}' for doc in docs]) result = self.chain.run({ "task_info": task_info, "question": question, "stop": "User", }) result = result.strip() history.add_interaction(userinput, result) send(Response(result))
[ "langchain.chains.LLMChain", "langchain.llms.OpenAI", "langchain.prompts.PromptTemplate" ]
[((2418, 2494), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['task_info', 'question']", 'template': 'TEMPLATE'}), "(input_variables=['task_info', 'question'], template=TEMPLATE)\n", (2432, 2494), False, 'from langchain.prompts import PromptTemplate\n'), ((2549, 2587), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.0)', 'max_tokens': '(-1)'}), '(temperature=0.0, max_tokens=-1)\n', (2555, 2587), False, 'from langchain.llms import OpenAI\n'), ((2609, 2651), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.prompt'}), '(llm=self.llm, prompt=self.prompt)\n', (2617, 2651), False, 'from langchain.chains import LLMChain\n'), ((2823, 2911), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question']", 'template': 'REPHRASE_TEMPLATE'}), "(input_variables=['history', 'question'], template=\n REPHRASE_TEMPLATE)\n", (2837, 2911), False, 'from langchain.prompts import PromptTemplate\n'), ((2972, 3023), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.rephrase_prompt'}), '(llm=self.llm, prompt=self.rephrase_prompt)\n', (2980, 3023), False, 'from langchain.chains import LLMChain\n')]
from typing import List from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import Chroma import langchain.docstore.document as docstore from loguru import logger from settings import COLLECTION_NAME, PERSIST_DIRECTORY from .vortex_pdf_parser import VortexPdfParser from .vortext_content_iterator import VortexContentIterator class VortexIngester: def __init__(self, content_folder: str): self.content_folder = content_folder def ingest(self) -> None: vortex_content_iterator = VortexContentIterator(self.content_folder) vortex_pdf_parser = VortexPdfParser() chunks: List[docstore.Document] = [] for document in vortex_content_iterator: vortex_pdf_parser.set_pdf_file_path(document) document_chunks = vortex_pdf_parser.clean_text_to_docs() chunks.extend(document_chunks) logger.info(f"Extracted {len(chunks)} chunks from {document}") embeddings = OpenAIEmbeddings(client=None) logger.info("Loaded embeddings") vector_store = Chroma.from_documents( chunks, embeddings, collection_name=COLLECTION_NAME, persist_directory=PERSIST_DIRECTORY, ) logger.info("Created Chroma vector store") vector_store.persist() logger.info("Persisted Chroma vector store")
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.Chroma.from_documents" ]
[((985, 1014), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'client': 'None'}), '(client=None)\n', (1001, 1014), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1023, 1055), 'loguru.logger.info', 'logger.info', (['"""Loaded embeddings"""'], {}), "('Loaded embeddings')\n", (1034, 1055), False, 'from loguru import logger\n'), ((1079, 1194), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['chunks', 'embeddings'], {'collection_name': 'COLLECTION_NAME', 'persist_directory': 'PERSIST_DIRECTORY'}), '(chunks, embeddings, collection_name=COLLECTION_NAME,\n persist_directory=PERSIST_DIRECTORY)\n', (1100, 1194), False, 'from langchain.vectorstores import Chroma\n'), ((1259, 1301), 'loguru.logger.info', 'logger.info', (['"""Created Chroma vector store"""'], {}), "('Created Chroma vector store')\n", (1270, 1301), False, 'from loguru import logger\n'), ((1341, 1385), 'loguru.logger.info', 'logger.info', (['"""Persisted Chroma vector store"""'], {}), "('Persisted Chroma vector store')\n", (1352, 1385), False, 'from loguru import logger\n')]
# -*- coding: utf-8 -*- import os import re import sys sys.path.append('.') sys.path.append('..') from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser from langchain.prompts import StringPromptTemplate from langchain import OpenAI, GoogleSearchAPIWrapper, LLMChain from typing import List, Union, Callable from langchain.schema import AgentAction, AgentFinish from langchain.vectorstores import FAISS from langchain.embeddings import OpenAIEmbeddings from langchain.schema import Document from utils.configs import configs os.environ["GOOGLE_CSE_ID"] = configs['tools']['google_cse_id'] os.environ["GOOGLE_API_KEY"] = configs['tools']['google_api_key'] os.environ["OPENAI_API_KEY"] = configs['openai_api_key'] # Set up the base template template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools: {tools} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s Question: {input} {agent_scratchpad}""" def fake_func(inp: str) -> str: return "foo" def get_tools(query): docs = retriever.get_relevant_documents(query) return [ALL_TOOLS[d.metadata["index"]] for d in docs] # Set up a prompt template class CustomPromptTemplate(StringPromptTemplate): # The template to use template: str ############## NEW ###################### # The list of tools available tools_getter: Callable def format(self, **kwargs) -> str: # Get the intermediate steps (AgentAction, Observation tuples) # Format them in a particular way intermediate_steps = kwargs.pop("intermediate_steps") thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\nObservation: {observation}\nThought: " # Set the agent_scratchpad variable to that value kwargs["agent_scratchpad"] = thoughts ############## NEW ###################### tools = self.tools_getter(kwargs["input"]) # Create a tools variable from the list of tools provided kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) # Create a list of tool names for the tools provided kwargs["tool_names"] = ", ".join([tool.name for tool in tools]) print(self.template.format(**kwargs)) return self.template.format(**kwargs) class CustomOutputParser(AgentOutputParser): def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: # Check if agent should finish if "Final Answer:" in llm_output: return AgentFinish( # Return values is generally always a dictionary with a single `output` key # It is not recommended to try anything else at the moment :) return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, log=llm_output, ) # Parse out the action and action input regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise ValueError(f"Could not parse LLM output: `{llm_output}`") action = match.group(1).strip() action_input = match.group(2) # Return the action and action input return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) if __name__ == '__main__': # 
Define which tools the agent can use to answer user queries search = GoogleSearchAPIWrapper() search_tool = Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events" ) fake_tools = [ Tool( name=f"foo-{i}", func=fake_func, description=f"a silly function that you can use to get more information about the number {i}" ) for i in range(99) ] ALL_TOOLS = [search_tool] + fake_tools # tools retrieval tool_lib = configs['demo_agents']['tool_faiss_index'] if os.path.exists(tool_lib): vector_store = FAISS.load_local(tool_lib, OpenAIEmbeddings()) else: docs = [Document(page_content=t.description, metadata={"index": i}) for i, t in enumerate(ALL_TOOLS)] vector_store = FAISS.from_documents(docs, OpenAIEmbeddings()) vector_store.save_local(tool_lib) retriever = vector_store.as_retriever() prompt = CustomPromptTemplate( template=template, tools_getter=get_tools, # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically # This includes the `intermediate_steps` variable because that is needed input_variables=["input", "intermediate_steps"] ) output_parser = CustomOutputParser() model_name = configs['model_name'] llm = OpenAI(model_name=model_name, temperature=0) # LLM chain consisting of the LLM and a prompt llm_chain = LLMChain(llm=llm, prompt=prompt) query = "What's the weather in SF?" tools = get_tools(query) tool_names = [tool.name for tool in tools] agent = LLMSingleActionAgent( llm_chain=llm_chain, output_parser=output_parser, stop=["\nObservation:"], allowed_tools=tool_names ) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True) agent_executor.run(query)
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.agents.LLMSingleActionAgent", "langchain.LLMChain", "langchain.GoogleSearchAPIWrapper", "langchain.schema.Document", "langchain.agents.Tool", "langchain.embeddings.OpenAIEmbeddings", "langchain.OpenAI" ]
[((55, 75), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (70, 75), False, 'import sys\n'), ((76, 97), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (91, 97), False, 'import sys\n'), ((4014, 4038), 'langchain.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (4036, 4038), False, 'from langchain import OpenAI, GoogleSearchAPIWrapper, LLMChain\n'), ((4058, 4180), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (4062, 4180), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((4577, 4601), 'os.path.exists', 'os.path.exists', (['tool_lib'], {}), '(tool_lib)\n', (4591, 4601), False, 'import os\n'), ((5400, 5444), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'model_name', 'temperature': '(0)'}), '(model_name=model_name, temperature=0)\n', (5406, 5444), False, 'from langchain import OpenAI, GoogleSearchAPIWrapper, LLMChain\n'), ((5512, 5544), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (5520, 5544), False, 'from langchain import OpenAI, GoogleSearchAPIWrapper, LLMChain\n'), ((5674, 5800), 'langchain.agents.LLMSingleActionAgent', 'LLMSingleActionAgent', ([], {'llm_chain': 'llm_chain', 'output_parser': 'output_parser', 'stop': "['\\nObservation:']", 'allowed_tools': 'tool_names'}), "(llm_chain=llm_chain, output_parser=output_parser, stop\n =['\\nObservation:'], allowed_tools=tool_names)\n", (5694, 5800), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((5856, 5930), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': '(True)'}), '(agent=agent, tools=tools, verbose=True)\n', (5890, 5930), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3542, 3581), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (3551, 3581), False, 'import re\n'), ((4233, 4375), 'langchain.agents.Tool', 'Tool', ([], {'name': 'f"""foo-{i}"""', 'func': 'fake_func', 'description': 'f"""a silly function that you can use to get more information about the number {i}"""'}), "(name=f'foo-{i}', func=fake_func, description=\n f'a silly function that you can use to get more information about the number {i}'\n )\n", (4237, 4375), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((4653, 4671), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4669, 4671), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((4699, 4758), 'langchain.schema.Document', 'Document', ([], {'page_content': 't.description', 'metadata': "{'index': i}"}), "(page_content=t.description, metadata={'index': i})\n", (4707, 4758), False, 'from langchain.schema import Document\n'), ((4843, 4861), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4859, 4861), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
import base64 from email.message import EmailMessage from typing import List, Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.gmail.base import GmailBaseTool class CreateDraftSchema(BaseModel): """Input for CreateDraftTool.""" message: str = Field( ..., description="The message to include in the draft.", ) to: List[str] = Field( ..., description="The list of recipients.", ) subject: str = Field( ..., description="The subject of the message.", ) cc: Optional[List[str]] = Field( None, description="The list of CC recipients.", ) bcc: Optional[List[str]] = Field( None, description="The list of BCC recipients.", ) class GmailCreateDraft(GmailBaseTool): """Tool that creates a draft email for Gmail.""" name: str = "create_gmail_draft" description: str = ( "Use this tool to create a draft email with the provided message fields." ) args_schema: Type[CreateDraftSchema] = CreateDraftSchema def _prepare_draft_message( self, message: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, ) -> dict: draft_message = EmailMessage() draft_message.set_content(message) draft_message["To"] = ", ".join(to) draft_message["Subject"] = subject if cc is not None: draft_message["Cc"] = ", ".join(cc) if bcc is not None: draft_message["Bcc"] = ", ".join(bcc) encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode() return {"message": {"raw": encoded_message}} def _run( self, message: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: create_message = self._prepare_draft_message(message, to, subject, cc, bcc) draft = ( self.api_resource.users() .drafts() .create(userId="me", body=create_message) .execute() ) output = f'Draft created. Draft Id: {draft["id"]}' return output except Exception as e: raise Exception(f"An error occurred: {e}")
[ "langchain.pydantic_v1.Field" ]
[((359, 421), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The message to include in the draft."""'}), "(..., description='The message to include in the draft.')\n", (364, 421), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((465, 514), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The list of recipients."""'}), "(..., description='The list of recipients.')\n", (470, 514), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((557, 610), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The subject of the message."""'}), "(..., description='The subject of the message.')\n", (562, 610), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((664, 717), 'langchain.pydantic_v1.Field', 'Field', (['None'], {'description': '"""The list of CC recipients."""'}), "(None, description='The list of CC recipients.')\n", (669, 717), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((772, 826), 'langchain.pydantic_v1.Field', 'Field', (['None'], {'description': '"""The list of BCC recipients."""'}), "(None, description='The list of BCC recipients.')\n", (777, 826), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1390, 1404), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (1402, 1404), False, 'from email.message import EmailMessage\n')]
from langchain import PromptTemplate from langchain.chains.summarize import load_summarize_chain from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.docstore.document import Document base_prompt = """A profound and powerful writer, you have been given a context text and a search query, {0}. You must write an in-depth analysis, highlighting the significance of {0} in larger context's meaning as well as INCLUDE AS MANY SPECIFIC QUOTATIONS AS POSSIBLE (marked with quotes) from the context and note what page you found them from. Try to prioritize quotations in responses that should be about 1000 characters total. """ def summarize_context(search_term: str, contexts: list[str], openai_api_key: str): try: if openai_api_key: llm = OpenAI(temperature=0, openai_api_key=openai_api_key) else: llm = OpenAI(temperature=0) docs = [Document(page_content=context) for context in contexts] # have to do a little weird acrobatics here because summarize cannot take more than one input # so have to construct the prompt template string after we interpolate the characters final_prompt = base_prompt.format(search_term) + "\n{text}\n\nSUMMARY:" final_prompt_template = PromptTemplate(template = final_prompt, input_variables=["text"]) llm_summarize = load_summarize_chain(llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=final_prompt_template, combine_prompt=final_prompt_template) global_summary = llm_summarize({"input_documents": docs}, return_only_outputs=True) if (len(global_summary["output_text"]) > 400): return global_summary["output_text"] else: # To augment the summary with more details that don't get lost, we extract some info from the summaries doc_summaries = [Document(page_content=summary) for summary in global_summary["intermediate_steps"]] qa_chain = load_qa_chain(llm, chain_type="stuff") query = "What is the significance of {0} in the context and quotes (include quotations) to back up your reasoning".format(search_term) additional_context = qa_chain({"input_documents": doc_summaries, "question": query}, return_only_outputs=True) return global_summary["output_text"] + additional_context["output_text"] except Exception as e: print("Error generating summary: ", e) raise e
[ "langchain.chains.question_answering.load_qa_chain", "langchain.chains.summarize.load_summarize_chain", "langchain.docstore.document.Document", "langchain.llms.OpenAI", "langchain.PromptTemplate" ]
[((1309, 1372), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'final_prompt', 'input_variables': "['text']"}), "(template=final_prompt, input_variables=['text'])\n", (1323, 1372), False, 'from langchain import PromptTemplate\n'), ((1399, 1561), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""map_reduce"""', 'return_intermediate_steps': '(True)', 'map_prompt': 'final_prompt_template', 'combine_prompt': 'final_prompt_template'}), "(llm, chain_type='map_reduce',\n return_intermediate_steps=True, map_prompt=final_prompt_template,\n combine_prompt=final_prompt_template)\n", (1419, 1561), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((821, 873), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), '(temperature=0, openai_api_key=openai_api_key)\n', (827, 873), False, 'from langchain.llms import OpenAI\n'), ((907, 928), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (913, 928), False, 'from langchain.llms import OpenAI\n'), ((945, 975), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'context'}), '(page_content=context)\n', (953, 975), False, 'from langchain.docstore.document import Document\n'), ((2016, 2054), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2029, 2054), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1909, 1939), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'summary'}), '(page_content=summary)\n', (1917, 1939), False, 'from langchain.docstore.document import Document\n')]
import streamlit as st from langchain.llms import OpenAI from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.chains import RetrievalQA def generate_response(uploaded_file, openai_api_key, query_text): # Load document if file is uploaded if uploaded_file is not None: documents = [uploaded_file.read().decode()] # Split documents into chunks text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.create_documents(documents) # Select embeddings embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) # Create a vectorstore from documents db = Chroma.from_documents(texts, embeddings) # Create retriever interface retriever = db.as_retriever() # Create QA chain qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever) return qa.run(query_text) # Page title st.set_page_config(page_title='🦜🔗 Ask the Doc App') st.title('🦜🔗 Ask the Doc App') # File upload uploaded_file = st.file_uploader('Upload an article', type='txt') # Query text query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file) # Form input and query result = [] with st.form('myform', clear_on_submit=True): openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text)) submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text)) if submitted and openai_api_key.startswith('sk-'): with st.spinner('Calculating...'): response = generate_response(uploaded_file, openai_api_key, query_text) result.append(response) del openai_api_key if len(result): st.info(response)
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.embeddings.OpenAIEmbeddings", "langchain.llms.OpenAI", "langchain.vectorstores.Chroma.from_documents" ]
[((1040, 1091), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""🦜🔗 Ask the Doc App"""'}), "(page_title='🦜🔗 Ask the Doc App')\n", (1058, 1091), True, 'import streamlit as st\n'), ((1092, 1122), 'streamlit.title', 'st.title', (['"""🦜🔗 Ask the Doc App"""'], {}), "('🦜🔗 Ask the Doc App')\n", (1100, 1122), True, 'import streamlit as st\n'), ((1154, 1203), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload an article"""'], {'type': '"""txt"""'}), "('Upload an article', type='txt')\n", (1170, 1203), True, 'import streamlit as st\n'), ((1230, 1347), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""Please provide a short summary."""', 'disabled': '(not uploaded_file)'}), "('Enter your question:', placeholder=\n 'Please provide a short summary.', disabled=not uploaded_file)\n", (1243, 1347), True, 'import streamlit as st\n'), ((495, 550), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (516, 550), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((646, 693), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (662, 693), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((745, 785), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (766, 785), False, 'from langchain.vectorstores import Chroma\n'), ((1386, 1425), 'streamlit.form', 'st.form', (['"""myform"""'], {'clear_on_submit': '(True)'}), "('myform', clear_on_submit=True)\n", (1393, 1425), True, 'import streamlit as st\n'), ((1448, 1546), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""', 'disabled': '(not (uploaded_file and query_text))'}), "('OpenAI API Key', type='password', disabled=not (\n uploaded_file and query_text))\n", (1461, 1546), True, 'import streamlit as st\n'), ((1558, 1634), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {'disabled': '(not (uploaded_file and query_text))'}), "('Submit', disabled=not (uploaded_file and query_text))\n", (1579, 1634), True, 'import streamlit as st\n'), ((1904, 1921), 'streamlit.info', 'st.info', (['response'], {}), '(response)\n', (1911, 1921), True, 'import streamlit as st\n'), ((916, 953), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (922, 953), False, 'from langchain.llms import OpenAI\n'), ((1702, 1730), 'streamlit.spinner', 'st.spinner', (['"""Calculating..."""'], {}), "('Calculating...')\n", (1712, 1730), True, 'import streamlit as st\n')]
import os import os.path as osp from typing import List from tqdm import tqdm from langchain.docstore.document import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import NLTKTextSplitter from langchain.vectorstores.faiss import FAISS import pandas as pd import nltk nltk.download('punkt') PROCESSED_CSV_DIRECTORY = "processed" # Directory to save processed CSV file def create_docs() -> List[Document]: docs = [] df = pd.read_csv(osp.join(PROCESSED_CSV_DIRECTORY, 'scraped.csv')) for index, row in df.iterrows(): doc = Document(page_content=row['text'], metadata={"source": row['url']}) docs.append(doc) return docs docs = create_docs() doc_chunks = [] seen_chunks = set() total_websites = set() total_words = 0 splitter = NLTKTextSplitter(chunk_size=1024) for source in tqdm(docs): for chunk in splitter.split_text(source.page_content): if chunk not in seen_chunks: doc_chunks.append( Document(page_content=chunk, metadata=source.metadata)) total_words += len(chunk.split()) total_websites.add(source.metadata['source']) seen_chunks.add(chunk) print(f'Total websites: {len(total_websites)}') print(f'Total chunks: {len(doc_chunks)}') print(f'Total words: {total_words}') print(f'Avg words per chunk: {int(total_words / len(doc_chunks))}') print(f'Estimated embedding cost: ${total_words / 0.75 / 1000 * 0.0004:.2f}') search_index = FAISS.from_documents(doc_chunks, OpenAIEmbeddings(model='text-embedding-ada-002')) # persistent search index search_index.save_local("search_index")
[ "langchain.text_splitter.NLTKTextSplitter", "langchain.docstore.document.Document", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((316, 338), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (329, 338), False, 'import nltk\n'), ((810, 843), 'langchain.text_splitter.NLTKTextSplitter', 'NLTKTextSplitter', ([], {'chunk_size': '(1024)'}), '(chunk_size=1024)\n', (826, 843), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((858, 868), 'tqdm.tqdm', 'tqdm', (['docs'], {}), '(docs)\n', (862, 868), False, 'from tqdm import tqdm\n'), ((1527, 1575), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (1543, 1575), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((490, 538), 'os.path.join', 'osp.join', (['PROCESSED_CSV_DIRECTORY', '"""scraped.csv"""'], {}), "(PROCESSED_CSV_DIRECTORY, 'scraped.csv')\n", (498, 538), True, 'import os.path as osp\n'), ((591, 658), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "row['text']", 'metadata': "{'source': row['url']}"}), "(page_content=row['text'], metadata={'source': row['url']})\n", (599, 658), False, 'from langchain.docstore.document import Document\n'), ((1013, 1067), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'source.metadata'}), '(page_content=chunk, metadata=source.metadata)\n', (1021, 1067), False, 'from langchain.docstore.document import Document\n')]
""" 相关资料: llama-cpp-python文档:https://llama-cpp-python.readthedocs.io/en/latest/ 前提: 1.安装C++环境 https://developer.microsoft.com/en-us/windows/downloads/windows-sdk/ 勾选“使用C++桌面开发” 2.安装模块 pip install llama-cpp-python pip install llama-cpp-python[server] 3.运行服务 python3 -m llama_cpp.server --model “模型路径” # http://localhost:8000/v1 """ import time import os import gradio as gr from langchain.document_loaders import DirectoryLoader from langchain.llms import ChatGLM from langchain.llms.llamacpp import LlamaCpp from langchain.prompts import PromptTemplate from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.huggingface import HuggingFaceEmbeddings from langchain.vectorstores import Chroma from langchain.chains import RetrievalQA # 加载embedding embedding_model_dict = { "ernie-tiny": "nghuyong/ernie-3.0-nano-zh", "ernie-base": "nghuyong/ernie-3.0-base-zh", "text2vec": "GanymedeNil/text2vec-large-chinese", "text2vec2": "uer/sbert-base-chinese-nli", "text2vec3": "shibing624/text2vec-base-chinese", } def load_documents(directory="documents"): """ 加载books下的文件,进行拆分 :param directory: :return: """ loader = DirectoryLoader(directory) documents = loader.load() text_spliter = CharacterTextSplitter(chunk_size=256, chunk_overlap=0) split_docs = text_spliter.split_documents(documents) return split_docs def load_embedding_model(model_name="ernie-tiny"): """ 加载embedding模型 :param model_name: :return: """ encode_kwargs = {"normalize_embeddings": False} model_kwargs = {"device": "cuda:0"} return HuggingFaceEmbeddings( model_name=embedding_model_dict[model_name], model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) def store_chroma(docs, embeddings, persist_directory="VectorStore"): """ 讲文档向量化,存入向量数据库 :param docs: :param embeddings: :param persist_directory: :return: """ db = Chroma.from_documents(docs, embeddings, persist_directory=persist_directory) db.persist() return db # 加载embedding模型 embeddings = load_embedding_model('text2vec3') # 加载数据库 if not os.path.exists('VectorStore'): documents = load_documents() db = store_chroma(documents, embeddings) else: db = Chroma(persist_directory='VectorStore', embedding_function=embeddings) # 创建llm # llm = ChatGLM( # endpoint_url='http://127.0.0.1:8000', # max_token=80000, # top_p=0.9 # ) llm = LlamaCpp( model_path=r"G:\models\llama2\llama-2-7b-chat-q4\llama-2-7b-chat.Q4_0.gguf", n_ctx=2048, stop=['Human:'] ) # 创建qa QA_CHAIN_PROMPT = PromptTemplate.from_template("""Human: 根据下面的上下文(context)内容回答问题。 如果你不知道答案,就回答不知道,不要试图编造答案。 答案最多3句话,保持答案简介。 总是在答案结束时说”谢谢你的提问!“ {context} 问题:{question} Assistant: """) retriever = db.as_retriever() qa = RetrievalQA.from_chain_type( llm=llm, retriever=retriever, verbose=True, chain_type_kwargs={"prompt": QA_CHAIN_PROMPT} ) def add_text(history, text): history = history + [(text, None)] return history, gr.update(value="", interactive=False) def add_file(history, file): """ 上传文件后的回调函数,将上传的文件向量化存入数据库 :param history: :param file: :return: """ global qa directory = os.path.dirname(file.name) documents = load_documents(directory) db = store_chroma(documents, embeddings) retriever = db.as_retriever() qa.retriever = retriever history = history + [((file.name,), None)] return history def bot(history): """ 聊天调用的函数 :param history: :return: """ message = history[-1][0] if isinstance(message, tuple): response = "文件上传成功!!" 
else: response = qa({"query": message})['result'] history[-1][1] = "" for character in response: history[-1][1] += character time.sleep(0.05) yield history with gr.Blocks() as demo: chatbot = gr.Chatbot( [], elem_id="chatbot", bubble_full_width=False, avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))), ) with gr.Row(): txt = gr.Textbox( scale=4, show_label=False, placeholder="Enter text and press enter, or upload an image", container=False, ) btn = gr.UploadButton("📁", file_types=['txt']) txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( bot, chatbot, chatbot ) txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False) file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then( bot, chatbot, chatbot ) demo.queue() if __name__ == "__main__": demo.launch()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.document_loaders.DirectoryLoader", "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain.chains.RetrievalQA.from_chain_type", "langchain.llms.llamacpp.LlamaCpp", "langchain.vectorstores.Chroma.from_documents", "langchain.prompts.PromptTemplate.from_template", "langchain.vectorstores.Chroma" ]
[((2537, 2663), 'langchain.llms.llamacpp.LlamaCpp', 'LlamaCpp', ([], {'model_path': '"""G:\\\\models\\\\llama2\\\\llama-2-7b-chat-q4\\\\llama-2-7b-chat.Q4_0.gguf"""', 'n_ctx': '(2048)', 'stop': "['Human:']"}), "(model_path=\n 'G:\\\\models\\\\llama2\\\\llama-2-7b-chat-q4\\\\llama-2-7b-chat.Q4_0.gguf',\n n_ctx=2048, stop=['Human:'])\n", (2545, 2663), False, 'from langchain.llms.llamacpp import LlamaCpp\n'), ((2691, 2865), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""Human:\n根据下面的上下文(context)内容回答问题。\n如果你不知道答案,就回答不知道,不要试图编造答案。\n答案最多3句话,保持答案简介。\n总是在答案结束时说”谢谢你的提问!“\n{context}\n问题:{question}\nAssistant:\n"""'], {}), '(\n """Human:\n根据下面的上下文(context)内容回答问题。\n如果你不知道答案,就回答不知道,不要试图编造答案。\n答案最多3句话,保持答案简介。\n总是在答案结束时说”谢谢你的提问!“\n{context}\n问题:{question}\nAssistant:\n"""\n )\n', (2719, 2865), False, 'from langchain.prompts import PromptTemplate\n'), ((2891, 3013), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'retriever': 'retriever', 'verbose': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm=llm, retriever=retriever, verbose=True,\n chain_type_kwargs={'prompt': QA_CHAIN_PROMPT})\n", (2918, 3013), False, 'from langchain.chains import RetrievalQA\n'), ((1249, 1275), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['directory'], {}), '(directory)\n', (1264, 1275), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((1325, 1379), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(0)'}), '(chunk_size=256, chunk_overlap=0)\n', (1346, 1379), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1685, 1811), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'embedding_model_dict[model_name]', 'model_kwargs': 'model_kwargs', 'encode_kwargs': 'encode_kwargs'}), '(model_name=embedding_model_dict[model_name],\n model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)\n', (1706, 1811), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((2036, 2112), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['docs', 'embeddings'], {'persist_directory': 'persist_directory'}), '(docs, embeddings, persist_directory=persist_directory)\n', (2057, 2112), False, 'from langchain.vectorstores import Chroma\n'), ((2224, 2253), 'os.path.exists', 'os.path.exists', (['"""VectorStore"""'], {}), "('VectorStore')\n", (2238, 2253), False, 'import os\n'), ((2348, 2418), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""VectorStore"""', 'embedding_function': 'embeddings'}), "(persist_directory='VectorStore', embedding_function=embeddings)\n", (2354, 2418), False, 'from langchain.vectorstores import Chroma\n'), ((3314, 3340), 'os.path.dirname', 'os.path.dirname', (['file.name'], {}), '(file.name)\n', (3329, 3340), False, 'import os\n'), ((3939, 3950), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (3948, 3950), True, 'import gradio as gr\n'), ((3118, 3156), 'gradio.update', 'gr.update', ([], {'value': '""""""', 'interactive': '(False)'}), "(value='', interactive=False)\n", (3127, 3156), True, 'import gradio as gr\n'), ((3893, 3909), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3903, 3909), False, 'import time\n'), ((4161, 4169), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (4167, 4169), True, 'import gradio as gr\n'), ((4185, 4306), 'gradio.Textbox', 'gr.Textbox', 
([], {'scale': '(4)', 'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""', 'container': '(False)'}), "(scale=4, show_label=False, placeholder=\n 'Enter text and press enter, or upload an image', container=False)\n", (4195, 4306), True, 'import gradio as gr\n'), ((4375, 4415), 'gradio.UploadButton', 'gr.UploadButton', (['"""📁"""'], {'file_types': "['txt']"}), "('📁', file_types=['txt'])\n", (4390, 4415), True, 'import gradio as gr\n'), ((4564, 4591), 'gradio.update', 'gr.update', ([], {'interactive': '(True)'}), '(interactive=True)\n', (4573, 4591), True, 'import gradio as gr\n'), ((4101, 4126), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4116, 4126), False, 'import os\n')]
from langchain.tools import tool from graph_chain import get_results @tool("graph-tool") def graph_tool(query:str) -> str: """Tool for returning aggregations of Manager or Company or Industry data, or when the answer depends on relationships between a Company and other objects. Use this tool second, to verify the results of vector-graph-tool. """ return get_results(query)
[ "langchain.tools.tool" ]
[((71, 89), 'langchain.tools.tool', 'tool', (['"""graph-tool"""'], {}), "('graph-tool')\n", (75, 89), False, 'from langchain.tools import tool\n'), ((366, 384), 'graph_chain.get_results', 'get_results', (['query'], {}), '(query)\n', (377, 384), False, 'from graph_chain import get_results\n')]
import matplotlib.pyplot as plt import numpy as np import openai import os import pyaudio import pyttsx3 import threading import tkinter as tk import queue import wave import whisper from langchain import OpenAI, SQLDatabase from langchain.agents.agent_toolkits import SQLDatabaseToolkit from langchain.agents import create_sql_agent from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from tkinter import scrolledtext AUDIO_FORMAT = pyaudio.paInt16 CHANNELS = 1 FRAME_RATE = 16000 CHUNK = 1024 # Get OpenAI API key openai.api_key = os.environ["OPENAI_API_KEY"] s2_password = "<password>" s2_host = "<host>" s2_db = "timeseries_db" db = SQLDatabase.from_uri(f"mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}") llm = OpenAI( model_name = "gpt-3.5-turbo-instruct", temperature = 0, verbose = False ) toolkit = SQLDatabaseToolkit(db = db, llm = llm) agent_executor = create_sql_agent( llm = OpenAI( model_name = "gpt-3.5-turbo-instruct", temperature = 0 ), toolkit = toolkit, verbose = False ) model = whisper.load_model("base.en") # GUI class class AudioRecorderGUI: def __init__(self, root): self.root = root self.root.title("Audio Recorder") self.start_button = tk.Button(root, text = "Start Recording", command = self.start_recording) self.start_button.pack(pady = 10) self.stop_button = tk.Button(root, text = "Stop Recording", command = self.stop_recording, state = tk.DISABLED) self.stop_button.pack(pady = 5) self.exit_button = tk.Button(root, text = "Exit", command = self.exit_program) self.exit_button.pack(pady = 5) self.transcription_box = scrolledtext.ScrolledText(root, height = 5, width = 60) self.transcription_box.pack(padx = 10, pady = 10) self.recording_timer = None self.audio_filename = "audio_recording.wav" self.fig, self.ax = plt.subplots() self.canvas = FigureCanvasTkAgg(self.fig, master = self.root) self.canvas_widget = self.canvas.get_tk_widget() self.canvas_widget.pack(side = tk.TOP, fill = tk.BOTH, expand = 1) self.audio_array = np.array([]) self.update_waveform = False def update_waveform_plot(self): while self.update_waveform and not self.stop_event.is_set(): data = self.audio_queue.queue[-1] if not self.audio_queue.empty() else np.zeros(1024) self.audio_array = np.frombuffer(data, dtype = np.int16) self.ax.clear() self.ax.plot(self.audio_array, color = "r") self.ax.set_title("Real-Time Audio Waveform") self.ax.set_xlabel("Time (samples)") self.ax.set_ylabel("Amplitude") self.ax.set_ylim(-128, 128) self.ax.set_xlim(0, len(self.audio_array)) self.canvas.draw() self.root.update() def speak_audio(self, text): engine = pyttsx3.init() engine.setProperty("voice", "english_us") engine.setProperty("rate", 150) engine.say(text) engine.runAndWait() engine.stop() def start_recording(self): self.transcription_box.delete(1.0, tk.END) self.stop_event = threading.Event() self.audio_queue = queue.Queue() self.record_thread = threading.Thread(target = self.record_audio) self.record_thread.start() self.recording_timer = self.root.after(20000, self.stop_recording) self.update_waveform = True self.update_waveform_plot_thread = threading.Thread(target = self.update_waveform_plot) self.update_waveform_plot_thread.start() self.start_button.config(state = tk.DISABLED) self.stop_button.config(state = tk.NORMAL) def stop_recording(self): if self.recording_timer: self.root.after_cancel(self.recording_timer) self.recording_timer = None self.stop_event.set() self.record_thread.join() transcription = self.transcribe_audio(self.audio_filename) self.transcription_box.insert( tk.END, 
"Transcription:\n" + transcription + "\n" ) speak_thread = threading.Thread(target = self.speak_audio, args = (agent_executor.run(transcription),)) speak_thread.start() self.start_button.config(state = tk.NORMAL) self.stop_button.config(state = tk.DISABLED) def record_audio(self): audio = pyaudio.PyAudio() stream = audio.open( format = AUDIO_FORMAT, channels = CHANNELS, rate = FRAME_RATE, input = True, frames_per_buffer = CHUNK ) while not self.stop_event.is_set(): data = stream.read(CHUNK) self.audio_queue.put(data) stream.stop_stream() stream.close() audio.terminate() with wave.open(self.audio_filename, "wb") as wf: wf.setnchannels(CHANNELS) wf.setsampwidth(audio.get_sample_size(AUDIO_FORMAT)) wf.setframerate(FRAME_RATE) wf.writeframes(b''.join(list(self.audio_queue.queue))) def transcribe_audio(self, filename): with open(filename, "rb") as audio_file: # transcript = openai.Audio.transcribe( # model = "whisper-1", # file = audio_file, # language = "en" # ) transcript = model.transcribe(filename) return transcript["text"].strip() def exit_program(self): self.root.destroy() def main(): root = tk.Tk() app = AudioRecorderGUI(root) root.mainloop() if __name__ == "__main__": main()
[ "langchain.agents.agent_toolkits.SQLDatabaseToolkit", "langchain.SQLDatabase.from_uri", "langchain.OpenAI" ]
[((652, 740), 'langchain.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['f"""mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}"""'], {}), "(\n f'mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}')\n", (672, 740), False, 'from langchain import OpenAI, SQLDatabase\n'), ((743, 816), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""', 'temperature': '(0)', 'verbose': '(False)'}), "(model_name='gpt-3.5-turbo-instruct', temperature=0, verbose=False)\n", (749, 816), False, 'from langchain import OpenAI, SQLDatabase\n'), ((848, 882), 'langchain.agents.agent_toolkits.SQLDatabaseToolkit', 'SQLDatabaseToolkit', ([], {'db': 'db', 'llm': 'llm'}), '(db=db, llm=llm)\n', (866, 882), False, 'from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n'), ((1073, 1102), 'whisper.load_model', 'whisper.load_model', (['"""base.en"""'], {}), "('base.en')\n", (1091, 1102), False, 'import whisper\n'), ((5634, 5641), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (5639, 5641), True, 'import tkinter as tk\n'), ((933, 991), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo-instruct', temperature=0)\n", (939, 991), False, 'from langchain import OpenAI, SQLDatabase\n'), ((1266, 1335), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Start Recording"""', 'command': 'self.start_recording'}), "(root, text='Start Recording', command=self.start_recording)\n", (1275, 1335), True, 'import tkinter as tk\n'), ((1410, 1501), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Stop Recording"""', 'command': 'self.stop_recording', 'state': 'tk.DISABLED'}), "(root, text='Stop Recording', command=self.stop_recording, state=\n tk.DISABLED)\n", (1419, 1501), True, 'import tkinter as tk\n'), ((1571, 1626), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Exit"""', 'command': 'self.exit_program'}), "(root, text='Exit', command=self.exit_program)\n", (1580, 1626), True, 'import tkinter as tk\n'), ((1705, 1756), 'tkinter.scrolledtext.ScrolledText', 'scrolledtext.ScrolledText', (['root'], {'height': '(5)', 'width': '(60)'}), '(root, height=5, width=60)\n', (1730, 1756), False, 'from tkinter import scrolledtext\n'), ((1937, 1951), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1949, 1951), True, 'import matplotlib.pyplot as plt\n'), ((1974, 2019), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig'], {'master': 'self.root'}), '(self.fig, master=self.root)\n', (1991, 2019), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((2181, 2193), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2189, 2193), True, 'import numpy as np\n'), ((2948, 2962), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (2960, 2962), False, 'import pyttsx3\n'), ((3237, 3254), 'threading.Event', 'threading.Event', ([], {}), '()\n', (3252, 3254), False, 'import threading\n'), ((3282, 3295), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3293, 3295), False, 'import queue\n'), ((3326, 3368), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.record_audio'}), '(target=self.record_audio)\n', (3342, 3368), False, 'import threading\n'), ((3562, 3612), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.update_waveform_plot'}), '(target=self.update_waveform_plot)\n', (3578, 3612), False, 'import threading\n'), ((4496, 4513), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (4511, 4513), False, 'import pyaudio\n'), 
((2467, 2502), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.int16'}), '(data, dtype=np.int16)\n', (2480, 2502), True, 'import numpy as np\n'), ((4931, 4967), 'wave.open', 'wave.open', (['self.audio_filename', '"""wb"""'], {}), "(self.audio_filename, 'wb')\n", (4940, 4967), False, 'import wave\n'), ((2421, 2435), 'numpy.zeros', 'np.zeros', (['(1024)'], {}), '(1024)\n', (2429, 2435), True, 'import numpy as np\n')]
from typing import Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class WriteFileInput(BaseModel): """Input for WriteFileTool.""" file_path: str = Field(..., description="name of file") text: str = Field(..., description="text to write to file") append: bool = Field( default=False, description="Whether to append to an existing file." ) class WriteFileTool(BaseFileToolMixin, BaseTool): """Tool that writes a file to disk.""" name: str = "write_file" args_schema: Type[BaseModel] = WriteFileInput description: str = "Write file to disk" def _run( self, file_path: str, text: str, append: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = "a" if append else "w" with write_path.open(mode, encoding="utf-8") as f: f.write(text) return f"File written successfully to {file_path}." except Exception as e: return "Error: " + str(e) # TODO: Add aiofiles method
[ "langchain.pydantic_v1.Field", "langchain.tools.file_management.utils.INVALID_PATH_TEMPLATE.format" ]
[((415, 453), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""name of file"""'}), "(..., description='name of file')\n", (420, 453), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((470, 517), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""text to write to file"""'}), "(..., description='text to write to file')\n", (475, 517), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((537, 611), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to append to an existing file."""'}), "(default=False, description='Whether to append to an existing file.')\n", (542, 611), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1153, 1220), 'langchain.tools.file_management.utils.INVALID_PATH_TEMPLATE.format', 'INVALID_PATH_TEMPLATE.format', ([], {'arg_name': '"""file_path"""', 'value': 'file_path'}), "(arg_name='file_path', value=file_path)\n", (1181, 1220), False, 'from langchain.tools.file_management.utils import INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError\n')]
import argparse from typing import Optional from langchain.llms.ollama import Ollama from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from termcolor import colored class RubberDuck: """ This class is a wrapper around the Ollama model. """ def __init__(self, model: str = "codellama") -> None: """ This function initializes the RubberDuck class. Args: model (str, optional): The model to be used. Defaults to "codellama". """ self.system_prompt = """You are a pair programming tool to help developers debug, think through design, and write code. Help the user think through their approach and provide feedback on the code.""" self.llm = Ollama(model=model, callbacks=[StreamingStdOutCallbackHandler()], system=self.system_prompt) def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None: """ This function calls the Ollama model to provide feedback on the given code. Args: code (str): The code to be reviewed. prompt (Optional[str]): Custom prompt to be used. Defaults to None. """ if prompt is None: prompt = "review the code, find any issues if any, suggest cleanups if any:" + code else: prompt = prompt + code self.llm(prompt) if chain: while(True): prompt = input(colored("\n What's on your mind? \n ", 'green')) self.llm(prompt) def read_files_from_dir(directory: str) -> str: """ This function reads all the files from a directory and returns the concatenated string. Args: directory (str): The directory to be processed. Returns: str: The concatenated string of all the files. """ import os files = os.listdir(directory) code = "" for file in files: code += open(directory + "/" + file).read() return code def ducky() -> None: """ This function parses the command line arguments and calls the Ollama model. """ parser = argparse.ArgumentParser() parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None) parser.add_argument("--file", "-f", help="The file to be processed", default=None) parser.add_argument("--directory", "-d", help="The directory to be processed", default=None) parser.add_argument("--chain", "-c", help="Chain the output of the previous command to the next command", action="store_true", default=False) parser.add_argument("--model", "-m", help="The model to be used", default="codellama") args, _ = parser.parse_known_args() # My testing has shown that the codellama:7b-python is good for returning python code from the program. # My intention with this tool was to give more general feedback and have a back and forth with the user. rubber_ducky = RubberDuck(model=args.model) if args.file is None and args.directory is None: if args.chain: while(True): prompt = input(colored("\n What's on your mind? \n ", 'green')) rubber_ducky.call_llama(prompt=prompt, chain=args.chain) else: prompt = input(colored("\n What's on your mind? \n ", 'green')) rubber_ducky.call_llama(prompt=prompt, chain=args.chain) if args.file is not None: code = open(args.file).read() rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain) elif args.directory is not None: code = read_files_from_dir(args.directory) rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain) if __name__ == "__main__": ducky()
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler" ]
[((1887, 1908), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1897, 1908), False, 'import os\n'), ((2146, 2171), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2169, 2171), False, 'import argparse\n'), ((3288, 3337), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (3295, 3337), False, 'from termcolor import colored\n'), ((784, 816), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (814, 816), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((1481, 1530), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (1488, 1530), False, 'from termcolor import colored\n'), ((3125, 3174), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (3132, 3174), False, 'from termcolor import colored\n')]
from langchain.chat_models import ChatOpenAI from langchain.schema import HumanMessage from log10.langchain import Log10Callback from log10.llm import Log10Config log10_callback = Log10Callback(log10_config=Log10Config()) messages = [ HumanMessage(content="You are a ping pong machine"), HumanMessage(content="Ping?"), ] llm = ChatOpenAI( model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"], ) completion = llm.predict_messages(messages, tags=["foobar"]) print(completion) print(log10_callback.last_completion_url()) llm = ChatOpenAI( model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"], ) messages.append(HumanMessage(content="Pong!")) completion = llm.predict_messages(messages, tags=["foobar"]) print(completion) print(log10_callback.last_completion_url())
[ "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((341, 443), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'callbacks': '[log10_callback]', 'temperature': '(0.5)', 'tags': "['test']"}), "(model_name='gpt-3.5-turbo', callbacks=[log10_callback],\n temperature=0.5, tags=['test'])\n", (351, 443), False, 'from langchain.chat_models import ChatOpenAI\n'), ((590, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'callbacks': '[log10_callback]', 'temperature': '(0.5)', 'tags': "['test']"}), "(model_name='gpt-3.5-turbo', callbacks=[log10_callback],\n temperature=0.5, tags=['test'])\n", (600, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((244, 295), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""You are a ping pong machine"""'}), "(content='You are a ping pong machine')\n", (256, 295), False, 'from langchain.schema import HumanMessage\n'), ((301, 330), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Ping?"""'}), "(content='Ping?')\n", (313, 330), False, 'from langchain.schema import HumanMessage\n'), ((724, 753), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Pong!"""'}), "(content='Pong!')\n", (736, 753), False, 'from langchain.schema import HumanMessage\n'), ((210, 223), 'log10.llm.Log10Config', 'Log10Config', ([], {}), '()\n', (221, 223), False, 'from log10.llm import Log10Config\n')]
from langchain.document_loaders import DirectoryLoader from langchain.text_splitter import CharacterTextSplitter import os import pinecone from langchain.vectorstores import Pinecone from langchain.embeddings.openai import OpenAIEmbeddings from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI import streamlit as st from dotenv import load_dotenv load_dotenv() PINECONE_API_KEY = os.getenv('PINECONE_API_KEY') PINECONE_ENV = os.getenv('PINECONE_ENV') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY def doc_preprocessing(): loader = DirectoryLoader( 'data/', glob='**/*.pdf', # only the PDFs show_progress=True ) docs = loader.load() text_splitter = CharacterTextSplitter( chunk_size=1000, chunk_overlap=0 ) docs_split = text_splitter.split_documents(docs) return docs_split @st.cache_resource def embedding_db(): # we use the openAI embedding model embeddings = OpenAIEmbeddings() pinecone.init( api_key=PINECONE_API_KEY, environment=PINECONE_ENV ) docs_split = doc_preprocessing() doc_db = Pinecone.from_documents( docs_split, embeddings, index_name='langchain-demo-indexes' ) return doc_db llm = ChatOpenAI() doc_db = embedding_db() def retrieval_answer(query): qa = RetrievalQA.from_chain_type( llm=llm, chain_type='stuff', retriever=doc_db.as_retriever(), ) query = query result = qa.run(query) return result def main(): st.title("Question and Answering App powered by LLM and Pinecone") text_input = st.text_input("Ask your query...") if st.button("Ask Query"): if len(text_input)>0: st.info("Your Query: " + text_input) answer = retrieval_answer(text_input) st.success(answer) if __name__ == "__main__": main()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.document_loaders.DirectoryLoader", "langchain.vectorstores.Pinecone.from_documents", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((393, 406), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (404, 406), False, 'from dotenv import load_dotenv\n'), ((431, 460), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (440, 460), False, 'import os\n'), ((477, 502), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'], {}), "('PINECONE_ENV')\n", (486, 502), False, 'import os\n'), ((521, 548), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (530, 548), False, 'import os\n'), ((1382, 1394), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1392, 1394), False, 'from langchain.chat_models import ChatOpenAI\n'), ((642, 703), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['"""data/"""'], {'glob': '"""**/*.pdf"""', 'show_progress': '(True)'}), "('data/', glob='**/*.pdf', show_progress=True)\n", (657, 703), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((805, 860), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (826, 860), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1066, 1084), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1082, 1084), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1090, 1155), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENV'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)\n', (1103, 1155), False, 'import pinecone\n'), ((1233, 1322), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['docs_split', 'embeddings'], {'index_name': '"""langchain-demo-indexes"""'}), "(docs_split, embeddings, index_name=\n 'langchain-demo-indexes')\n", (1256, 1322), False, 'from langchain.vectorstores import Pinecone\n'), ((1662, 1728), 'streamlit.title', 'st.title', (['"""Question and Answering App powered by LLM and Pinecone"""'], {}), "('Question and Answering App powered by LLM and Pinecone')\n", (1670, 1728), True, 'import streamlit as st\n'), ((1749, 1783), 'streamlit.text_input', 'st.text_input', (['"""Ask your query..."""'], {}), "('Ask your query...')\n", (1762, 1783), True, 'import streamlit as st\n'), ((1793, 1815), 'streamlit.button', 'st.button', (['"""Ask Query"""'], {}), "('Ask Query')\n", (1802, 1815), True, 'import streamlit as st\n'), ((1861, 1897), 'streamlit.info', 'st.info', (["('Your Query: ' + text_input)"], {}), "('Your Query: ' + text_input)\n", (1868, 1897), True, 'import streamlit as st\n'), ((1962, 1980), 'streamlit.success', 'st.success', (['answer'], {}), '(answer)\n', (1972, 1980), True, 'import streamlit as st\n')]
import os import langchain from langchain import ( agents, prompts, chains, llms ) class BOAgent: def __init__( self, tools, memory, model="text-davinci-003", temp=0.1, max_steps=30, ): self.openai_key = os.getenv("OPENAI_API_KEY") self.memory = memory # Initialize LLM if model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"): self.llm = langchain.chat_models.ChatOpenAI( temperature=temp, openai_api_key=self.openai_key, model_name=model, ) else: self.llm = langchain.OpenAI( temperature=temp, openai_api_key=self.openai_key, model_name=model ) # Initialize agent self.agent = agents.initialize_agent( tools, self.llm, agent="conversational-react-description", verbose=True, max_iterations=max_steps, memory=self.memory ) def run(self, prompt): return self.agent.run(prompt)
[ "langchain.agents.initialize_agent", "langchain.OpenAI", "langchain.chat_models.ChatOpenAI" ]
[((310, 337), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (319, 337), False, 'import os\n'), ((888, 1040), 'langchain.agents.initialize_agent', 'agents.initialize_agent', (['tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'max_iterations': 'max_steps', 'memory': 'self.memory'}), "(tools, self.llm, agent=\n 'conversational-react-description', verbose=True, max_iterations=\n max_steps, memory=self.memory)\n", (911, 1040), False, 'from langchain import agents, prompts, chains, llms\n'), ((491, 596), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'temperature': 'temp', 'openai_api_key': 'self.openai_key', 'model_name': 'model'}), '(temperature=temp, openai_api_key=self.\n openai_key, model_name=model)\n', (523, 596), False, 'import langchain\n'), ((692, 780), 'langchain.OpenAI', 'langchain.OpenAI', ([], {'temperature': 'temp', 'openai_api_key': 'self.openai_key', 'model_name': 'model'}), '(temperature=temp, openai_api_key=self.openai_key,\n model_name=model)\n', (708, 780), False, 'import langchain\n')]
import importlib.util import logging from typing import Any, Callable, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.self_hosted import SelfHostedPipeline from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") logger = logging.getLogger(__name__) def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a Hugging Face pipeline (or more likely, a key pointing to such a pipeline on the cluster's object store) and returns generated text. """ response = pipeline(prompt, *args, **kwargs) if pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif pipeline.task == "text2text-generation": text = response[0]["generated_text"] elif pipeline.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _load_transformer( model_id: str = DEFAULT_MODEL_ID, task: str = DEFAULT_TASK, device: int = 0, model_kwargs: Optional[dict] = None, ) -> Any: """Inference function to send to the remote hardware. Accepts a huggingface model_id and returns a pipeline for the task. """ from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ("text2text-generation", "summarization"): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return pipeline class SelfHostedHuggingFaceLLM(SelfHostedPipeline): """HuggingFace Pipeline API to run on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. 
Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example using from_model_id: .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu ) Example passing fn that generates a pipeline (bc the pipeline is not serializable): .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def get_pipeline(): model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) """ model_id: str = DEFAULT_MODEL_ID """Hugging Face model_id to load the model.""" task: str = DEFAULT_TASK """Hugging Face task ("text-generation", "text2text-generation" or "summarization").""" device: int = 0 """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_reqs: List[str] = ["./", "transformers", "torch"] """Requirements to install on hardware to inference the model.""" model_load_fn: Callable = _load_transformer """Function to load the model remotely on the server.""" inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Construct the pipeline remotely using an auxiliary function. The load function needs to be importable to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ load_fn_kwargs = { "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID), "task": kwargs.get("task", DEFAULT_TASK), "device": kwargs.get("device", 0), "model_kwargs": kwargs.get("model_kwargs", None), } super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "selfhosted_huggingface_pipeline" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: return self.client( pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs )
[ "langchain.llms.utils.enforce_stop_tokens" ]
[((457, 484), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (474, 484), False, 'import logging\n'), ((1983, 2039), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2012, 2039), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((3399, 3502), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': 'device', 'model_kwargs': '_model_kwargs'}), '(task=task, model=model, tokenizer=tokenizer, device=device,\n model_kwargs=_model_kwargs)\n', (3410, 3502), True, 'from transformers import pipeline as hf_pipeline\n'), ((1434, 1465), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (1453, 1465), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2739, 2764), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2762, 2764), False, 'import torch\n'), ((2108, 2171), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2144, 2171), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((2256, 2320), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2293, 2320), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n')]
from collections import deque
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import BaseLLM
from langchain.prompts import PromptTemplate
from modules.memory import MemoryModule
from typing import Dict, List


class ReasoningModule:
    def __init__(self, llm, memory_module: MemoryModule, verbose: bool = True):
        self.task_list = deque()
        self.completed_task_list = deque()
        self.memory_module = memory_module
        self.task_creation_chain = TaskCreationChain.from_llm(llm, verbose)
        self.task_prioritization_chain = TaskPrioritizationChain.from_llm(llm, verbose)
        self.milestone_chain = MilestoneChain.from_llm(llm, verbose)

    def initialize_tasks(self):
        milestones = self.milestone_chain.run(objective=self.memory_module.objective)
        self.memory_module.store(str(milestones))
        for milestone in milestones:
            self.task_list.append({"task_name": milestone})
        self.task_list = deque(self.prioritize_tasks(0))

    def update_tasks(self, task: dict, result: dict):
        incomplete_tasks = [t["task_name"] for t in self.task_list]
        task_description = task["task_name"]
        incomplete_tasks = "\n".join(incomplete_tasks)
        if len(self.task_list) == 0:
            incomplete_tasks = "all"
        objective = self.memory_module.objective
        response = self.task_creation_chain.run(
            result=result,
            task_description=task_description,
            incomplete_tasks=incomplete_tasks,
            objective=objective,
        )
        new_tasks = response.split("\n")
        new_tasks = [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
        this_task_id = int("".join(filter(str.isdigit, task["task_id"]))) if isinstance(task["task_id"], str) else task["task_id"]
        task_id_counter = this_task_id
        for new_task in new_tasks:
            task_id_counter += 1
            new_task.update({"task_id": task_id_counter})
            self.task_list.append(new_task)
        self.task_list = deque(self.prioritize_tasks(this_task_id))

    def prioritize_tasks(self, this_task_id: int) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in self.task_list]
        task_names = "\n".join(task_names)
        objective = self.memory_module.objective
        next_task_id = this_task_id + 1
        response = self.task_prioritization_chain.run(task_names=task_names, next_task_id=next_task_id, objective=objective)
        new_tasks = response.split("\n")
        prioritized_task_list = []
        task_id_counter = this_task_id
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id_counter += 1
                # task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append({"task_id": task_id_counter, "task_name": task_name})
        return prioritized_task_list


class TaskCreationChain(LLMChain):
    """Chain to generate tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_creation_template = (
            "As a task creation AI, create new tasks with the objective: {objective}.\n"
            "Last completed task's result: {result}.\n"
            "Task description: {task_description}.\n"
            "Incomplete tasks: {incomplete_tasks}\n\n"
            "Ensure tasks are actionable and achievable by an agent with limited resources.\n"
            "Create short, finite tasks. Avoid continuous tasks like monitoring or testing.\n"
            "Consider if a new task is essential for reaching the objective.\n"
            "Return tasks as an array.\n"
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=["result", "task_description", "incomplete_tasks", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_prioritization_template = (
            "As a task prioritization AI, format and prioritize tasks: {task_names}\n"
            "Objective: {objective}\n\n"
            "Return prioritized tasks as a numbered list starting with {next_task_id}.\n"
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class MilestoneChain(LLMChain):
    """Chain to generate milestones."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        milestone_template = "As a milestone AI, generate milestones for the objective: {objective}.\n" "Return milestones as an array.\n"
        return cls(llm=llm, prompt=PromptTemplate(input_variables=["objective"], template=milestone_template), verbose=verbose)

    def run(self, objective: str) -> List[str]:
        """Run the chain."""
        return self.generate_milestones(objective=objective)

    def generate_milestones(self, objective: str) -> List[str]:
        """Generate milestones."""
        response = self.predict(objective=objective)
        return response.strip().split("\n") if response else []
[ "langchain.prompts.PromptTemplate" ]
[((395, 402), 'collections.deque', 'deque', ([], {}), '()\n', (400, 402), False, 'from collections import deque\n'), ((438, 445), 'collections.deque', 'deque', ([], {}), '()\n', (443, 445), False, 'from collections import deque\n'), ((3982, 4114), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'task_creation_template', 'input_variables': "['result', 'task_description', 'incomplete_tasks', 'objective']"}), "(template=task_creation_template, input_variables=['result',\n 'task_description', 'incomplete_tasks', 'objective'])\n", (3996, 4114), False, 'from langchain.prompts import PromptTemplate\n'), ((4700, 4819), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'task_prioritization_template', 'input_variables': "['task_names', 'next_task_id', 'objective']"}), "(template=task_prioritization_template, input_variables=[\n 'task_names', 'next_task_id', 'objective'])\n", (4714, 4819), False, 'from langchain.prompts import PromptTemplate\n'), ((5287, 5361), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['objective']", 'template': 'milestone_template'}), "(input_variables=['objective'], template=milestone_template)\n", (5301, 5361), False, 'from langchain.prompts import PromptTemplate\n')]
## This is a fork/based from https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6

from io import StringIO
import sys
import os
from typing import Dict, Optional

from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')


class PythonREPL:
    """Simulates a standalone Python REPL."""

    def __init__(self):
        pass

    def run(self, command: str) -> str:
        """Run command and returns anything printed."""
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        try:
            exec(command, globals())
            sys.stdout = old_stdout
            output = mystdout.getvalue()
        except Exception as e:
            sys.stdout = old_stdout
            output = str(e)
        return output


llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name)

python_repl = Tool(
    "Python REPL",
    PythonREPL().run,
    """A Python shell. Use this to execute python commands. Input should be a valid python command. If you expect output it should be printed out.""",
)

tools = [python_repl]

agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What is the 10th fibonacci number?")
[ "langchain.agents.initialize_agent", "langchain.llms.OpenAI" ]
[((348, 409), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (362, 409), False, 'import os\n'), ((423, 468), 'os.environ.get', 'os.environ.get', (['"""MODEL_NAME"""', '"""gpt-3.5-turbo"""'], {}), "('MODEL_NAME', 'gpt-3.5-turbo')\n", (437, 468), False, 'import os\n'), ((1003, 1076), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.0)', 'openai_api_base': 'base_path', 'model_name': 'model_name'}), '(temperature=0.0, openai_api_base=base_path, model_name=model_name)\n', (1009, 1076), False, 'from langchain.llms import OpenAI\n'), ((1345, 1424), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (1361, 1424), False, 'from langchain.agents import initialize_agent\n'), ((741, 751), 'io.StringIO', 'StringIO', ([], {}), '()\n', (749, 751), False, 'from io import StringIO\n')]
"""Utility functions for mlflow.langchain.""" import contextlib import json import logging import os import shutil import types import warnings from functools import lru_cache from importlib.util import find_spec from typing import NamedTuple import cloudpickle import yaml from packaging import version from packaging.version import Version import mlflow from mlflow.utils.class_utils import _get_class_from_string _AGENT_PRIMITIVES_FILE_NAME = "agent_primitive_args.json" _AGENT_PRIMITIVES_DATA_KEY = "agent_primitive_data" _AGENT_DATA_FILE_NAME = "agent.yaml" _AGENT_DATA_KEY = "agent_data" _TOOLS_DATA_FILE_NAME = "tools.pkl" _TOOLS_DATA_KEY = "tools_data" _LOADER_FN_FILE_NAME = "loader_fn.pkl" _LOADER_FN_KEY = "loader_fn" _LOADER_ARG_KEY = "loader_arg" _PERSIST_DIR_NAME = "persist_dir_data" _PERSIST_DIR_KEY = "persist_dir" _MODEL_DATA_YAML_FILE_NAME = "model.yaml" _MODEL_DATA_PKL_FILE_NAME = "model.pkl" _MODEL_DATA_FOLDER_NAME = "model" _MODEL_DATA_KEY = "model_data" _MODEL_TYPE_KEY = "model_type" _RUNNABLE_LOAD_KEY = "runnable_load" _BASE_LOAD_KEY = "base_load" _CONFIG_LOAD_KEY = "config_load" _MODEL_LOAD_KEY = "model_load" _UNSUPPORTED_MODEL_ERROR_MESSAGE = ( "MLflow langchain flavor only supports subclasses of " "langchain.chains.base.Chain, langchain.agents.agent.AgentExecutor, " "langchain.schema.BaseRetriever, langchain.schema.runnable.RunnableSequence, " "langchain.schema.runnable.RunnableLambda, " "langchain.schema.runnable.RunnableParallel, " "langchain.schema.runnable.RunnablePassthrough, " "langchain.schema.runnable.passthrough.RunnableAssign instances, " "found {instance_type}" ) _UNSUPPORTED_MODEL_WARNING_MESSAGE = ( "MLflow does not guarantee support for Chains outside of the subclasses of LLMChain, found %s" ) _UNSUPPORTED_LLM_WARNING_MESSAGE = ( "MLflow does not guarantee support for LLMs outside of HuggingFaceHub and OpenAI, found %s" ) _UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE = ( "Saving {instance_type} models is only supported in langchain 0.0.194 and above." ) logger = logging.getLogger(__name__) @lru_cache def base_lc_types(): import langchain.agents.agent import langchain.chains.base import langchain.schema return ( langchain.chains.base.Chain, langchain.agents.agent.AgentExecutor, langchain.schema.BaseRetriever, ) @lru_cache def picklable_runnable_types(): """ Runnable types that can be pickled and unpickled by cloudpickle. 
""" from langchain.chat_models.base import SimpleChatModel from langchain.prompts import ChatPromptTemplate types = ( SimpleChatModel, ChatPromptTemplate, ) try: from langchain.schema.runnable import ( RunnableLambda, RunnablePassthrough, ) types += (RunnableLambda, RunnablePassthrough) except ImportError: pass return types @lru_cache def lc_runnable_with_steps_types(): # import them separately because they are added # in different versions of langchain try: from langchain.schema.runnable import RunnableSequence types = (RunnableSequence,) except ImportError: types = () try: from langchain.schema.runnable import RunnableParallel types += (RunnableParallel,) except ImportError: pass return types def lc_runnable_assign_types(): try: from langchain.schema.runnable.passthrough import RunnableAssign return (RunnableAssign,) except ImportError: return () def lc_runnable_branch_types(): try: from langchain.schema.runnable import RunnableBranch return (RunnableBranch,) except ImportError: return () def lc_runnables_types(): return ( picklable_runnable_types() + lc_runnable_with_steps_types() + lc_runnable_branch_types() + lc_runnable_assign_types() ) def supported_lc_types(): return base_lc_types() + lc_runnables_types() @lru_cache def runnables_supports_batch_types(): try: from langchain.schema.runnable import ( RunnableLambda, RunnableSequence, ) types = (RunnableSequence, RunnableLambda) except ImportError: types = () try: from langchain.schema.runnable import RunnableParallel types += (RunnableParallel,) except ImportError: pass return types @lru_cache def custom_type_to_loader_dict(): # helper function to load output_parsers from config def _load_output_parser(config: dict) -> dict: """Load output parser.""" from langchain.schema.output_parser import StrOutputParser output_parser_type = config.pop("_type", None) if output_parser_type == "default": return StrOutputParser(**config) else: raise ValueError(f"Unsupported output parser {output_parser_type}") return {"default": _load_output_parser} class _SpecialChainInfo(NamedTuple): loader_arg: str def _get_special_chain_info_or_none(chain): for ( special_chain_class, loader_arg, ) in _get_map_of_special_chain_class_to_loader_arg().items(): if isinstance(chain, special_chain_class): return _SpecialChainInfo(loader_arg=loader_arg) @lru_cache def _get_map_of_special_chain_class_to_loader_arg(): import langchain from mlflow.langchain.retriever_chain import _RetrieverChain class_name_to_loader_arg = { "langchain.chains.RetrievalQA": "retriever", "langchain.chains.APIChain": "requests_wrapper", "langchain.chains.HypotheticalDocumentEmbedder": "embeddings", } # NB: SQLDatabaseChain was migrated to langchain_experimental beginning with version 0.0.247 if version.parse(langchain.__version__) <= version.parse("0.0.246"): class_name_to_loader_arg["langchain.chains.SQLDatabaseChain"] = "database" else: if find_spec("langchain_experimental"): # Add this entry only if langchain_experimental is installed class_name_to_loader_arg["langchain_experimental.sql.SQLDatabaseChain"] = "database" class_to_loader_arg = { _RetrieverChain: "retriever", } for class_name, loader_arg in class_name_to_loader_arg.items(): try: cls = _get_class_from_string(class_name) class_to_loader_arg[cls] = loader_arg except Exception: logger.warning( "Unexpected import failure for class '%s'. 
Please file an issue at" " https://github.com/mlflow/mlflow/issues/.", class_name, exc_info=True, ) return class_to_loader_arg @lru_cache def _get_supported_llms(): import langchain.chat_models import langchain.llms llms = {langchain.llms.openai.OpenAI, langchain.llms.huggingface_hub.HuggingFaceHub} if hasattr(langchain.llms, "Databricks"): llms.add(langchain.llms.Databricks) if hasattr(langchain.llms, "Mlflow"): llms.add(langchain.llms.Mlflow) if hasattr(langchain.chat_models, "ChatDatabricks"): llms.add(langchain.chat_models.ChatDatabricks) if hasattr(langchain.chat_models, "ChatMlflow"): llms.add(langchain.chat_models.ChatMlflow) return llms def _validate_and_wrap_lc_model(lc_model, loader_fn): import langchain.agents.agent import langchain.chains.base import langchain.chains.llm import langchain.llms.huggingface_hub import langchain.llms.openai import langchain.schema if isinstance(lc_model, str): if os.path.basename(os.path.abspath(lc_model)) != "chain.py": raise mlflow.MlflowException.invalid_parameter_value( f"If {lc_model} is a string, it must be the path to a file " "named `chain.py` on the local filesystem." ) return lc_model if not isinstance(lc_model, supported_lc_types()): raise mlflow.MlflowException.invalid_parameter_value( _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(lc_model).__name__) ) _SUPPORTED_LLMS = _get_supported_llms() if isinstance(lc_model, langchain.chains.llm.LLMChain) and not any( isinstance(lc_model.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS ): logger.warning( _UNSUPPORTED_LLM_WARNING_MESSAGE, type(lc_model.llm).__name__, ) if isinstance(lc_model, langchain.agents.agent.AgentExecutor) and not any( isinstance(lc_model.agent.llm_chain.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS ): logger.warning( _UNSUPPORTED_LLM_WARNING_MESSAGE, type(lc_model.agent.llm_chain.llm).__name__, ) if special_chain_info := _get_special_chain_info_or_none(lc_model): if isinstance(lc_model, langchain.chains.RetrievalQA) and version.parse( langchain.__version__ ) < version.parse("0.0.194"): raise mlflow.MlflowException.invalid_parameter_value( _UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE.format( instance_type=type(lc_model).__name__ ) ) if loader_fn is None: raise mlflow.MlflowException.invalid_parameter_value( f"For {type(lc_model).__name__} models, a `loader_fn` must be provided." ) if not isinstance(loader_fn, types.FunctionType): raise mlflow.MlflowException.invalid_parameter_value( "The `loader_fn` must be a function that returns a {loader_arg}.".format( loader_arg=special_chain_info.loader_arg ) ) # If lc_model is a retriever, wrap it in a _RetrieverChain if isinstance(lc_model, langchain.schema.BaseRetriever): from mlflow.langchain.retriever_chain import _RetrieverChain if loader_fn is None: raise mlflow.MlflowException.invalid_parameter_value( f"For {type(lc_model).__name__} models, a `loader_fn` must be provided." ) if not isinstance(loader_fn, types.FunctionType): raise mlflow.MlflowException.invalid_parameter_value( "The `loader_fn` must be a function that returns a retriever." 
) lc_model = _RetrieverChain(retriever=lc_model) return lc_model def _save_base_lcs(model, path, loader_fn=None, persist_dir=None): import langchain.agents.agent import langchain.chains.base import langchain.chains.llm model_data_path = os.path.join(path, _MODEL_DATA_YAML_FILE_NAME) model_data_kwargs = { _MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY: _BASE_LOAD_KEY, } if isinstance(model, langchain.chains.llm.LLMChain): model.save(model_data_path) elif isinstance(model, langchain.agents.agent.AgentExecutor): if model.agent and model.agent.llm_chain: model.agent.llm_chain.save(model_data_path) if model.agent: agent_data_path = os.path.join(path, _AGENT_DATA_FILE_NAME) model.save_agent(agent_data_path) model_data_kwargs[_AGENT_DATA_KEY] = _AGENT_DATA_FILE_NAME if model.tools: tools_data_path = os.path.join(path, _TOOLS_DATA_FILE_NAME) try: with open(tools_data_path, "wb") as f: cloudpickle.dump(model.tools, f) except Exception as e: raise mlflow.MlflowException( "Error when attempting to pickle the AgentExecutor tools. " "This model likely does not support serialization." ) from e model_data_kwargs[_TOOLS_DATA_KEY] = _TOOLS_DATA_FILE_NAME else: raise mlflow.MlflowException.invalid_parameter_value( "For initializing the AgentExecutor, tools must be provided." ) key_to_ignore = ["llm_chain", "agent", "tools", "callback_manager"] temp_dict = {k: v for k, v in model.__dict__.items() if k not in key_to_ignore} agent_primitive_path = os.path.join(path, _AGENT_PRIMITIVES_FILE_NAME) with open(agent_primitive_path, "w") as config_file: json.dump(temp_dict, config_file, indent=4) model_data_kwargs[_AGENT_PRIMITIVES_DATA_KEY] = _AGENT_PRIMITIVES_FILE_NAME elif special_chain_info := _get_special_chain_info_or_none(model): # Save loader_fn by pickling loader_fn_path = os.path.join(path, _LOADER_FN_FILE_NAME) with open(loader_fn_path, "wb") as f: cloudpickle.dump(loader_fn, f) model_data_kwargs[_LOADER_FN_KEY] = _LOADER_FN_FILE_NAME model_data_kwargs[_LOADER_ARG_KEY] = special_chain_info.loader_arg if persist_dir is not None: if os.path.exists(persist_dir): # Save persist_dir by copying into subdir _PERSIST_DIR_NAME persist_dir_data_path = os.path.join(path, _PERSIST_DIR_NAME) shutil.copytree(persist_dir, persist_dir_data_path) model_data_kwargs[_PERSIST_DIR_KEY] = _PERSIST_DIR_NAME else: raise mlflow.MlflowException.invalid_parameter_value( "The directory provided for persist_dir does not exist." 
) # Save model model.save(model_data_path) elif isinstance(model, langchain.chains.base.Chain): logger.warning( _UNSUPPORTED_MODEL_WARNING_MESSAGE, type(model).__name__, ) model.save(model_data_path) else: raise mlflow.MlflowException.invalid_parameter_value( _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__) ) return model_data_kwargs def _load_from_pickle(path): with open(path, "rb") as f: return cloudpickle.load(f) def _load_from_json(path): with open(path) as f: return json.load(f) def _load_from_yaml(path): with open(path) as f: return yaml.safe_load(f) def _get_path_by_key(root_path, key, conf): key_path = conf.get(key) return os.path.join(root_path, key_path) if key_path else None def _load_base_lcs( local_model_path, conf, ): lc_model_path = os.path.join( local_model_path, conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME) ) agent_path = _get_path_by_key(local_model_path, _AGENT_DATA_KEY, conf) tools_path = _get_path_by_key(local_model_path, _TOOLS_DATA_KEY, conf) agent_primitive_path = _get_path_by_key(local_model_path, _AGENT_PRIMITIVES_DATA_KEY, conf) loader_fn_path = _get_path_by_key(local_model_path, _LOADER_FN_KEY, conf) persist_dir = _get_path_by_key(local_model_path, _PERSIST_DIR_KEY, conf) model_type = conf.get(_MODEL_TYPE_KEY) loader_arg = conf.get(_LOADER_ARG_KEY) from langchain.chains.loading import load_chain from mlflow.langchain.retriever_chain import _RetrieverChain if loader_arg is not None: if loader_fn_path is None: raise mlflow.MlflowException.invalid_parameter_value( "Missing file for loader_fn which is required to build the model." ) loader_fn = _load_from_pickle(loader_fn_path) kwargs = {loader_arg: loader_fn(persist_dir)} if model_type == _RetrieverChain.__name__: model = _RetrieverChain.load(lc_model_path, **kwargs).retriever else: model = load_chain(lc_model_path, **kwargs) elif agent_path is None and tools_path is None: model = load_chain(lc_model_path) else: from langchain.agents import initialize_agent llm = load_chain(lc_model_path) tools = [] kwargs = {} if os.path.exists(tools_path): tools = _load_from_pickle(tools_path) else: raise mlflow.MlflowException( "Missing file for tools which is required to build the AgentExecutor object." ) if os.path.exists(agent_primitive_path): kwargs = _load_from_json(agent_primitive_path) model = initialize_agent(tools=tools, llm=llm, agent_path=agent_path, **kwargs) return model def register_pydantic_serializer(): """ Helper function to pickle pydantic fields for pydantic v1. Pydantic's Cython validators are not serializable. https://github.com/cloudpipe/cloudpickle/issues/408 """ import pydantic if Version(pydantic.__version__) >= Version("2.0.0"): return import pydantic.fields def custom_serializer(obj): return { "name": obj.name, # outer_type_ is the original type for ModelFields, # while type_ can be updated later with the nested type # like int for List[int]. "type_": obj.outer_type_, "class_validators": obj.class_validators, "model_config": obj.model_config, "default": obj.default, "default_factory": obj.default_factory, "required": obj.required, "final": obj.final, "alias": obj.alias, "field_info": obj.field_info, } def custom_deserializer(kwargs): return pydantic.fields.ModelField(**kwargs) def _CloudPicklerReducer(obj): return custom_deserializer, (custom_serializer(obj),) warnings.warn( "Using custom serializer to pickle pydantic.fields.ModelField classes, " "this might miss some fields and validators. 
To avoid this, " "please upgrade pydantic to v2 using `pip install pydantic -U` with " "langchain 0.0.267 and above." ) cloudpickle.CloudPickler.dispatch[pydantic.fields.ModelField] = _CloudPicklerReducer def unregister_pydantic_serializer(): import pydantic if Version(pydantic.__version__) >= Version("2.0.0"): return cloudpickle.CloudPickler.dispatch.pop(pydantic.fields.ModelField, None) @contextlib.contextmanager def register_pydantic_v1_serializer_cm(): try: register_pydantic_serializer() yield finally: unregister_pydantic_serializer()
[ "langchain.schema.output_parser.StrOutputParser", "langchain.agents.initialize_agent", "langchain.chains.loading.load_chain" ]
[((2074, 2101), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2091, 2101), False, 'import logging\n'), ((10682, 10728), 'os.path.join', 'os.path.join', (['path', '_MODEL_DATA_YAML_FILE_NAME'], {}), '(path, _MODEL_DATA_YAML_FILE_NAME)\n', (10694, 10728), False, 'import os\n'), ((17545, 17796), 'warnings.warn', 'warnings.warn', (['"""Using custom serializer to pickle pydantic.fields.ModelField classes, this might miss some fields and validators. To avoid this, please upgrade pydantic to v2 using `pip install pydantic -U` with langchain 0.0.267 and above."""'], {}), "(\n 'Using custom serializer to pickle pydantic.fields.ModelField classes, this might miss some fields and validators. To avoid this, please upgrade pydantic to v2 using `pip install pydantic -U` with langchain 0.0.267 and above.'\n )\n", (17558, 17796), False, 'import warnings\n'), ((18062, 18133), 'cloudpickle.CloudPickler.dispatch.pop', 'cloudpickle.CloudPickler.dispatch.pop', (['pydantic.fields.ModelField', 'None'], {}), '(pydantic.fields.ModelField, None)\n', (18099, 18133), False, 'import cloudpickle\n'), ((5832, 5868), 'packaging.version.parse', 'version.parse', (['langchain.__version__'], {}), '(langchain.__version__)\n', (5845, 5868), False, 'from packaging import version\n'), ((5872, 5896), 'packaging.version.parse', 'version.parse', (['"""0.0.246"""'], {}), "('0.0.246')\n", (5885, 5896), False, 'from packaging import version\n'), ((6002, 6037), 'importlib.util.find_spec', 'find_spec', (['"""langchain_experimental"""'], {}), "('langchain_experimental')\n", (6011, 6037), False, 'from importlib.util import find_spec\n'), ((10434, 10469), 'mlflow.langchain.retriever_chain._RetrieverChain', '_RetrieverChain', ([], {'retriever': 'lc_model'}), '(retriever=lc_model)\n', (10449, 10469), False, 'from mlflow.langchain.retriever_chain import _RetrieverChain\n'), ((14012, 14031), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (14028, 14031), False, 'import cloudpickle\n'), ((14102, 14114), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14111, 14114), False, 'import json\n'), ((14185, 14202), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (14199, 14202), False, 'import yaml\n'), ((14289, 14322), 'os.path.join', 'os.path.join', (['root_path', 'key_path'], {}), '(root_path, key_path)\n', (14301, 14322), False, 'import os\n'), ((16628, 16657), 'packaging.version.Version', 'Version', (['pydantic.__version__'], {}), '(pydantic.__version__)\n', (16635, 16657), False, 'from packaging.version import Version\n'), ((16661, 16677), 'packaging.version.Version', 'Version', (['"""2.0.0"""'], {}), "('2.0.0')\n", (16668, 16677), False, 'from packaging.version import Version\n'), ((17405, 17441), 'pydantic.fields.ModelField', 'pydantic.fields.ModelField', ([], {}), '(**kwargs)\n', (17431, 17441), False, 'import pydantic\n'), ((17991, 18020), 'packaging.version.Version', 'Version', (['pydantic.__version__'], {}), '(pydantic.__version__)\n', (17998, 18020), False, 'from packaging.version import Version\n'), ((18024, 18040), 'packaging.version.Version', 'Version', (['"""2.0.0"""'], {}), "('2.0.0')\n", (18031, 18040), False, 'from packaging.version import Version\n'), ((4848, 4873), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '(**config)\n', (4863, 4873), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((6381, 6415), 'mlflow.utils.class_utils._get_class_from_string', '_get_class_from_string', (['class_name'], {}), '(class_name)\n', 
(6403, 6415), False, 'from mlflow.utils.class_utils import _get_class_from_string\n'), ((7750, 7909), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['f"""If {lc_model} is a string, it must be the path to a file named `chain.py` on the local filesystem."""'], {}), "(\n f'If {lc_model} is a string, it must be the path to a file named `chain.py` on the local filesystem.'\n )\n", (7796, 7909), False, 'import mlflow\n'), ((10274, 10389), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""The `loader_fn` must be a function that returns a retriever."""'], {}), "(\n 'The `loader_fn` must be a function that returns a retriever.')\n", (10320, 10389), False, 'import mlflow\n'), ((12255, 12302), 'os.path.join', 'os.path.join', (['path', '_AGENT_PRIMITIVES_FILE_NAME'], {}), '(path, _AGENT_PRIMITIVES_FILE_NAME)\n', (12267, 12302), False, 'import os\n'), ((15215, 15334), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""Missing file for loader_fn which is required to build the model."""'], {}), "(\n 'Missing file for loader_fn which is required to build the model.')\n", (15261, 15334), False, 'import mlflow\n'), ((15629, 15664), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path, **kwargs)\n', (15639, 15664), False, 'from langchain.chains.loading import load_chain\n'), ((15733, 15758), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path)\n', (15743, 15758), False, 'from langchain.chains.loading import load_chain\n'), ((15838, 15863), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path)\n', (15848, 15863), False, 'from langchain.chains.loading import load_chain\n'), ((15915, 15941), 'os.path.exists', 'os.path.exists', (['tools_path'], {}), '(tools_path)\n', (15929, 15941), False, 'import os\n'), ((16169, 16205), 'os.path.exists', 'os.path.exists', (['agent_primitive_path'], {}), '(agent_primitive_path)\n', (16183, 16205), False, 'import os\n'), ((16283, 16354), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent_path': 'agent_path'}), '(tools=tools, llm=llm, agent_path=agent_path, **kwargs)\n', (16299, 16354), False, 'from langchain.agents import initialize_agent\n'), ((7690, 7715), 'os.path.abspath', 'os.path.abspath', (['lc_model'], {}), '(lc_model)\n', (7705, 7715), False, 'import os\n'), ((8986, 9022), 'packaging.version.parse', 'version.parse', (['langchain.__version__'], {}), '(langchain.__version__)\n', (8999, 9022), False, 'from packaging import version\n'), ((9047, 9071), 'packaging.version.parse', 'version.parse', (['"""0.0.194"""'], {}), "('0.0.194')\n", (9060, 9071), False, 'from packaging import version\n'), ((11176, 11217), 'os.path.join', 'os.path.join', (['path', '_AGENT_DATA_FILE_NAME'], {}), '(path, _AGENT_DATA_FILE_NAME)\n', (11188, 11217), False, 'import os\n'), ((11390, 11431), 'os.path.join', 'os.path.join', (['path', '_TOOLS_DATA_FILE_NAME'], {}), '(path, _TOOLS_DATA_FILE_NAME)\n', (11402, 11431), False, 'import os\n'), ((11918, 12032), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""For initializing the AgentExecutor, tools must be provided."""'], {}), "(\n 'For initializing the AgentExecutor, tools must be provided.')\n", (11964, 12032), False, 'import mlflow\n'), ((12376, 12419), 
'json.dump', 'json.dump', (['temp_dict', 'config_file'], {'indent': '(4)'}), '(temp_dict, config_file, indent=4)\n', (12385, 12419), False, 'import json\n'), ((12639, 12679), 'os.path.join', 'os.path.join', (['path', '_LOADER_FN_FILE_NAME'], {}), '(path, _LOADER_FN_FILE_NAME)\n', (12651, 12679), False, 'import os\n'), ((15539, 15584), 'mlflow.langchain.retriever_chain._RetrieverChain.load', '_RetrieverChain.load', (['lc_model_path'], {}), '(lc_model_path, **kwargs)\n', (15559, 15584), False, 'from mlflow.langchain.retriever_chain import _RetrieverChain\n'), ((16025, 16136), 'mlflow.MlflowException', 'mlflow.MlflowException', (['"""Missing file for tools which is required to build the AgentExecutor object."""'], {}), "(\n 'Missing file for tools which is required to build the AgentExecutor object.'\n )\n", (16047, 16136), False, 'import mlflow\n'), ((12738, 12768), 'cloudpickle.dump', 'cloudpickle.dump', (['loader_fn', 'f'], {}), '(loader_fn, f)\n', (12754, 12768), False, 'import cloudpickle\n'), ((12961, 12988), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (12975, 12988), False, 'import os\n'), ((11524, 11556), 'cloudpickle.dump', 'cloudpickle.dump', (['model.tools', 'f'], {}), '(model.tools, f)\n', (11540, 11556), False, 'import cloudpickle\n'), ((11614, 11756), 'mlflow.MlflowException', 'mlflow.MlflowException', (['"""Error when attempting to pickle the AgentExecutor tools. This model likely does not support serialization."""'], {}), "(\n 'Error when attempting to pickle the AgentExecutor tools. This model likely does not support serialization.'\n )\n", (11636, 11756), False, 'import mlflow\n'), ((13106, 13143), 'os.path.join', 'os.path.join', (['path', '_PERSIST_DIR_NAME'], {}), '(path, _PERSIST_DIR_NAME)\n', (13118, 13143), False, 'import os\n'), ((13160, 13211), 'shutil.copytree', 'shutil.copytree', (['persist_dir', 'persist_dir_data_path'], {}), '(persist_dir, persist_dir_data_path)\n', (13175, 13211), False, 'import shutil\n'), ((13324, 13433), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""The directory provided for persist_dir does not exist."""'], {}), "(\n 'The directory provided for persist_dir does not exist.')\n", (13370, 13433), False, 'import mlflow\n')]
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json


class WhatsAppClient:

    API_URL = "https://graph.facebook.com/v17.0/"
    WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
    WHATSAPP_CLOUD_NUMBER_ID = "<Phone number ID from your WhatsApp API Setup>"

    def __init__(self):
        self.headers = {
            "Authorization": f"Bearer {self.WHATSAPP_API_TOKEN}",
            "Content-Type": "application/json",
        }
        self.API_URL = self.API_URL + self.WHATSAPP_CLOUD_NUMBER_ID

    def send_text_message(self, message, phone_number):
        payload = {
            "messaging_product": 'whatsapp',
            "to": phone_number,
            "type": "text",
            "text": {
                "preview_url": False,
                "body": message
            }
        }
        response = requests.post(f"{self.API_URL}/messages", json=payload, headers=self.headers)
        print(response.status_code)
        assert response.status_code == 200, "Error sending message"
        return response.status_code


os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"

llm = Replicate(
    model=llama2_13b_chat,
    model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens": 500}
)
client = WhatsAppClient()
app = Flask(__name__)


@app.route("/")
def hello_llama():
    return "<p>Hello Llama 2</p>"


@app.route('/msgrcvd', methods=['POST', 'GET'])
def msgrcvd():
    message = request.args.get('message')
    # client.send_template_message("hello_world", "en_US", "14086745477")
    answer = llm(message)
    print(message)
    print(answer)
    client.send_text_message(llm(message), "14086745477")
    return message + "<p/>" + answer
[ "langchain.llms.Replicate" ]
[((1502, 1609), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1511, 1609), False, 'from langchain.llms import Replicate\n'), ((1647, 1662), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1652, 1662), False, 'from flask import Flask\n'), ((1815, 1842), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (1831, 1842), False, 'from flask import request\n'), ((1101, 1178), 'requests.post', 'requests.post', (['f"""{self.API_URL}/messages"""'], {'json': 'payload', 'headers': 'self.headers'}), "(f'{self.API_URL}/messages', json=payload, headers=self.headers)\n", (1114, 1178), False, 'import requests\n')]
import langchain
from langchain.cache import InMemoryCache
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

langchain.llm_cache = InMemoryCache()

llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)

if __name__ == "__main__":
    # Run the chain only specifying the input variable.
    print(chain.run("colorful socks"))
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.cache.InMemoryCache", "langchain.llms.OpenAI" ]
[((199, 214), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (212, 214), False, 'from langchain.cache import InMemoryCache\n'), ((223, 246), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (229, 246), False, 'from langchain.llms import OpenAI\n'), ((256, 372), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (270, 372), False, 'from langchain.prompts import PromptTemplate\n'), ((389, 421), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (397, 421), False, 'from langchain.chains import LLMChain\n')]
import time

from dotenv import load_dotenv

import langchain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from langchain.cache import InMemoryCache

load_dotenv()

# to make caching obvious, we use a slow model
llm = OpenAI(model_name="text-davinci-002")
langchain.llm_cache = InMemoryCache()

with get_openai_callback() as cb:
    start = time.time()
    result = llm("What doesn't fall far from the tree?")
    print(result)
    end = time.time()
    print("--- cb")
    print(str(cb) + f" ({end - start:.2f} seconds)")

with get_openai_callback() as cb2:
    start = time.time()
    result2 = llm("What doesn't fall far from the tree?")
    result3 = llm("What doesn't fall far from the tree?")
    end = time.time()
    print(result2)
    print(result3)
    print("--- cb2")
    print(str(cb2) + f" ({end - start:.2f} seconds)")
[ "langchain.cache.InMemoryCache", "langchain.llms.OpenAI", "langchain.callbacks.get_openai_callback" ]
[((189, 202), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (200, 202), False, 'from dotenv import load_dotenv\n'), ((257, 294), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-002"""'}), "(model_name='text-davinci-002')\n", (263, 294), False, 'from langchain.llms import OpenAI\n'), ((318, 333), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (331, 333), False, 'from langchain.cache import InMemoryCache\n'), ((340, 361), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (359, 361), False, 'from langchain.callbacks import get_openai_callback\n'), ((381, 392), 'time.time', 'time.time', ([], {}), '()\n', (390, 392), False, 'import time\n'), ((478, 489), 'time.time', 'time.time', ([], {}), '()\n', (487, 489), False, 'import time\n'), ((569, 590), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (588, 590), False, 'from langchain.callbacks import get_openai_callback\n'), ((611, 622), 'time.time', 'time.time', ([], {}), '()\n', (620, 622), False, 'import time\n'), ((749, 760), 'time.time', 'time.time', ([], {}), '()\n', (758, 760), False, 'import time\n')]
import langchain
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from steamship import File, Task
from steamship.invocable import PackageService, post
from steamship_langchain.cache import SteamshipCache
from steamship_langchain.llms import OpenAI


class SummarizeAudioPackage(PackageService):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        langchain.llm_cache = SteamshipCache(client=self.client)
        self.llm = OpenAI(client=self.client, cache=True)

    @post("summarize_file")
    def summarize_file(self, file_handle: str) -> str:
        file = File.get(self.client, handle=file_handle)
        text_splitter = CharacterTextSplitter()
        texts = []
        for block in file.blocks:
            texts.extend(text_splitter.split_text(block.text))
        docs = [Document(page_content=t) for t in texts]
        chain = load_summarize_chain(self.llm, chain_type="map_reduce")
        return chain.run(docs)

    @post("summarize_audio_file")
    def summarize_audio_file(self, file_handle: str) -> Task[str]:
        transcriber = self.client.use_plugin("whisper-s2t-blockifier")
        audio_file = File.get(self.client, handle=file_handle)
        transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)
        return self.invoke_later(
            "summarize_file",
            wait_on_tasks=[transcribe_task],
            arguments={"file_handle": audio_file.handle},
        )
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.chains.summarize.load_summarize_chain", "langchain.docstore.document.Document" ]
[((613, 635), 'steamship.invocable.post', 'post', (['"""summarize_file"""'], {}), "('summarize_file')\n", (617, 635), False, 'from steamship.invocable import PackageService, post\n'), ((1078, 1106), 'steamship.invocable.post', 'post', (['"""summarize_audio_file"""'], {}), "('summarize_audio_file')\n", (1082, 1106), False, 'from steamship.invocable import PackageService, post\n'), ((514, 548), 'steamship_langchain.cache.SteamshipCache', 'SteamshipCache', ([], {'client': 'self.client'}), '(client=self.client)\n', (528, 548), False, 'from steamship_langchain.cache import SteamshipCache\n'), ((568, 606), 'steamship_langchain.llms.OpenAI', 'OpenAI', ([], {'client': 'self.client', 'cache': '(True)'}), '(client=self.client, cache=True)\n', (574, 606), False, 'from steamship_langchain.llms import OpenAI\n'), ((706, 747), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (714, 747), False, 'from steamship import File, Task\n'), ((772, 795), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {}), '()\n', (793, 795), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((985, 1040), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['self.llm'], {'chain_type': '"""map_reduce"""'}), "(self.llm, chain_type='map_reduce')\n", (1005, 1040), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1266, 1307), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (1274, 1307), False, 'from steamship import File, Task\n'), ((928, 952), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (936, 952), False, 'from langchain.docstore.document import Document\n')]
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio

os.environ["OPENAI_API_KEY"] = "ADD KEY"

import openai
from openai import OpenAI

client = OpenAI()

from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage

chat_template = ChatPromptTemplate.from_messages(
    [
        SystemMessage(
            content=(
                "You are a presonal assistant for {your name] and your name is luna "
                "if the user call you by any other name than luna you need to correct him by your orginal name."
                "And for every output you can also use the username in the answer which will be nice gesture"
                "you can act more,like an human speaking more than an ai replying to the message"
                "Consider the user as your friend"
                "Speak like a friend"
                "Be more creative and funny way"
            )
        ),
        HumanMessagePromptTemplate.from_template("{text}"),
    ]
)
llm = ChatOpenAI()


# Record audio
def record_audio(filename, duration, fs):
    print("Recording audio...")
    recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
    sd.wait()
    wavio.write(filename, recording, fs, sampwidth=2)
    print("Audio recorded and saved as", filename)


## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")

website_heading = "I am Your Personal Voice assistant"

# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")

if st.button(label="Click here to speak"):
    audio_filename = "input.wav"
    duration = 5  # Duration of the recording in seconds
    fs = 44100  # Sample rate
    record_audio(audio_filename, duration, fs)
    ## user input recorded and stores

    ## converting to text using whisper
    audio_file = open("input.wav", "rb")
    transcript = client.audio.translations.create(
        model="whisper-1",
        file=audio_file)
    a = transcript.text
    # st.write(a)
    print(a)

    ## model
    a = llm(chat_template.format_messages(text=a))
    a = a.content

    ## audio output
    speech_file_path = "speech.mp3"
    response = client.audio.speech.create(
        model="tts-1",
        voice="nova",
        input=a)
    response.stream_to_file(speech_file_path)
    st.audio("speech.mp3")
[ "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.schema.messages.SystemMessage", "langchain.chat_models.ChatOpenAI" ]
[((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')]
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import langchain
from langchain.llms import OpenAI
from langchain import PromptTemplate
from configparser import ConfigParser
from langchain.cache import InMemoryCache
from posttext.src.posttext import PostText

langchain.llm_cache = InMemoryCache()


class ViewEngine:
    def __init__(self, path):
        print(path)
        config = ConfigParser(comment_prefixes=None)
        config.read(os.path.join(path, 'config.ini'))
        self.posttext = PostText(config, path)
        self.llm = OpenAI()

    def verbalize(self, query: str, answer: str):
        template = "Question: {query}\n\nAnswer: {context}\n\n Can you summarize the answer in a single sentence (durations are in seconds)?"
        prompt = PromptTemplate(
            input_variables=["query", "context"],
            template=template)
        return self.llm(prompt.format(query=query, context=answer))

    def flatten(self, lst):
        """Flatten a nested list"""
        flattened_list = []
        for item in lst:
            if isinstance(item, tuple):
                item = list(item)
            if isinstance(item, list):
                flattened_list.extend(self.flatten(item))
            else:
                flattened_list.append(item)
        return flattened_list

    def query(self, query: str):
        """Answer a query using a View-based QA method."""
        formattedprompt, sqlquery_before, sqlquery, view_res, eng_answer, provenance_ids, retrieval_res = self.posttext.query(query)
        # print(provenance_ids)
        provenance_ids = self.flatten(provenance_ids)
        # sources = [tpl[1][1] for tpl in sources]
        return {"question": query,
                "view_res": view_res,
                "eng_answer": eng_answer,
                "answer": eng_answer,
                "sources": provenance_ids,
                "sql": sqlquery,
                "sql_before": sqlquery_before}


if __name__ == '__main__':
    # engine = ViewEngine("public/digital_data/")
    engine = ViewEngine("personal-data/app_data/")
    # print(engine.query("How many cities did I visit when I travel to Japan?"))
[ "langchain.cache.InMemoryCache", "langchain.llms.OpenAI", "langchain.PromptTemplate" ]
[((847, 862), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (860, 862), False, 'from langchain.cache import InMemoryCache\n'), ((951, 986), 'configparser.ConfigParser', 'ConfigParser', ([], {'comment_prefixes': 'None'}), '(comment_prefixes=None)\n', (963, 986), False, 'from configparser import ConfigParser\n'), ((1065, 1087), 'posttext.src.posttext.PostText', 'PostText', (['config', 'path'], {}), '(config, path)\n', (1073, 1087), False, 'from posttext.src.posttext import PostText\n'), ((1107, 1115), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1113, 1115), False, 'from langchain.llms import OpenAI\n'), ((1326, 1397), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'context']", 'template': 'template'}), "(input_variables=['query', 'context'], template=template)\n", (1340, 1397), False, 'from langchain import PromptTemplate\n'), ((1007, 1039), 'os.path.join', 'os.path.join', (['path', '"""config.ini"""'], {}), "(path, 'config.ini')\n", (1019, 1039), False, 'import os\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : chatbase
# @Time : 2023/7/5 15:29
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :

from meutils.pipe import *
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.cache import InMemoryCache
from langchain.memory import ConversationBufferWindowMemory
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore, DocArrayInMemorySearch, Zilliz, FAISS
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain  # emits SOURCE text, wasting tokens
from langchain.chains import ConversationChain
from langchain.document_loaders import DirectoryLoader, PyMuPDFLoader

# import langchain
#
# langchain.verbose = True
# langchain.debug = True


class ChatBase(object):
    """ ChatBase().create_index().search().run(query='1+1') """

    def __init__(self, model="gpt-3.5-turbo", embeddings: Embeddings = OpenAIEmbeddings(chunk_size=100), k=1, temperature=0):
        self.memory = ConversationBufferWindowMemory(memory_key="chat_history", return_messages=True, k=k)
        self.memory_messages = self.memory.chat_memory.messages

        self.embeddings = embeddings  # todo: local vectors
        self.llm = ChatOpenAI(model=model, temperature=temperature, streaming=True)
        self.chain = load_qa_chain(self.llm, chain_type="stuff")  # map_rerank: re-ranking

        # self._docs = None
        self._index = None
        self._input = None

    def create_index(self, docs: List[Document], vectorstore: VectorStore = DocArrayInMemorySearch):
        # main time cost; does the cache take effect?
        self._index = vectorstore.from_documents(docs, self.embeddings)  # embedding stage: could this be multithreaded through the cache?
        return self

    def search(self, query, k: int = 5, threshold: float = 0.7, **kwargs):
        docs_scores = self._index.similarity_search_with_score(query, k=k, **kwargs)
        self._docs = []
        for doc, score in docs_scores:
            if score > threshold:
                doc.metadata['score'] = score
                doc.metadata['page_content'] = doc.page_content
                self._docs.append(doc)

        self._input = {"input_documents": self._docs, "question": query}  # todo: input_func
        return self

    def run(self):
        return self.chain.run(self._input)  # streaming
[ "langchain.chains.question_answering.load_qa_chain", "langchain.embeddings.OpenAIEmbeddings", "langchain.memory.ConversationBufferWindowMemory", "langchain.chat_models.ChatOpenAI" ]
[((1213, 1245), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'chunk_size': '(100)'}), '(chunk_size=100)\n', (1229, 1245), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1307, 1396), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'k': 'k'}), "(memory_key='chat_history', return_messages=\n True, k=k)\n", (1337, 1396), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((1526, 1590), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model', 'temperature': 'temperature', 'streaming': '(True)'}), '(model=model, temperature=temperature, streaming=True)\n', (1536, 1590), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1612, 1655), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['self.llm'], {'chain_type': '"""stuff"""'}), "(self.llm, chain_type='stuff')\n", (1625, 1655), False, 'from langchain.chains.question_answering import load_qa_chain\n')]
import langchain
from langchain.chat_models.base import BaseChatModel, SimpleChatModel
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatResult,
    HumanMessage,
    SystemMessage,
)
from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict
import websocket
import uuid
import json

from .general import get_open_port


class MessageDict(TypedDict):
    role: str
    content: str


class RequestDict(TypedDict):
    messages: List[MessageDict]
    temperature: float
    request_id: str


class ResponseDict(TypedDict):
    content: str
    request_id: str


class ChatWindowAI(BaseChatModel):
    model_name: str = "window"
    """Model name to use."""
    temperature: float = 0
    """What sampling temperature to use."""
    streaming: bool = False
    """Whether to stream the results."""
    request_timeout: int = 3600
    """Timeout in seconds for the request."""

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "window-chat"

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        output_str = self._call(messages, stop=stop)
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        result = ChatResult(generations=[generation])
        return result

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        return self._generate(messages, stop=stop)

    def _call(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> str:
        request_id = str(uuid.uuid4())
        request: RequestDict = {
            "messages": [],
            "temperature": self.temperature,
            "request_id": request_id,
        }
        for message in messages:
            role = "user"  # default role is user
            if isinstance(message, HumanMessage):
                role = "user"
            elif isinstance(message, AIMessage):
                role = "assistant"
            elif isinstance(message, SystemMessage):
                role = "system"
            request["messages"].append(
                {
                    "role": role,
                    "content": message.content,
                }
            )

        ws = websocket.WebSocket()
        port = get_open_port()
        ws.connect(f"ws://127.0.0.1:{port}/windowmodel")
        ws.send(json.dumps(request))
        message = ws.recv()
        ws.close()

        response: ResponseDict = json.loads(message)
        response_content = response["content"]
        response_request_id = response["request_id"]

        # sanity check that response corresponds to request
        if request_id != response_request_id:
            raise ValueError(
                f"Invalid request ID: {response_request_id}, expected: {request_id}"
            )

        return response_content
[ "langchain.schema.AIMessage", "langchain.schema.ChatResult", "langchain.schema.ChatGeneration" ]
[((1236, 1265), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (1245, 1265), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((1287, 1318), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (1301, 1318), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((1336, 1372), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (1346, 1372), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((2389, 2410), 'websocket.WebSocket', 'websocket.WebSocket', ([], {}), '()\n', (2408, 2410), False, 'import websocket\n'), ((2617, 2636), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (2627, 2636), False, 'import json\n'), ((1701, 1713), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1711, 1713), False, 'import uuid\n'), ((2515, 2534), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (2525, 2534), False, 'import json\n')]
''' Example script to automatically write a screenplay from a newsgroup post using agents with Crew.ai (https://github.com/joaomdmoura/crewAI) You can also try it out with a personal email with many replies back and forth and see it turn into a movie script. Demonstrates: - multiple API endpoints (offical Mistral, Together.ai, Anyscale) - running single tasks: spam detection and scoring - running a crew to create a screenplay from a newsgroup post by first analyzing the text, creating a dialogue and ultimately formatting it Additional endpoints requirements: pip install langchain_mistralai pip install langchain-together Author: Toon Beerten ([email protected]) License: MIT ''' import os import re from crewai import Agent, Task, Crew, Process from langchain.agents import AgentType, initialize_agent, load_tools from langchain.chat_models import openai #endpoint specific imports import langchain_mistralai from langchain_mistralai.chat_models import ChatMistralAI from langchain_community.llms import Together from langchain_community.chat_models import ChatAnyscale ## Choose here which API endpoint to use, uncomment only one: # Official Mistral: benefit of having access to mistral-medium # Together.ai: lots of models to choose from # Anyscale: cheapest at the time of writing #endpoint = 'mistral_official' #endpoint = 'togetherai' endpoint = 'mistral_official' #put you API keys here mistral_key = '' togetherai_key = '' anyscale_key = '' #model choice: i already have good results with mistralai/Mistral-7B-Instruct-v0.2 if endpoint == 'mistral_official': mixtral=ChatMistralAI(mistral_api_key=mistral_key, model="mistral-tiny",temperature=0.6) elif endpoint == 'togetherai': #i get timeouts using Together() , so i use ChatOpenAI() instead #mixtral = Together(model="mistralai/Mistral-7B-Instruct-v0.2", together_api_key=togetherai_key ) #or mistralai/Mixtral-8x7B-Instruct-v0.1 mixtral= openai.ChatOpenAI(base_url="https://api.together.xyz/v1", api_key=togetherai_key, temperature=0.5, model="mistralai/Mistral-7B-Instruct-v0.2") elif endpoint == 'anyscale': mixtral = ChatAnyscale(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=anyscale_key, streaming=False) ## Define Agents spamfilter = Agent( role='spamfilter', goal='''Decide whether a text is spam or not.''', backstory='You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.', llm=mixtral, verbose=True, allow_delegation=False ) analyst = Agent( role='analyse', goal='''You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.''', backstory='You are an expert discussion analyst.', llm=mixtral, verbose=True, allow_delegation=False ) scriptwriter = Agent( role='scriptwriter', goal='Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.', backstory='''You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.''', llm=mixtral, verbose=True, allow_delegation=False ) formatter = Agent( role='formatter', goal='''Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).''', backstory='You are an expert text formatter.', llm=mixtral, verbose=True, allow_delegation=False ) scorer = Agent( role='scorer', goal='''You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest: Scale: 1-3: Poor - The dialogue has significant issues that prevent effective communication. 4-6: Average - The dialogue has some good points but also has notable weaknesses. 7-9: Good - The dialogue is mostly effective with minor issues. 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues. Factors to Consider: Clarity: How clear is the exchange? Are the statements and responses easy to understand? Relevance: Do the responses stay on topic and contribute to the conversation's purpose? Conciseness: Is the dialogue free of unnecessary information or redundancy? Politeness: Are the participants respectful and considerate in their interaction? Engagement: Do the participants seem interested and actively involved in the dialogue? Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions? Coherence: Does the dialogue make logical sense as a whole? Responsiveness: Do the participants address each other's points adequately? Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue? Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue? ''', backstory='You are an expert at scoring conversations on a scale of 1 to 10.', llm=mixtral, verbose=True, allow_delegation=False ) #this is one example of a public post in the newsgroup alt.atheism #try it out yourself by replacing this with your own email thread or text or ... discussion = '''From: [email protected] (Keith Allan Schneider) Subject: Re: <Political Atheists? Organization: California Institute of Technology, Pasadena Lines: 50 NNTP-Posting-Host: punisher.caltech.edu [email protected] (Robert Beauchaine) writes: >>I think that about 70% (or so) people approve of the >>death penalty, even realizing all of its shortcomings. Doesn't this make >>it reasonable? Or are *you* the sole judge of reasonability? >Aside from revenge, what merits do you find in capital punishment? Are we talking about me, or the majority of the people that support it? Anyway, I think that "revenge" or "fairness" is why most people are in favor of the punishment. If a murderer is going to be punished, people that think that he should "get what he deserves." Most people wouldn't think it would be fair for the murderer to live, while his victim died. >Revenge? Petty and pathetic. Perhaps you think that it is petty and pathetic, but your views are in the minority. >We have a local televised hot topic talk show that very recently >did a segment on capital punishment. Each and every advocate of >the use of this portion of our system of "jurisprudence" cited the >main reason for supporting it: "That bastard deserved it". True >human compassion, forgiveness, and sympathy. Where are we required to have compassion, forgiveness, and sympathy? If someone wrongs me, I will take great lengths to make sure that his advantage is removed, or a similar situation is forced upon him. If someone kills another, then we can apply the golden rule and kill this person in turn. Is not our entire moral system based on such a concept? 
Or, are you stating that human life is sacred, somehow, and that it should never be violated? This would sound like some sort of religious view. >>I mean, how reasonable is imprisonment, really, when you think about it? >>Sure, the person could be released if found innocent, but you still >>can't undo the imiprisonment that was served. Perhaps we shouldn't >>imprision people if we could watch them closely instead. The cost would >>probably be similar, especially if we just implanted some sort of >>electronic device. >Would you rather be alive in prison or dead in the chair? Once a criminal has committed a murder, his desires are irrelevant. And, you still have not answered my question. If you are concerned about the death penalty due to the possibility of the execution of an innocent, then why isn't this same concern shared with imprisonment. Shouldn't we, by your logic, administer as minimum as punishment as possible, to avoid violating the liberty or happiness of an innocent person? keith ''' # Filter out spam and vulgar posts task0 = Task(description='Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n' + discussion, agent=spamfilter) result = task0.execute() if "STOP" in result: #stop here and proceed to next post print('This spam message will be filtered out') # process post with a crew of agents, ultimately delivering a well formatted dialogue task1 = Task(description='Analyse in much detail the following discussion:\n### DISCUSSION:\n' + discussion, agent=analyst) task2 = Task(description='Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.', agent=scriptwriter) task3 = Task(description='''Format the script exactly like this: ## (person 1): (first text line from person 1) ## (person 2): (first text line from person 2) ## (person 1): (second text line from person 1) ## (person 2): (second text line from person 2) ''', agent=formatter) crew = Crew( agents=[analyst, scriptwriter,formatter], tasks=[task1, task2, task3], verbose=2, # Crew verbose more will let you know what tasks are being worked on, you can set it to 1 or 2 to different logging levels process=Process.sequential # Sequential process will have tasks executed one after the other and the outcome of the previous one is passed as extra content into this next. ) result = crew.kickoff() #get rid of directions and actions between brackets, eg: (smiling) result = re.sub(r'\(.*?\)', '', result) print('===================== end result from crew ===================================') print(result) print('===================== score ==================================================') task4 = Task(description='Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n'+result, agent=scorer) score = task4.execute() score = score.split('\n')[0] #sometimes an explanation comes after score, ignore print(f'Scoring the dialogue as: {score}/10')
[ "langchain.chat_models.openai.ChatOpenAI", "langchain_community.chat_models.ChatAnyscale", "langchain_mistralai.chat_models.ChatMistralAI" ]
[((2292, 2556), 'crewai.Agent', 'Agent', ([], {'role': '"""spamfilter"""', 'goal': '"""Decide whether a text is spam or not."""', 'backstory': '"""You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='spamfilter', goal='Decide whether a text is spam or not.',\n backstory=\n 'You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2297, 2556), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2581, 2886), 'crewai.Agent', 'Agent', ([], {'role': '"""analyse"""', 'goal': '"""You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain."""', 'backstory': '"""You are an expert discussion analyst."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='analyse', goal=\n 'You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.'\n , backstory='You are an expert discussion analyst.', llm=mixtral,\n verbose=True, allow_delegation=False)\n", (2586, 2886), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2916, 3352), 'crewai.Agent', 'Agent', ([], {'role': '"""scriptwriter"""', 'goal': '"""Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals."""', 'backstory': '"""You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='scriptwriter', goal=\n 'Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.'\n , backstory=\n 'You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2921, 3352), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3375, 3631), 'crewai.Agent', 'Agent', ([], {'role': '"""formatter"""', 'goal': '"""Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling)."""', 'backstory': '"""You are an expert text formatter."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='formatter', goal=\n 'Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).'\n , backstory='You are an expert text formatter.', llm=mixtral, verbose=\n True, allow_delegation=False)\n", (3380, 3631), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3654, 5254), 'crewai.Agent', 'Agent', ([], {'role': '"""scorer"""', 'goal': '"""You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """', 'backstory': '"""You are an expert at scoring conversations on a scale of 1 to 10."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), '(role=\'scorer\', goal=\n """You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? 
Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """\n , backstory=\n \'You are an expert at scoring conversations on a scale of 1 to 10.\',\n llm=mixtral, verbose=True, allow_delegation=False)\n', (3659, 5254), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8227, 8430), 'crewai.Task', 'Task', ([], {'description': '("""Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion)', 'agent': 'spamfilter'}), '(description=\n """Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion, agent=spamfilter)\n', (8231, 8430), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8659, 8785), 'crewai.Task', 'Task', ([], {'description': '("""Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion)', 'agent': 'analyst'}), '(description=\n """Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion, agent=analyst)\n', (8663, 8785), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8784, 8991), 'crewai.Task', 'Task', ([], {'description': '"""Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes."""', 'agent': 'scriptwriter'}), "(description=\n 'Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.'\n , agent=scriptwriter)\n", (8788, 8991), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8991, 9332), 'crewai.Task', 'Task', ([], {'description': '"""Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """', 'agent': 'formatter'}), '(description=\n """Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """\n , agent=formatter)\n', (8995, 9332), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9344, 9463), 'crewai.Crew', 'Crew', ([], {'agents': '[analyst, scriptwriter, formatter]', 'tasks': '[task1, task2, task3]', 'verbose': '(2)', 'process': 'Process.sequential'}), '(agents=[analyst, scriptwriter, formatter], tasks=[task1, task2, task3],\n verbose=2, process=Process.sequential)\n', (9348, 9463), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9847, 9878), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 'result'], {}), "('\\\\(.*?\\\\)', '', result)\n", (9853, 9878), False, 'import re\n'), ((10082, 10288), 'crewai.Task', 'Task', ([], {'description': '("""Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. 
Do not give an explanation.\n"""\n + result)', 'agent': 'scorer'}), '(description=\n """Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n"""\n + result, agent=scorer)\n', (10086, 10288), False, 'from crewai import Agent, Task, Crew, Process\n'), ((1635, 1720), 'langchain_mistralai.chat_models.ChatMistralAI', 'ChatMistralAI', ([], {'mistral_api_key': 'mistral_key', 'model': '"""mistral-tiny"""', 'temperature': '(0.6)'}), "(mistral_api_key=mistral_key, model='mistral-tiny',\n temperature=0.6)\n", (1648, 1720), False, 'from langchain_mistralai.chat_models import ChatMistralAI\n'), ((1970, 2122), 'langchain.chat_models.openai.ChatOpenAI', 'openai.ChatOpenAI', ([], {'base_url': '"""https://api.together.xyz/v1"""', 'api_key': 'togetherai_key', 'temperature': '(0.5)', 'model': '"""mistralai/Mistral-7B-Instruct-v0.2"""'}), "(base_url='https://api.together.xyz/v1', api_key=\n togetherai_key, temperature=0.5, model='mistralai/Mistral-7B-Instruct-v0.2'\n )\n", (1987, 2122), False, 'from langchain.chat_models import openai\n'), ((2157, 2257), 'langchain_community.chat_models.ChatAnyscale', 'ChatAnyscale', ([], {'model': '"""mistralai/Mistral-7B-Instruct-v0.1"""', 'api_key': 'anyscale_key', 'streaming': '(False)'}), "(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=\n anyscale_key, streaming=False)\n", (2169, 2257), False, 'from langchain_community.chat_models import ChatAnyscale\n')]
"""Chat agent with question answering """ import os from utils.giphy import GiphyAPIWrapper from dataclasses import dataclass from langchain.chains import LLMChain, LLMRequestsChain from langchain import Wikipedia, OpenAI from langchain.agents.react.base import DocstoreExplorer from langchain.agents import ( ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent, ) from langchain.prompts import PromptTemplate from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.agents.conversational.base import ConversationalAgent from datetime import datetime import langchain from langchain.cache import InMemoryCache langchain.llm_cache = InMemoryCache() news_api_key = os.environ["NEWS_API_KEY"] tmdb_bearer_token = os.environ["TMDB_API_KEY"] @dataclass class ChatAgent: agent_executor: AgentExecutor = None def _get_docstore_agent(self): docstore = DocstoreExplorer(Wikipedia()) docstore_tools = [ Tool(name="Search", func=docstore.search, description="Search wikipedia"), Tool( name="Lookup", func=docstore.lookup, description="Lookup a wikipedia page", ), ] docstore_llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo") docstore_agent = initialize_agent( docstore_tools, docstore_llm, agent="react-docstore", verbose=True ) return docstore_agent def _get_requests_llm_tool(self): template = """ Extracted: {requests_result}""" PROMPT = PromptTemplate( input_variables=["requests_result"], template=template, ) def lambda_func(input): out = LLMRequestsChain( llm_chain=LLMChain( llm=OpenAI(temperature=0), prompt=PROMPT, verbose=True ) ).run(input) return out.strip() return lambda_func def __init__(self, *, conversation_chain: LLMChain = None, history_array): date = datetime.today().strftime("%B %d, %Y") # set up a Wikipedia docstore agent docstore_agent = self._get_docstore_agent() giphy = GiphyAPIWrapper() # tool_names = get_all_tool_names() tool_names = [ "wolfram-alpha", "llm-math", "open-meteo-api", "news-api", "tmdb-api", "wikipedia", ] requests_tool = self._get_requests_llm_tool() tools = load_tools( tool_names, llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", verbose=True), news_api_key=news_api_key, tmdb_bearer_token=tmdb_bearer_token, ) # Tweak some of the tool descriptions for tool in tools: if tool.name == "Calculator": tool.description = ( "Use this only to solve numeric math problems and to do arithmetic." ) tools = tools + [ Tool( name="WikipediaSearch", description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.", func=docstore_agent.run, ), Tool( name="GiphySearch", func=giphy.run, return_direct=True, description="useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer.", ), Tool( name="Requests", func=requests_tool, description="A portal to the internet. Use this when you need to get specific content from a site. 
Input should be a specific url, and the output will be all the text on that page.", ), ] # set up the google search tool if the env var is set if "GOOGLE_API_KEY" in os.environ: from langchain.utilities import GoogleSearchAPIWrapper tools.append( Tool( name="Search", func=GoogleSearchAPIWrapper().run, description="Use this tool for questions relating to current events, or when you can't find an answer using any of the other tools.", ) ) # set up the serpapi search tool if the env var is set if "SERPAPI_API_KEY" in os.environ: from langchain import SerpAPIWrapper serpapi = SerpAPIWrapper() tools.append( Tool( name="Search", func=serpapi.run, description="Use this tool for questions relating to current events, or when you can't find an answer using any of the other tools.", ) ) ai_prefix = "AI" human_prefix = "Human" prefix = f"""{ai_prefix} is a large language model. {ai_prefix} is represented by a 🤖. {ai_prefix} uses a light, humorous tone, and very frequently includes emojis its responses. Responses with code examples should be formatted in code blocks using <pre><code></code></pre> tags. {ai_prefix} is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, {ai_prefix} is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. If {ai_prefix} can't provide a good response, it will truthfully answer that it can't help with the user's request. Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. TOOLS: ------ Assistant has access to the following tools: """ suffix = f""" The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date. Questions that refer to a specific date or time period will be interpreted relative to this date. After you answer the question, you MUST to determine which langauge your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US). Begin! Previous conversation history: {{chat_history}} New input: {{input}} {{agent_scratchpad}} """ memory = ConversationBufferMemory(memory_key="chat_history") for item in history_array: memory.save_context( {f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]} ) llm = OpenAI(temperature=0.5, model_name="gpt-3.5-turbo") llm_chain = LLMChain( llm=llm, prompt=ConversationalAgent.create_prompt( tools, ai_prefix=ai_prefix, human_prefix=human_prefix, prefix=prefix, suffix=suffix, ), verbose=True, ) agent_obj = ConversationalAgent( llm_chain=llm_chain, ai_prefix=ai_prefix, verbose=True ) self.agent_executor = AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, verbose=True, max_iterations=5, memory=memory )
[ "langchain.agents.initialize_agent", "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.utilities.GoogleSearchAPIWrapper", "langchain.Wikipedia", "langchain.agents.conversational.base.ConversationalAgent", "langchain.SerpAPIWrapper", "langchain.agents.conversational.base.ConversationalAgent.create_prompt", "langchain.agents.Tool", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.cache.InMemoryCache", "langchain.prompts.PromptTemplate", "langchain.OpenAI" ]
[((726, 741), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (739, 741), False, 'from langchain.cache import InMemoryCache\n'), ((1293, 1342), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1299, 1342), False, 'from langchain import Wikipedia, OpenAI\n'), ((1368, 1456), 'langchain.agents.initialize_agent', 'initialize_agent', (['docstore_tools', 'docstore_llm'], {'agent': '"""react-docstore"""', 'verbose': '(True)'}), "(docstore_tools, docstore_llm, agent='react-docstore',\n verbose=True)\n", (1384, 1456), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1625, 1695), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['requests_result']", 'template': 'template'}), "(input_variables=['requests_result'], template=template)\n", (1639, 1695), False, 'from langchain.prompts import PromptTemplate\n'), ((2261, 2278), 'utils.giphy.GiphyAPIWrapper', 'GiphyAPIWrapper', ([], {}), '()\n', (2276, 2278), False, 'from utils.giphy import GiphyAPIWrapper\n'), ((6817, 6868), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (6841, 6868), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((7054, 7105), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.5, model_name='gpt-3.5-turbo')\n", (7060, 7105), False, 'from langchain import Wikipedia, OpenAI\n'), ((7448, 7523), 'langchain.agents.conversational.base.ConversationalAgent', 'ConversationalAgent', ([], {'llm_chain': 'llm_chain', 'ai_prefix': 'ai_prefix', 'verbose': '(True)'}), '(llm_chain=llm_chain, ai_prefix=ai_prefix, verbose=True)\n', (7467, 7523), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((7577, 7693), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent_obj', 'tools': 'tools', 'verbose': '(True)', 'max_iterations': '(5)', 'memory': 'memory'}), '(agent=agent_obj, tools=tools, verbose=\n True, max_iterations=5, memory=memory)\n', (7611, 7693), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((976, 987), 'langchain.Wikipedia', 'Wikipedia', ([], {}), '()\n', (985, 987), False, 'from langchain import Wikipedia, OpenAI\n'), ((1028, 1101), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'docstore.search', 'description': '"""Search wikipedia"""'}), "(name='Search', func=docstore.search, description='Search wikipedia')\n", (1032, 1101), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1115, 1200), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Lookup"""', 'func': 'docstore.lookup', 'description': '"""Lookup a wikipedia page"""'}), "(name='Lookup', func=docstore.lookup, description='Lookup a wikipedia page'\n )\n", (1119, 1200), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4690, 4706), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (4704, 4706), False, 'from langchain import SerpAPIWrapper\n'), ((2108, 2124), 'datetime.datetime.today', 
'datetime.today', ([], {}), '()\n', (2122, 2124), False, 'from datetime import datetime\n'), ((2637, 2700), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'verbose': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', verbose=True)\n", (2643, 2700), False, 'from langchain import Wikipedia, OpenAI\n'), ((3099, 3281), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""WikipediaSearch"""', 'description': '"""Useful for answering a wide range of factual, scientific, academic, political and historical questions."""', 'func': 'docstore_agent.run'}), "(name='WikipediaSearch', description=\n 'Useful for answering a wide range of factual, scientific, academic, political and historical questions.'\n , func=docstore_agent.run)\n", (3103, 3281), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3348, 3630), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""GiphySearch"""', 'func': 'giphy.run', 'return_direct': '(True)', 'description': '"""useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer."""'}), "(name='GiphySearch', func=giphy.run, return_direct=True, description=\n 'useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer.'\n )\n", (3352, 3630), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3713, 3947), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Requests"""', 'func': 'requests_tool', 'description': '"""A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."""'}), "(name='Requests', func=requests_tool, description=\n 'A portal to the internet. Use this when you need to get specific content from a site. 
Input should be a specific url, and the output will be all the text on that page.'\n )\n", (3717, 3947), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4750, 4931), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'serpapi.run', 'description': '"""Use this tool for questions relating to current events, or when you can\'t find an answer using any of the other tools."""'}), '(name=\'Search\', func=serpapi.run, description=\n "Use this tool for questions relating to current events, or when you can\'t find an answer using any of the other tools."\n )\n', (4754, 4931), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((7176, 7299), 'langchain.agents.conversational.base.ConversationalAgent.create_prompt', 'ConversationalAgent.create_prompt', (['tools'], {'ai_prefix': 'ai_prefix', 'human_prefix': 'human_prefix', 'prefix': 'prefix', 'suffix': 'suffix'}), '(tools, ai_prefix=ai_prefix, human_prefix=\n human_prefix, prefix=prefix, suffix=suffix)\n', (7209, 7299), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((4294, 4318), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (4316, 4318), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n'), ((1860, 1881), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1866, 1881), False, 'from langchain import Wikipedia, OpenAI\n')]
import os

import pinecone
from rich.console import Console
from rich.markdown import Markdown

import langchain
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone

# langchain.debug = True

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_KEY")
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT")
PINECONE_INDEX = os.getenv("PINECONE_INDEX")

embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
index = pinecone.Index(PINECONE_INDEX)
vector_store = Pinecone(index, embeddings, "text")

prompt_template = """
You are a question-answering bot for Airbyte company employees and will be provided relevant context from Notion pages on the Airbyte company knowlege base.
Whenever you are asked a question you answer with a helpful answer if you can, along with the links to those relevant pages for further information.
If you are not sure, you will say that you are not sure but still provide links if anything might be helpful to the questioner.
Only use the provided context. Do not guess and do not use prior knowlege.
Please provide your response in markdown format, starting with a level 2 header that describes the answer under a reasonable summary header.

Notion context for this question:
{context}

Question: {question}

Please provide a helpful answer along one or more URLs that would be helpful for finding additional information:
"""

prompt = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY),
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
    chain_type_kwargs={"prompt": prompt},
)

console = Console()
console.print(Markdown("\n------\n> What do you want to know?"))
console.print("")

while True:
    try:
        query = input("")
    except KeyboardInterrupt:
        console.print("\n")
        console.print(Markdown("_Goodbye!_ 👋"))
        exit(0)

    answer = qa.run(query)
    console.print(Markdown(answer))
    console.print(Markdown("\n------\n> What else do you want to know?\n"))
    console.print("\n")
[ "langchain.llms.OpenAI", "langchain.embeddings.OpenAIEmbeddings", "langchain.prompts.PromptTemplate", "langchain.vectorstores.Pinecone" ]
[((371, 398), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (380, 398), False, 'import os\n'), ((418, 443), 'os.getenv', 'os.getenv', (['"""PINECONE_KEY"""'], {}), "('PINECONE_KEY')\n", (427, 443), False, 'import os\n'), ((467, 500), 'os.getenv', 'os.getenv', (['"""PINECONE_ENVIRONMENT"""'], {}), "('PINECONE_ENVIRONMENT')\n", (476, 500), False, 'import os\n'), ((518, 545), 'os.getenv', 'os.getenv', (['"""PINECONE_INDEX"""'], {}), "('PINECONE_INDEX')\n", (527, 545), False, 'import os\n'), ((560, 607), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (576, 607), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((608, 681), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENVIRONMENT'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n', (621, 681), False, 'import pinecone\n'), ((690, 720), 'pinecone.Index', 'pinecone.Index', (['PINECONE_INDEX'], {}), '(PINECONE_INDEX)\n', (704, 720), False, 'import pinecone\n'), ((736, 771), 'langchain.vectorstores.Pinecone', 'Pinecone', (['index', 'embeddings', '"""text"""'], {}), "(index, embeddings, 'text')\n", (744, 771), False, 'from langchain.vectorstores import Pinecone\n'), ((1643, 1728), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1657, 1728), False, 'from langchain.prompts import PromptTemplate\n'), ((1950, 1959), 'rich.console.Console', 'Console', ([], {}), '()\n', (1957, 1959), False, 'from rich.console import Console\n'), ((1975, 2026), 'rich.markdown.Markdown', 'Markdown', (['"""\n------\n> What do you want to know?"""'], {}), '("""\n------\n> What do you want to know?""")\n', (1983, 2026), False, 'from rich.markdown import Markdown\n'), ((1774, 1826), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'OPENAI_API_KEY'}), '(temperature=0, openai_api_key=OPENAI_API_KEY)\n', (1780, 1826), False, 'from langchain.llms import OpenAI\n'), ((2259, 2275), 'rich.markdown.Markdown', 'Markdown', (['answer'], {}), '(answer)\n', (2267, 2275), False, 'from rich.markdown import Markdown\n'), ((2296, 2353), 'rich.markdown.Markdown', 'Markdown', (['"""\n------\n> What else do you want to know?\n"""'], {}), '("""\n------\n> What else do you want to know?\n""")\n', (2304, 2353), False, 'from rich.markdown import Markdown\n'), ((2171, 2195), 'rich.markdown.Markdown', 'Markdown', (['"""_Goodbye!_ 👋"""'], {}), "('_Goodbye!_ 👋')\n", (2179, 2195), False, 'from rich.markdown import Markdown\n')]
from typing import Union, Callable, List, Dict, Any, TypeVar

from lionagi.libs.sys_util import SysUtil

T = TypeVar("T")


def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
    """
    Converts a generic data node into a Langchain Document.

    This function transforms a node, typically from another data schema, into a Langchain Document format.
    It requires the source node to have a `to_dict` method to convert it into a dictionary, then it renames
    specific keys to match the Langchain Document schema before creating a Langchain Document object.

    Args:
        datanode (T): The data node to convert. Must have a `to_dict` method.
        **kwargs: Additional keyword arguments to be passed to the Langchain Document constructor.

    Returns:
        Any: An instance of `LangchainDocument` populated with data from the input node.
    """
    SysUtil.check_import("langchain")
    from langchain.schema import Document as LangchainDocument

    dnode = datanode.to_dict()
    SysUtil.change_dict_key(dnode, old_key="content", new_key="page_content")
    SysUtil.change_dict_key(dnode, old_key="lc_id", new_key="id_")
    dnode = {**dnode, **kwargs}
    return LangchainDocument(**dnode)


def langchain_loader(
    loader: Union[str, Callable],
    loader_args: List[Any] = [],
    loader_kwargs: Dict[str, Any] = {},
) -> Any:
    """
    Initializes and uses a specified loader to load data within the Langchain ecosystem.

    This function supports dynamically selecting a loader by name or directly using a loader function.
    It passes specified arguments and keyword arguments to the loader for data retrieval or processing.

    Args:
        loader (Union[str, Callable]): A string representing the loader's name or a callable loader function.
        loader_args (List[Any], optional): A list of positional arguments for the loader.
        loader_kwargs (Dict[str, Any], optional): A dictionary of keyword arguments for the loader.

    Returns:
        Any: The result returned by the loader function, typically data loaded into a specified format.

    Raises:
        ValueError: If the loader cannot be initialized or fails to load data.

    Examples:
        >>> data = langchain_loader("json_loader", loader_args=["data.json"])
        >>> isinstance(data, dict)
        True
    """
    SysUtil.check_import("langchain")
    import langchain_community.document_loaders as document_loaders

    try:
        if isinstance(loader, str):
            loader = getattr(document_loaders, loader)
        else:
            loader = loader
    except Exception as e:
        raise ValueError(f"Invalid loader: {loader}. Error: {e}")

    try:
        loader_obj = loader(*loader_args, **loader_kwargs)
        data = loader_obj.load()
        return data
    except Exception as e:
        raise ValueError(f"Failed to load. Error: {e}")


def langchain_text_splitter(
    data: Union[str, List],
    splitter: Union[str, Callable],
    splitter_args: List[Any] = None,
    splitter_kwargs: Dict[str, Any] = None,
) -> List[str]:
    """
    Splits text or a list of texts using a specified Langchain text splitter.

    This function allows for dynamic selection of a text splitter, either by name or as a function,
    to split text or documents into chunks. The splitter can be configured with additional arguments
    and keyword arguments.

    Args:
        data (Union[str, List]): The text or list of texts to be split.
        splitter (Union[str, Callable]): The name of the splitter function or the splitter function itself.
        splitter_args (List[Any], optional): Positional arguments to pass to the splitter function.
        splitter_kwargs (Dict[str, Any], optional): Keyword arguments to pass to the splitter function.

    Returns:
        List[str]: A list of text chunks produced by the text splitter.

    Raises:
        ValueError: If the splitter is invalid or fails during the split operation.
    """
    splitter_args = splitter_args or []
    splitter_kwargs = splitter_kwargs or {}

    SysUtil.check_import("langchain")
    import langchain_text_splitters as text_splitter

    try:
        if isinstance(splitter, str):
            splitter = getattr(text_splitter, splitter)
        else:
            splitter = splitter
    except Exception as e:
        raise ValueError(f"Invalid text splitter: {splitter}. Error: {e}")

    try:
        splitter_obj = splitter(*splitter_args, **splitter_kwargs)
        if isinstance(data, str):
            chunk = splitter_obj.split_text(data)
        else:
            chunk = splitter_obj.split_documents(data)
        return chunk
    except Exception as e:
        raise ValueError(f"Failed to split. Error: {e}")
[ "langchain.schema.Document" ]
[((110, 122), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (117, 122), False, 'from typing import Union, Callable, List, Dict, Any, TypeVar\n'), ((877, 910), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (897, 910), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1010, 1083), 'lionagi.libs.sys_util.SysUtil.change_dict_key', 'SysUtil.change_dict_key', (['dnode'], {'old_key': '"""content"""', 'new_key': '"""page_content"""'}), "(dnode, old_key='content', new_key='page_content')\n", (1033, 1083), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1088, 1150), 'lionagi.libs.sys_util.SysUtil.change_dict_key', 'SysUtil.change_dict_key', (['dnode'], {'old_key': '"""lc_id"""', 'new_key': '"""id_"""'}), "(dnode, old_key='lc_id', new_key='id_')\n", (1111, 1150), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1194, 1220), 'langchain.schema.Document', 'LangchainDocument', ([], {}), '(**dnode)\n', (1211, 1220), True, 'from langchain.schema import Document as LangchainDocument\n'), ((2342, 2375), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (2362, 2375), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((4063, 4096), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (4083, 4096), False, 'from lionagi.libs.sys_util import SysUtil\n')]
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import langchain
from langchain import LLMChain
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException

from .prompts import (FINAL_ANSWER_ACTION, FORMAT_INSTRUCTIONS, QUESTION_PROMPT, SUFFIX)


class ChatZeroShotOutputParser(AgentOutputParser):
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        if FINAL_ANSWER_ACTION in text:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )

        # Remove 'Thought' SUFFIX
        if text.startswith('Thought:'):
            text = text[8:]

        # \s matches against tab/newline/whitespace
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        match = re.search(regex, text, re.DOTALL)
        if not match:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")

        action = match.group(1).strip()
        action_input = match.group(2)
        return AgentAction(action, action_input.strip(" ").strip('"'), text.strip())
[ "langchain.schema.OutputParserException" ]
[((1023, 1056), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (1032, 1056), False, 'import re\n'), ((1097, 1159), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (1118, 1159), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')]
from typing import List
from uuid import uuid4

from langchain.prompts import ChatPromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.fake import FakeListChatModel

from honcho import Honcho
from honcho.ext.langchain import langchain_message_converter

app_name = str(uuid4())

honcho = Honcho(
    app_name=app_name, base_url="http://localhost:8000"
)  # uncomment to use local
# honcho = Honcho(app_name=app_name)  # uses demo server at https://demo.honcho.dev
honcho.initialize()

responses = ["Fake LLM Response :)"]
llm = FakeListChatModel(responses=responses)

system = SystemMessage(
    content="You are world class technical documentation writer. Be as concise as possible"
)

user_name = "CLI-Test"
user = honcho.create_user(user_name)
session = user.create_session()


# def langchain_message_converter(messages: List):
#     new_messages = []
#     for message in messages:
#         if message.is_user:
#             new_messages.append(HumanMessage(content=message.content))
#         else:
#             new_messages.append(AIMessage(content=message.content))
#     return new_messages


def chat():
    while True:
        user_input = input("User: ")
        if user_input == "exit":
            session.close()
            break
        user_message = HumanMessage(content=user_input)

        history = list(session.get_messages_generator())
        langchain_history = langchain_message_converter(history)

        prompt = ChatPromptTemplate.from_messages(
            [system, *langchain_history, user_message]
        )
        chain = prompt | llm
        response = chain.invoke({})
        print(type(response))
        print(f"AI: {response.content}")

        session.create_message(is_user=True, content=user_input)
        session.create_message(is_user=False, content=response.content)


chat()
[ "langchain.prompts.ChatPromptTemplate.from_messages", "langchain_community.chat_models.fake.FakeListChatModel", "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((356, 415), 'honcho.Honcho', 'Honcho', ([], {'app_name': 'app_name', 'base_url': '"""http://localhost:8000"""'}), "(app_name=app_name, base_url='http://localhost:8000')\n", (362, 415), False, 'from honcho import Honcho\n'), ((596, 634), 'langchain_community.chat_models.fake.FakeListChatModel', 'FakeListChatModel', ([], {'responses': 'responses'}), '(responses=responses)\n', (613, 634), False, 'from langchain_community.chat_models.fake import FakeListChatModel\n'), ((644, 756), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are world class technical documentation writer. Be as concise as possible"""'}), "(content=\n 'You are world class technical documentation writer. Be as concise as possible'\n )\n", (657, 756), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((337, 344), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (342, 344), False, 'from uuid import uuid4\n'), ((1338, 1370), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (1350, 1370), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((1456, 1492), 'honcho.ext.langchain.langchain_message_converter', 'langchain_message_converter', (['history'], {}), '(history)\n', (1483, 1492), False, 'from honcho.ext.langchain import langchain_message_converter\n'), ((1510, 1586), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system, *langchain_history, user_message]'], {}), '([system, *langchain_history, user_message])\n', (1542, 1586), False, 'from langchain.prompts import ChatPromptTemplate\n')]
""" A simple cloud consultant bot that can answer questions about kubernetes, aws and cloud native.""" import langchain from langchain.agents import Tool, AgentType, initialize_agent from langchain.tools import HumanInputRun from langchain.callbacks import HumanApprovalCallbackHandler from langchain.vectorstores import Chroma from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.chains import ConversationalRetrievalChain from langchain.memory import ConversationBufferMemory from termcolor import colored from cloud_tool import CloudTool from approval import ApprovalCallBackHandler langchain.debug = False MODEL = "gpt-3.5-turbo" cloud_tool = CloudTool(callbacks=[ApprovalCallBackHandler()]) cloud_tool.description = cloud_tool.description + f"args {cloud_tool.args}".replace( "{", "{{" ).replace("}", "}}") human = HumanInputRun() llm = ChatOpenAI(temperature=0, model=MODEL) embeddings = OpenAIEmbeddings() vectorstore = Chroma(persist_directory="./", embedding_function=embeddings) memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) kubememory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) awsmemory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) docs = ConversationalRetrievalChain.from_llm( llm, vectorstore.as_retriever(), memory=memory ) cloud_tools = [cloud_tool, human] kubectl_agent_chain = initialize_agent( tools=cloud_tools, llm=llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=kubememory, verbose=False, agent_kwargs={ "prefix": """ You are a Kubernetes Command line tool (kubectl) expert. Given an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question. If there is no namespace name given please use the "default" namespace. Only return the command. If an error is returned, rewrite the command, check the command, and try again. """, }, ) aws_agent_chain = initialize_agent( cloud_tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=awsmemory, verbose=False, agent_kwargs={ "prefix": """ You are a AWS Command line tool (aws cli) expert. Given an input question, first create a syntactically correct aws cli command to run, then look at the results of the query and return the answer to the input question. You must generate the correct aws cli command to answer he question. Only return the command. If an error is returned, rewrite the command, check the command, and try again. """, }, ) tools = [ Tool( name="Kubernetes QA System", func=docs.run, description="useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question.", ), Tool( name="Kubectl", func=kubectl_agent_chain.run, description="useful for when you need to use kubectl to look up, change or update your kubernetes cluster.", ), Tool( name="Aws CLI", func=aws_agent_chain.run, description="useful for when you need to use aws cli to look up, change or update your AWS setup.", ), human, ] agent_chain = initialize_agent( tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=False, memory=memory, ) def ask_ai(): """Main method to talk to the ai""" print( colored( "Welcome, i am Your AI cloud consultant. 
How can i help You today?", "green" ) ) try: while True: query = input(colored("You: ", "white", attrs=["bold"])) result = agent_chain.run(input=query) print( colored("Answer: ", "green", attrs=["bold"]), colored(result, "light_green"), ) except (EOFError, KeyboardInterrupt): print("kthxbye") exit() if __name__ == "__main__": ask_ai()
[ "langchain.agents.initialize_agent", "langchain.tools.HumanInputRun", "langchain.memory.ConversationBufferMemory", "langchain.embeddings.openai.OpenAIEmbeddings", "langchain.chat_models.ChatOpenAI", "langchain.agents.Tool", "langchain.vectorstores.Chroma" ]
[((893, 908), 'langchain.tools.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (906, 908), False, 'from langchain.tools import HumanInputRun\n'), ((917, 955), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'MODEL'}), '(temperature=0, model=MODEL)\n', (927, 955), False, 'from langchain.chat_models import ChatOpenAI\n'), ((969, 987), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (985, 987), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1002, 1063), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""./"""', 'embedding_function': 'embeddings'}), "(persist_directory='./', embedding_function=embeddings)\n", (1008, 1063), False, 'from langchain.vectorstores import Chroma\n'), ((1073, 1146), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1097, 1146), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1160, 1233), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1184, 1233), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1246, 1319), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1270, 1319), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1477, 2069), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'cloud_tools', 'llm': 'llm', 'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'memory': 'kubememory', 'verbose': '(False)', 'agent_kwargs': '{\'prefix\':\n """\nYou are a Kubernetes Command line tool (kubectl) expert. \nGiven an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question.\nIf there is no namespace name given please use the "default" namespace.\nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n\n"""\n }'}), '(tools=cloud_tools, llm=llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=kubememory, verbose=False,\n agent_kwargs={\'prefix\':\n """\nYou are a Kubernetes Command line tool (kubectl) expert. \nGiven an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question.\nIf there is no namespace name given please use the "default" namespace.\nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n\n"""\n })\n', (1493, 2069), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2113, 2682), 'langchain.agents.initialize_agent', 'initialize_agent', (['cloud_tools', 'llm'], {'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'memory': 'awsmemory', 'verbose': '(False)', 'agent_kwargs': '{\'prefix\':\n """\nYou are a AWS Command line tool (aws cli) expert. 
\nGiven an input question, first create a syntactically correct aws cli command to run, then look at the results of the query and return the answer to the input question.\nYou must generate the correct aws cli command to answer he question. \nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n"""\n }'}), '(cloud_tools, llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=awsmemory, verbose=False,\n agent_kwargs={\'prefix\':\n """\nYou are a AWS Command line tool (aws cli) expert. \nGiven an input question, first create a syntactically correct aws cli command to run, then look at the results of the query and return the answer to the input question.\nYou must generate the correct aws cli command to answer he question. \nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n"""\n })\n', (2129, 2682), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3401, 3519), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'verbose': '(False)', 'memory': 'memory'}), '(tools, llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=False, memory=memory)\n', (3417, 3519), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2723, 2970), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Kubernetes QA System"""', 'func': 'docs.run', 'description': '"""useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question."""'}), "(name='Kubernetes QA System', func=docs.run, description=\n 'useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question.'\n )\n", (2727, 2970), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2997, 3166), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Kubectl"""', 'func': 'kubectl_agent_chain.run', 'description': '"""useful for when you need to use kubectl to look up, change or update your kubernetes cluster."""'}), "(name='Kubectl', func=kubectl_agent_chain.run, description=\n 'useful for when you need to use kubectl to look up, change or update your kubernetes cluster.'\n )\n", (3001, 3166), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3193, 3349), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Aws CLI"""', 'func': 'aws_agent_chain.run', 'description': '"""useful for when you need to use aws cli to look up, change or update your AWS setup."""'}), "(name='Aws CLI', func=aws_agent_chain.run, description=\n 'useful for when you need to use aws cli to look up, change or update your AWS setup.'\n )\n", (3197, 3349), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3613, 3702), 'termcolor.colored', 'colored', (['"""Welcome, i am Your AI cloud consultant. How can i help You today?"""', '"""green"""'], {}), "('Welcome, i am Your AI cloud consultant. 
How can i help You today?',\n 'green')\n", (3620, 3702), False, 'from termcolor import colored\n'), ((736, 761), 'approval.ApprovalCallBackHandler', 'ApprovalCallBackHandler', ([], {}), '()\n', (759, 761), False, 'from approval import ApprovalCallBackHandler\n'), ((3782, 3823), 'termcolor.colored', 'colored', (['"""You: """', '"""white"""'], {'attrs': "['bold']"}), "('You: ', 'white', attrs=['bold'])\n", (3789, 3823), False, 'from termcolor import colored\n'), ((3910, 3954), 'termcolor.colored', 'colored', (['"""Answer: """', '"""green"""'], {'attrs': "['bold']"}), "('Answer: ', 'green', attrs=['bold'])\n", (3917, 3954), False, 'from termcolor import colored\n'), ((3972, 4002), 'termcolor.colored', 'colored', (['result', '"""light_green"""'], {}), "(result, 'light_green')\n", (3979, 4002), False, 'from termcolor import colored\n')]
from langchain.cache import SQLiteCache import langchain from pydantic import BaseModel from creator.code_interpreter import CodeInterpreter from creator.config.load_config import load_yaml_config import os # Load configuration from YAML yaml_config = load_yaml_config() # Helper function to prepend '~/' to paths if not present def resolve_path(path): if not path.startswith("~"): return os.path.expanduser("~/" + path) return os.path.expanduser(path) project_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..") # Fetch values from the loaded YAML config or set default values _local_skill_library_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_PATH", ".cache/open_creator/skill_library")) _remote_skill_library_path = resolve_path(yaml_config.get("REMOTE_SKILL_LIBRARY_PATH", ".cache/open_creator/remote")) _local_skill_library_vectordb_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_VECTORD_PATH", ".cache/open_creator/vectordb/")) _prompt_cache_history_path = resolve_path(yaml_config.get("PROMPT_CACHE_HISTORY_PATH", ".cache/open_creator/prompt_cache/")) _logger_cache_path = resolve_path(yaml_config.get("LOGGER_CACHE_PATH", ".cache/open_creator/logs/")) _skill_extract_agent_cache_path = resolve_path(yaml_config.get("SKILL_EXTRACT_AGENT_CACHE_PATH", ".cache/open_creator/llm_cache")) _official_skill_library_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_PATH", "timedomain/skill-library")) _official_skill_library_template_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH", "timedomain/skill-library-template")) _model = yaml_config.get("MODEL_NAME", "gpt-3.5-turbo-16k-0613") _temperature = yaml_config.get("TEMPERATURE", 0) _run_human_confirm = yaml_config.get("RUN_HUMAN_CONFIRM", False) _use_stream_callback = yaml_config.get("USE_STREAM_CALLBACK", True) _build_in_skill_library_dir = yaml_config.get("BUILD_IN_SKILL_LIBRARY_DIR", "skill_library/open-creator/") _build_in_skill_library_dir = os.path.join(project_dir, _build_in_skill_library_dir) # Ensure directories exist for path in [_skill_extract_agent_cache_path, _local_skill_library_path, _local_skill_library_vectordb_path, _prompt_cache_history_path, _logger_cache_path]: if not os.path.exists(path): os.makedirs(path) if not os.path.exists(_logger_cache_path): open(os.path.join(_logger_cache_path, "output.log"), 'a').close() # Ensure the history file exists if not os.path.exists(_prompt_cache_history_path): open(os.path.join(_prompt_cache_history_path, "history.txt"), 'a').close() build_in_skill_library_dir = os.path.join(os.path.dirname(os.path.abspath(__file__))) build_in_skill_config = { "create": os.path.join(_build_in_skill_library_dir, "create"), "save": os.path.join(_build_in_skill_library_dir, "save"), "search": os.path.join(_build_in_skill_library_dir, "search"), } # Placeholder for any built-in skill configurations class LibraryConfig(BaseModel): local_skill_library_path: str = _local_skill_library_path remote_skill_library_path: str = _remote_skill_library_path local_skill_library_vectordb_path: str = _local_skill_library_vectordb_path prompt_cache_history_path: str = _prompt_cache_history_path logger_cache_path: str = _logger_cache_path skill_extract_agent_cache_path: str = _skill_extract_agent_cache_path model: str = _model temperature: float = _temperature official_skill_library_path: str = _official_skill_library_path official_skill_library_template_path: str = _official_skill_library_template_path build_in_skill_config: dict = build_in_skill_config 
run_human_confirm: bool = _run_human_confirm use_stream_callback: bool = _use_stream_callback code_interpreter: CodeInterpreter = CodeInterpreter() # prompt paths refactor_agent_prompt_path: str = os.path.join(project_dir, "prompts", "refactor_agent_prompt.md") codeskill_function_schema_path: str = os.path.join(project_dir, "prompts", "codeskill_function_schema.json") creator_agent_prompt_path: str = os.path.join(project_dir, "prompts", "creator_agent_prompt.md") api_doc_path: str = os.path.join(project_dir, "prompts", "api_doc.md") extractor_agent_prompt_path: str = os.path.join(project_dir, "prompts", "extractor_agent_prompt.md") interpreter_agent_prompt_path: str = os.path.join(project_dir, "prompts", "interpreter_agent_prompt.md") tester_agent_prompt_path: str = os.path.join(project_dir, "prompts", "tester_agent_prompt.md") testsummary_function_schema_path: str = os.path.join(project_dir, "prompts", "testsummary_function_schema.json") tips_for_debugging_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_debugging_prompt.md") tips_for_testing_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_testing_prompt.md") tips_for_veryfy_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_veryfy_prompt.md") use_rich: bool = True use_file_logger: bool = False config = LibraryConfig() langchain.llm_cache = SQLiteCache(database_path=f"{config.skill_extract_agent_cache_path}/.langchain.db")
[ "langchain.cache.SQLiteCache" ]
[((254, 272), 'creator.config.load_config.load_yaml_config', 'load_yaml_config', ([], {}), '()\n', (270, 272), False, 'from creator.config.load_config import load_yaml_config\n'), ((2004, 2058), 'os.path.join', 'os.path.join', (['project_dir', '_build_in_skill_library_dir'], {}), '(project_dir, _build_in_skill_library_dir)\n', (2016, 2058), False, 'import os\n'), ((5083, 5171), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'f"""{config.skill_extract_agent_cache_path}/.langchain.db"""'}), "(database_path=\n    f'{config.skill_extract_agent_cache_path}/.langchain.db')\n", (5094, 5171), False, 'from langchain.cache import SQLiteCache\n'), ((448, 472), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (466, 472), False, 'import os\n'), ((2312, 2346), 'os.path.exists', 'os.path.exists', (['_logger_cache_path'], {}), '(_logger_cache_path)\n', (2326, 2346), False, 'import os\n'), ((2459, 2501), 'os.path.exists', 'os.path.exists', (['_prompt_cache_history_path'], {}), '(_prompt_cache_history_path)\n', (2473, 2501), False, 'import os\n'), ((2710, 2761), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""create"""'], {}), "(_build_in_skill_library_dir, 'create')\n", (2722, 2761), False, 'import os\n'), ((2775, 2824), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""save"""'], {}), "(_build_in_skill_library_dir, 'save')\n", (2787, 2824), False, 'import os\n'), ((2840, 2891), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""search"""'], {}), "(_build_in_skill_library_dir, 'search')\n", (2852, 2891), False, 'import os\n'), ((3789, 3806), 'creator.code_interpreter.CodeInterpreter', 'CodeInterpreter', ([], {}), '()\n', (3804, 3806), False, 'from creator.code_interpreter import CodeInterpreter\n'), ((3865, 3929), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""refactor_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'refactor_agent_prompt.md')\n", (3877, 3929), False, 'import os\n'), ((3972, 4042), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""codeskill_function_schema.json"""'], {}), "(project_dir, 'prompts', 'codeskill_function_schema.json')\n", (3984, 4042), False, 'import os\n'), ((4080, 4143), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""creator_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'creator_agent_prompt.md')\n", (4092, 4143), False, 'import os\n'), ((4168, 4218), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""api_doc.md"""'], {}), "(project_dir, 'prompts', 'api_doc.md')\n", (4180, 4218), False, 'import os\n'), ((4258, 4323), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""extractor_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'extractor_agent_prompt.md')\n", (4270, 4323), False, 'import os\n'), ((4365, 4432), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""interpreter_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'interpreter_agent_prompt.md')\n", (4377, 4432), False, 'import os\n'), ((4469, 4531), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tester_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'tester_agent_prompt.md')\n", (4481, 4531), False, 'import os\n'), ((4576, 4648), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""testsummary_function_schema.json"""'], {}), "(project_dir, 'prompts', 'testsummary_function_schema.json')\n", (4588, 4648), False, 'import os\n'), ((4691, 4759), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_debugging_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_debugging_prompt.md')\n", (4703, 4759), False, 'import os\n'), ((4800, 4866), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_testing_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_testing_prompt.md')\n", (4812, 4866), False, 'import os\n'), ((4906, 4971), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_veryfy_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_veryfy_prompt.md')\n", (4918, 4971), False, 'import os\n'), ((405, 436), 'os.path.expanduser', 'os.path.expanduser', (["('~/' + path)"], {}), "('~/' + path)\n", (423, 436), False, 'import os\n'), ((518, 543), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (533, 543), False, 'import os\n'), ((2256, 2276), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2270, 2276), False, 'import os\n'), ((2286, 2303), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2297, 2303), False, 'import os\n'), ((2641, 2666), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2656, 2666), False, 'import os\n'), ((2357, 2403), 'os.path.join', 'os.path.join', (['_logger_cache_path', '"""output.log"""'], {}), "(_logger_cache_path, 'output.log')\n", (2369, 2403), False, 'import os\n'), ((2512, 2567), 'os.path.join', 'os.path.join', (['_prompt_cache_history_path', '"""history.txt"""'], {}), "(_prompt_cache_history_path, 'history.txt')\n", (2524, 2567), False, 'import os\n')]
from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.schemas import TracerSession from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string, ) logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None) tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501 "tracing_callback", default=None ) tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug @contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get OpenAI callback handler in a context manager.""" cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get Tracer in a context manager.""" cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( session_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tenant_id: Optional[str] = None, session_extra: Optional[Dict[str, Any]] = None, ) -> Generator[TracerSession, None, None]: """Get the experimental tracer handler in a context manager.""" # Issue a warning that this is experimental warnings.warn( "The experimental tracing v2 is in development. " "This is not yet stable and may change in the future." 
) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( tenant_id=tenant_id, session_name=session_name, example_id=example_id, session_extra=session_extra, ) session = cb.ensure_session() tracing_v2_callback_var.set(cb) yield session tracing_v2_callback_var.set(None) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): getattr(handler, event_name)(*args, **kwargs) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: logging.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs)) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: logger.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" await asyncio.gather( *( _ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs) for handler in handlers ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, ) -> None: """Initialize run manager.""" self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations.""" return cls(uuid4(), [], []) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for 
LLM run.""" def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received.""" _handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, 
error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received.""" await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForToolRun(RunManager, ToolManagerMixin): """Callback manager for tool run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running.""" _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_end_data_model( self, output, **kwargs: Any, ): """Return the data model for the on_tool_end event.""" _handle_event( self.handlers, "on_tool_end_data_model", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin): """Async callback manager for tool run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that can be used to handle callbacks from langchain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", 
serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) # Re-use the LLM Run Manager since the outputs are treated # the same for now return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> CallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, 
run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> AsyncCallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> T: """Configure the callback manager.""" callback_manager = callback_manager_cls([]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) tracer = tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( os.environ.get("LANGCHAIN_TRACING") is not None or tracer is not None or os.environ.get("LANGCHAIN_HANDLER") is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None tracer_session = os.environ.get("LANGCHAIN_SESSION") debug = _get_debug() if tracer_session is None: tracer_session = "default" if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None: if verbose and not any(isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any(isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_session) callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(session_name=tracer_session) handler.ensure_session() callback_manager.add_handler(handler, True) except Exception as e: logger.debug("Unable to load requested LangChainTracer", e) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
[ "langchain.schema.get_buffer_string", "langchain.callbacks.stdout.StdOutCallbackHandler", "langchain.callbacks.tracers.stdout.ConsoleCallbackHandler", "langchain.callbacks.openai_info.OpenAICallbackHandler", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.langchain_v1.LangChainTracerV1" ]
[((1036, 1063), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1053, 1063), False, 'import logging\n'), ((1208, 1251), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1218, 1251), False, 'from contextvars import ContextVar\n'), ((1316, 1360), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1326, 1360), False, 'from contextvars import ContextVar\n'), ((1446, 1493), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1456, 1493), False, 'from contextvars import ContextVar\n'), ((5790, 5828), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (5797, 5828), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((22935, 22986), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (22942, 22986), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1731, 1754), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1752, 1754), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2005, 2024), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2022, 2024), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2570, 2696), 'warnings.warn', 'warnings.warn', (['"""The experimental tracing v2 is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The experimental tracing v2 is in development. 
This is not yet stable and may change in the future.'\n    )\n", (2583, 2696), False, 'import warnings\n'), ((2787, 2907), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'tenant_id': 'tenant_id', 'session_name': 'session_name', 'example_id': 'example_id', 'session_extra': 'session_extra'}), '(tenant_id=tenant_id, session_name=session_name, example_id=\n    example_id, session_extra=session_extra)\n', (2802, 2907), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((24635, 24670), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (24649, 24670), False, 'import os\n'), ((2761, 2777), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2765, 2777), False, 'from uuid import UUID, uuid4\n'), ((4562, 4596), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (4589, 4596), False, 'import asyncio\n'), ((6507, 6514), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (6512, 6514), False, 'from uuid import UUID, uuid4\n'), ((16682, 16689), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (16687, 16689), False, 'from uuid import UUID, uuid4\n'), ((17367, 17374), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (17372, 17374), False, 'from uuid import UUID, uuid4\n'), ((18148, 18155), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18153, 18155), False, 'from uuid import UUID, uuid4\n'), ((18861, 18868), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18866, 18868), False, 'from uuid import UUID, uuid4\n'), ((20121, 20128), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20126, 20128), False, 'from uuid import UUID, uuid4\n'), ((20760, 20767), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20765, 20767), False, 'from uuid import UUID, uuid4\n'), ((21471, 21478), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21476, 21478), False, 'from uuid import UUID, uuid4\n'), ((22207, 22214), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22212, 22214), False, 'from uuid import UUID, uuid4\n'), ((24322, 24357), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING"""'], {}), "('LANGCHAIN_TRACING')\n", (24336, 24357), False, 'import os\n'), ((24411, 24446), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HANDLER"""'], {}), "('LANGCHAIN_HANDLER')\n", (24425, 24446), False, 'import os\n'), ((24538, 24576), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING_V2"""'], {}), "('LANGCHAIN_TRACING_V2')\n", (24552, 24576), False, 'import os\n'), ((4161, 4216), 'logging.warning', 'logging.warning', (['f"""Error in {event_name} callback: {e}"""'], {}), "(f'Error in {event_name} callback: {e}')\n", (4176, 4216), False, 'import logging\n'), ((25265, 25289), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (25287, 25289), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((25567, 25586), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (25584, 25586), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((4889, 4909), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (4906, 4909), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((25076, 25099), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (25097, 25099), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((26002, 26046), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (26017, 26046), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4730, 4771), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (4747, 4771), False, 'import functools\n'), ((3713, 3733), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (3730, 3733), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((4683, 4707), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4705, 4707), False, 'import asyncio\n')]
"""Base interface for large language models to expose.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Extra, Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. 
""" if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( self._generate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(output) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( self._generate(missing_prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( await self._agenerate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) raise e await run_manager.on_llm_end(output, verbose=self.verbose) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( await self._agenerate( missing_prompts, stop=stop, run_manager=run_manager ) if new_arg_supported else await self._agenerate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e await run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) return result.generations[0][0].text def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop) return AIMessage(content=content) async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) 
starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else self._call(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else await self._acall(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.Generation", "langchain.schema.get_buffer_string", "langchain.callbacks.manager.CallbackManager.configure", "langchain.schema.RunInfo", "langchain.schema.AIMessage", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult" ]
[((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16290, 
16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', 
(16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')]